feat: Complete deadline experiment cleanup

Branch: main
Author: Sean McBride, 4 years ago
Parent: 1a72791c1b
Commit: e32339bbc1

@@ -76,6 +76,10 @@ RUN tar xvfz wasmception.tar.gz -C /sledge/awsm/wasmception
 # RUN curl -sS -L -O $WASI_SDK_URL && dpkg -i wasi-sdk_8.0_amd64.deb && rm -f wasi-sdk_8.0_amd64.deb
 # ENV WASI_SDK=/opt/wasi-sdk

+# PERF
+ADD install_perf.sh /sledge/install_perf.sh
+RUN ./sledge/install_perf.sh
+
 # Create non-root user and add to sudoers
 ARG USERNAME=dev
 ARG USER_UID=1000
@@ -116,4 +120,4 @@ ENV PATH=/opt/sledge/bin:$PATH
 # TODO: Does the build process for the sample applications actually copy here?
 # TODO: Should we create a special SLEDGE_MODULE_PATH that is searched for these modules?
-ENV LD_LIBRARY_PATH=/opt/sledge/bin:LD_LIBRARY_PATH
+ENV LD_LIBRARY_PATH=/opt/sledge/bin:$LD_LIBRARY_PATH

@@ -0,0 +1,26 @@
#!/bin/bash
# If perf is already on the PATH, exit successfully
# (command -v is used because `[[ -x perf ]]` only tests a file named "perf"
# in the current directory, and `return` is invalid outside a function)
command -v perf > /dev/null && exit 0
[[ "$(whoami)" != "root" ]] && {
echo "Expected to run as root"
exit 1
}
# Under WSL2, perf has to be installed from source
if grep --silent 'WSL2' <(uname -r); then
echo "WSL detected. perf must be built from source"
echo "WSL2 support is WIP and not currently functional"
exit 0
sudo apt-get install flex bison python3-dev liblzma-dev libnuma-dev zlib1g libperl-dev libgtk2.0-dev libslang2-dev systemtap-sdt-dev libelf-dev binutils-dev libbabeltrace-dev libdw-dev libunwind-dev libiberty-dev --yes
git clone --depth 1 https://github.com/microsoft/WSL2-Linux-Kernel ~/WSL2-Linux-Kernel
make -Wno-error -j8 -C ~/WSL2-Linux-Kernel/tools/perf
sudo cp ~/WSL2-Linux-Kernel/tools/perf/perf /usr/local/bin
# rm -rf ~/WSL2-Linux-Kernel
else
apt-get install "linux-tools-$(uname -r)" linux-tools-generic -y
fi
exit 0
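
A quick smoke test for the script above, as a minimal sketch. It assumes a Debian-based environment like the Dockerfile's, and that the kernel's perf_event_paranoid setting permits counting.

#!/bin/bash
# Confirm perf landed on the PATH and can actually count events.
command -v perf > /dev/null || { echo "perf not installed"; exit 1; }
perf --version
# Count a trivial workload; fails if perf_event_paranoid is too restrictive.
perf stat -- sleep 1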

@@ -1,9 +1,20 @@
 #!/bin/bash
-dump_bash_stack() {
+declare __common_did_dump_callstack=false
+
+error_msg() {
+	[[ "$__common_did_dump_callstack" == false ]] && {
+		printf "%.23s %s() in %s, line %s: %s\n" "$(date +%F.%T.%N)" "${FUNCNAME[1]}" "${BASH_SOURCE[1]##*/}" "${BASH_LINENO[0]}" "${@}"
+		__common_dump_callstack
+		__common_did_dump_callstack=true
+	}
+}
+
+__common_dump_callstack() {
 	echo "Call Stack:"
-	for func in "${FUNCNAME[@]}"; do
-		echo "$func"
+	# Skip the __common_dump_callstack and error_msg frames
+	for ((i = 2; i < ${#FUNCNAME[@]}; i++)); do
+		printf "\t%d - %s\n" "$((i - 2))" "${FUNCNAME[i]}"
 	done
 }
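
For readers unfamiliar with bash's FUNCNAME/BASH_LINENO arrays, here is a self-contained sketch of the tracing pattern these helpers implement; the function names are illustrative, not the ones added above (a single helper skips one frame, where the pair above skips two).

#!/bin/bash
# demo_error prints caller context, then walks FUNCNAME to dump the stack,
# skipping index 0 (demo_error itself) so frame 0 is the failing caller.
demo_error() {
	printf "%s() in %s, line %s: %s\n" "${FUNCNAME[1]}" "${BASH_SOURCE[1]##*/}" "${BASH_LINENO[0]}" "$*"
	echo "Call Stack:"
	for ((i = 1; i < ${#FUNCNAME[@]}; i++)); do
		printf "\t%d - %s\n" "$((i - 1))" "${FUNCNAME[i]}"
	done
}
inner() { demo_error "something failed"; }
outer() { inner; }
outer # prints frames: inner, outer, main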
@@ -45,14 +56,17 @@ log_environment() {
 # Given a file, returns the number of results
 # This assumes a *.csv file with a header
 # $1 the file we want to check for results
-# $2 an optional return nameref. If not set, writes results to STDOUT
+# $2 an optional return nameref
 get_result_count() {
 	if (($# != 1)); then
-		echo "${FUNCNAME[0]} error: insufficient parameters"
-		dump_bash_stack
+		error_msg "insufficient parameters. $#/1"
+		return 1
 	elif [[ ! -f $1 ]]; then
-		echo "${FUNCNAME[0]} error: the file $1 does not exist"
-		dump_bash_stack
+		error_msg "the file $1 does not exist"
+		return 1
+	elif [[ ! -s $1 ]]; then
+		error_msg "the file $1 is size 0"
+		return 1
 	fi

 	local -r file=$1
@@ -61,10 +75,8 @@ get_result_count() {
 	local -i count=$(($(wc -l < "$file") - 1))

 	if (($# == 2)); then
+		# shellcheck disable=2034
 		local -n __result=$2
-		__result=count
-	else
-		echo "$count"
 	fi

 	if ((count > 0)); then
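
The nameref (local -n) here is bash's out-parameter idiom: the callee assigns through an alias to the caller's variable instead of echoing. A minimal sketch with illustrative names; note that the removed old line `__result=count` was missing the `$`, so it would not have copied the value for a plain caller variable.

#!/bin/bash
# count_lines <file> [out_var]: store the count in out_var if given, else print it.
count_lines() {
	local -r file="$1"
	local -i count
	count=$(wc -l < "$file")
	if (($# == 2)); then
		local -n __out=$2 # __out now aliases the caller's variable
		__out=$count      # assign the value, not the literal string "count"
	else
		echo "$count"
	fi
}

total=0
count_lines /etc/hostname total
echo "lines: $total"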
@@ -75,10 +87,10 @@ get_result_count() {
 }

 kill_runtime() {
-	echo -n "Killing Runtime: "
+	printf "Stopping Runtime: "
 	pkill sledgert > /dev/null 2> /dev/null
 	pkill hey > /dev/null 2> /dev/null
-	echo "[DONE]"
+	printf "[OK]\n"
 }

 generate_gnuplots() {

@@ -1,105 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
host=localhost
timestamp=$(date +%s)
experiment_directory=$(pwd)
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
inputs=(40 10)
duration_sec=60
offset=5
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
for input in ${inputs[*]}; do
hey -n 16 -c 4 -t 0 -o csv -m GET -d "$input\n" http://${host}:$((10000 + input))
done
echo "[DONE]"
sleep 5
echo "Running Experiments"
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
hey -n 1000 -c 1000 -cpus 6 -t 0 -o csv -m GET -d "40\n" http://${host}:10040 > "$results_directory/fib40-con.csv"
sleep $offset
hey -n 25000 -c 1000000 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 > "$results_directory/fib10-con.csv" &
sleep $((duration_sec + offset + 45))
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(20 20000)
# durations_s=(60 70)
payloads=(fib10-con fib40-con)
for ((i = 1; i < 2; i++)); do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
# duration=${durations_s[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
# throughput=$(echo "$oks/$duration" | bc)
# printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@@ -1,109 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
host=192.168.1.13
# host=localhost
timestamp=$(date +%s)
experiment_directory=$(pwd)
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
inputs=(40 10)
duration_sec=30
offset=5
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
for input in ${inputs[*]}; do
hey -n 16 -c 4 -t 0 -o csv -m GET -d "$input\n" http://${host}:$((10000 + input))
done
echo "[DONE]"
sleep 5
echo "Running Experiments"
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 200 -t 0 -o csv -m GET -d "40\n" http://${host}:10040 > "$results_directory/fib40-con.csv" &
sleep $offset
hey -z ${duration_sec}s -cpus 3 -c 200 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 > "$results_directory/fib10-con.csv" &
sleep $((duration_sec + offset + 15))
sleep 30
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(20 20000)
payloads=(fib10-con fib40-con)
durations_s=(30 40)
for ((i = 0; i < 2; i++)); do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
duration=${durations_s[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 {denom++}
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f\n", (ok / denom * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@@ -1,107 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
host=192.168.1.13
# host=localhost
timestamp=$(date +%s)
experiment_directory=$(pwd)
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
inputs=(10)
duration_sec=30
offset=5
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
hey -n 16 -c 4 -t 0 -o csv -m GET -d "10\n" http://${host}:10010
echo "[DONE]"
sleep 5
echo "Running Experiments"
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
# hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 200 -t 0 -o csv -m GET -d "40\n" http://${host}:10040 >"$results_directory/fib40-con.csv" &
# sleep $offset
hey -z ${duration_sec}s -cpus 6 -c 400 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 > "$results_directory/fib10-con.csv"
# sleep $((duration_sec + offset + 15))
# sleep 30
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(20 20000)
payloads=(fib10-con fib40-con)
durations_s=(30 40)
for ((i = 0; i < 1; i++)); do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
duration=${durations_s[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 {denom++}
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5%\n", (ok / denom * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@@ -1,19 +0,0 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
experiment_directory=$(pwd)
project_directory=$(cd ../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
gdb --eval-command="handle SIGUSR1 nostop" \
--eval-command="handle SIGPIPE nostop" \
--eval-command="set pagination off" \
--eval-command="set substitute-path /sledge/runtime $project_directory" \
--eval-command="run $experiment_directory/spec.json" \
sledgert

@@ -1,77 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
experiment_directory=$(pwd)
results_directory="$experiment_directory/res/1606615320-fifo-adm"
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(20 20000)
payloads=(fib10-con fib40-con)
for ((i = 0; i < 2; i++)); do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@@ -1,111 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
host=192.168.1.13
# host=localhost
# timestamp=$(date +%s)
timestamp=1606697099
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
inputs=(40 10)
duration_sec=60
offset=5
# Execute workloads long enough for runtime to learn expected execution time
# echo -n "Running Samples: "
# for input in ${inputs[*]}; do
# hey -n 16 -c 4 -t 0 -o csv -m GET -d "$input\n" http://${host}:$((10000 + input))
# done
# echo "[DONE]"
# sleep 5
# echo "Running Experiments"
# # Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
# hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 200 -t 0 -o csv -m GET -d "40\n" http://${host}:10040 >"$results_directory/fib40-con.csv" &
# sleep $offset
# hey -z ${duration_sec}s -cpus 3 -c 200 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 >"$results_directory/fib10-con.csv" &
# sleep $((duration_sec + offset + 15))
# sleep 30
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(20 20000)
payloads=(fib10-con fib40-con)
durations_s=(60 70)
for ((i = 0; i < 2; i++)); do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
duration=${durations_s[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 {denom++}
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f\n", (ok / denom * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
# duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@@ -1,14 +0,0 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
experiment_directory=$(pwd)
project_directory=$(cd ../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=EDF perf record -g -s sledgert "$experiment_directory/spec.json"
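
The deleted script above wrote a recording via perf record; for reference, such a recording is typically inspected afterwards like this, a minimal sketch assuming perf.data sits in the working directory:

#!/bin/bash
# Summarize hot symbols from the perf.data written by `perf record -g`.
perf report --stdio --sort symbol | head -n 40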

@@ -2,16 +2,24 @@
 source ../common.sh

 # This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
+# Success - The percentage of requests that complete by their deadlines
+# TODO: Does this handle non-200s?
+# Throughput - The mean number of successful requests per second
+# Latency - the round-trip response time (unit?) of successful requests at the p50, p90, p99, and p100 percentiles

 # Use -d flag if running under gdb
-# TODO: GDB? Debug?
+# TODO: Just use ENV for policy and other runtime dynamic variables?

 usage() {
 	echo "$0 [options...]"
 	echo ""
 	echo "Options:"
 	echo "  -t,--target=<target url> Execute as client against remote URL"
 	echo "  -s,--serve=<EDF|FIFO>    Serve with scheduling policy, but do not run client"
+	echo "  -d,--debug=<EDF|FIFO>    Debug under GDB with scheduling policy, but do not run client"
+	echo "  -p,--perf=<EDF|FIFO>     Run under perf with scheduling policy, but do not run client"
 }

+# Declares application level global state
 initialize_globals() {
 	# timestamp is used to name the results directory for a particular test run
 	# shellcheck disable=SC2155
@@ -24,19 +32,27 @@ initialize_globals() {
 	declare -gr binary_directory=$(cd ../../bin && pwd)

 	# Scrape the perf window size from the source if possible
-	declare -gr perf_window_path="../../include/perf_window.h"
+	local -r perf_window_path="../../include/perf_window.h"
 	declare -gi perf_window_buffer_size
 	if ! perf_window_buffer_size=$(grep "#define PERF_WINDOW_BUFFER_SIZE" < "$perf_window_path" | cut -d\ -f3); then
 		echo "Failed to scrape PERF_WINDOW_BUFFER_SIZE from ../../include/perf_window.h"
 		echo "Defaulting to 16"
 		declare -ir perf_window_buffer_size=16
 	fi
+	declare -gir perf_window_buffer_size
+
+	# Globals used by parse_arguments
+	declare -g target=""
+	declare -g policy=""
+	declare -g role=""

-	declare -gx target=""
-	declare -gx policy=""
-	declare -gx role="both"
+	# Configure environment variables
+	export PATH=$binary_directory:$PATH
+	export LD_LIBRARY_PATH=$binary_directory:$LD_LIBRARY_PATH
+	export SLEDGE_NWORKERS=5
 }

+# Parses arguments from the user and sets associated global state
 parse_arguments() {
 	for i in "$@"; do
 		case $i in
@@ -44,53 +60,94 @@ parse_arguments() {
 			if [[ "$role" == "server" ]]; then
 				echo "Cannot set target when server"
 				usage
-				exit 1
+				return 1
 			fi
 			role=client
 			target="${i#*=}"
-			shift # past argument=value
+			shift
 			;;
 		-s=* | --serve=*)
 			if [[ "$role" == "client" ]]; then
-				echo "Cannot serve with target is set"
+				echo "Cannot use -s,--serve with -t,--target"
 				usage
-				exit 1
+				return 1
 			fi
 			role=server
 			policy="${i#*=}"
 			if [[ ! $policy =~ ^(EDF|FIFO)$ ]]; then
 				echo "\"$policy\" is not a valid policy. EDF or FIFO allowed"
 				usage
-				exit 1
+				return 1
 			fi
-			shift # past argument=value
+			shift
+			;;
+		-d=* | --debug=*)
+			if [[ "$role" == "client" ]]; then
+				echo "Cannot use -d,--debug with -t,--target"
+				usage
+				return 1
+			fi
+			role=debug
+			policy="${i#*=}"
+			if [[ ! $policy =~ ^(EDF|FIFO)$ ]]; then
+				echo "\"$policy\" is not a valid policy. EDF or FIFO allowed"
+				usage
+				return 1
+			fi
+			shift
+			;;
+		-p=* | --perf=*)
+			if [[ "$role" == "perf" ]]; then
+				echo "Cannot use -p,--perf with -t,--target"
+				usage
+				return 1
+			fi
+			role=perf
+			policy="${i#*=}"
+			if [[ ! $policy =~ ^(EDF|FIFO)$ ]]; then
+				echo "\"$policy\" is not a valid policy. EDF or FIFO allowed"
+				usage
+				return 1
+			fi
+			shift
 			;;
 		-h | --help)
 			usage
+			exit 0
 			;;
 		*)
-			echo "$1 is not a valid option"
+			echo "$1 is not a valid option"
 			usage
-			exit 1
+			return 1
 			;;
 	esac
 done
+
+# default to both if no arguments were passed
+if [[ -z "$role" ]]; then
+	role="both"
+fi

 # Set globals as read only
 declare -r target
 declare -r policy
 declare -r role
 }

+# Starts the Sledge Runtime
 start_runtime() {
+	printf "Starting Runtime: "
 	if (($# != 2)); then
-		echo "${FUNCNAME[0]} error: invalid number of arguments \"$1\""
+		printf "[ERR]\n"
+		error_msg "invalid number of arguments \"$1\""
 		return 1
 	elif ! [[ $1 =~ ^(EDF|FIFO)$ ]]; then
-		echo "${FUNCNAME[0]} error: expected EDF or FIFO was \"$1\""
+		printf "[ERR]\n"
+		error_msg "expected EDF or FIFO was \"$1\""
 		return 1
 	elif ! [[ -d "$2" ]]; then
-		echo "${FUNCNAME[0]} error: \"$2\" does not exist"
+		printf "[ERR]\n"
+		error_msg "directory \"$2\" does not exist"
 		return 1
 	fi
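
The ${i#*=} expansions in parse_arguments above are plain prefix stripping; a two-line demo for reference:

#!/bin/bash
arg="--serve=EDF"
echo "${arg#*=}"  # EDF       (drop everything through the first '=')
echo "${arg%%=*}" # --serve   (drop everything from the first '=' on)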
@@ -102,114 +159,132 @@ start_runtime() {
 	log_environment >> "$log"

-	SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >> "$log" 2>> "$log" &
-	return $?
+	SLEDGE_SCHEDULER="$scheduler" \
+		sledgert "$experiment_directory/spec.json" >> "$log" 2>> "$log" &
+	printf "[OK]\n"
+	return 0
 }

-# Seed enough work to fill the perf window buffers
+# Sends requests until the per-module perf window buffers are full
+# This ensures that Sledge has accurate estimates of execution time
 run_samples() {
 	local hostname="${1:-localhost}"

 	echo -n "Running Samples: "
-	hey -n "$perf_window_buffer_size" -c "$perf_window_buffer_size" -cpus 3 -t 0 -o csv -m GET -d "40\n" "http://${hostname}:10040" || {
-		echo "error"
+	hey -n "$perf_window_buffer_size" -c "$perf_window_buffer_size" -cpus 3 -t 0 -o csv -m GET -d "40\n" "http://${hostname}:10040" 1> /dev/null 2> /dev/null || {
+		error_msg "fib40 samples failed"
 		return 1
 	}

-	hey -n "$perf_window_buffer_size" -c "$perf_window_buffer_size" -cpus 3 -t 0 -o csv -m GET -d "10\n" "http://${hostname}:100010" || {
-		echo "error"
+	hey -n "$perf_window_buffer_size" -c "$perf_window_buffer_size" -cpus 3 -t 0 -o csv -m GET -d "10\n" "http://${hostname}:100010" 1> /dev/null 2> /dev/null || {
+		error_msg "fib10 samples failed"
 		return 1
 	}

+	echo "[OK]"
 	return 0
 }

+# Execute the fib10 and fib40 experiments sequentially and concurrently
 # $1 (results_directory) - a directory where we will store our results
 # $2 (hostname="localhost") - an optional parameter that sets the hostname. Defaults to localhost
 run_experiments() {
 	if (($# < 1 || $# > 2)); then
-		echo "${FUNCNAME[0]} error: invalid number of arguments \"$1\""
-		exit
+		error_msg "invalid number of arguments \"$1\""
+		return 1
 	elif ! [[ -d "$1" ]]; then
-		echo "${FUNCNAME[0]} error: \"$2\" does not exist"
-		exit
-	elif (($# > 2)) && [[ ! $1 =~ ^(EDF|FIFO)$ ]]; then
-		echo "${FUNCNAME[0]} error: expected EDF or FIFO was \"$1\""
-		exit
+		error_msg "directory \"$1\" does not exist"
+		return 1
 	fi

 	local results_directory="$1"
 	local hostname="${2:-localhost}"

-	# The duration in seconds that the low priority task should run before the high priority task starts
-	local -ir offset=5
-
 	# The duration in seconds that we want the client to send requests
 	local -ir duration_sec=15

-	echo "Running Experiments"
+	# The duration in seconds that the low priority task should run before the high priority task starts
+	local -ir offset=5
+
+	printf "Running Experiments\n"

 	# Run each separately
-	echo "Running fib40"
-	hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" "http://${hostname}:10040" > "$results_directory/fib40.csv" || {
-		echo "error"
+	printf "\tfib40: "
+	hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" "http://$hostname:10040" > "$results_directory/fib40.csv" 2> /dev/null || {
+		printf "[ERR]\n"
+		error_msg "fib40 failed"
 		return 1
 	}
 	get_result_count "$results_directory/fib40.csv" || {
-		echo "fib40 unexpectedly has zero requests"
+		printf "[ERR]\n"
+		error_msg "fib40 unexpectedly has zero requests"
 		return 1
 	}
+	printf "[OK]\n"

-	echo "Running fib10"
-	hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" "http://${hostname}:10010" > "$results_directory/fib10.csv" || {
-		echo "error"
+	printf "\tfib10: "
+	hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" "http://$hostname:10010" > "$results_directory/fib10.csv" 2> /dev/null || {
+		printf "[ERR]\n"
+		error_msg "fib10 failed"
 		return 1
 	}
 	get_result_count "$results_directory/fib10.csv" || {
-		echo "fib10 unexpectedly has zero requests"
+		printf "[ERR]\n"
+		error_msg "fib10 unexpectedly has zero requests"
 		return 1
 	}
+	printf "[OK]\n"

 	# Run concurrently
 	# The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
-	# This asynchronously trigger jobs and then wait on their pids
-	local -a pids=()
+	# This asynchronously triggers jobs and then waits on their PIDs
+	local fib40_con_PID
+	local fib10_con_PID

-	echo "Running fib40_con"
-	hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" "http://${hostname}:10040" > "$results_directory/fib40_con.csv" &
-	pids+=($!)
+	hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" "http://${hostname}:10040" > "$results_directory/fib40_con.csv" 2> /dev/null &
+	fib40_con_PID="$!"

 	sleep $offset

-	echo "Running fib10_con"
-	hey -z "${duration_sec}s" -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" "http://${hostname}:10010" > "$results_directory/fib10_con.csv" &
-	pids+=($!)
+	hey -z "${duration_sec}s" -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" "http://${hostname}:10010" > "$results_directory/fib10_con.csv" 2> /dev/null &
+	fib10_con_PID="$!"

-	for ((i = 0; i < "${#pids[@]}"; i++)); do
-		wait -n "${pids[@]}" || {
-			echo "error"
-			return 1
-		}
-	done
-
-	get_result_count "$results_directory/fib40_con.csv" || {
-		echo "fib40_con unexpectedly has zero requests"
+	wait -f "$fib10_con_PID" || {
+		printf "\tfib10_con: [ERR]\n"
+		error_msg "failed to wait -f ${fib10_con_PID}"
 		return 1
 	}
 	get_result_count "$results_directory/fib10_con.csv" || {
-		echo "fib10_con has zero requests. This might be because fib40_con saturated the runtime"
+		printf "\tfib10_con: [ERR]\n"
+		error_msg "fib10_con has zero requests. This might be because fib40_con saturated the runtime"
+		return 1
 	}
+	printf "\tfib10_con: [OK]\n"
+
+	wait -f "$fib40_con_PID" || {
+		printf "\tfib40_con: [ERR]\n"
+		error_msg "failed to wait -f ${fib40_con_PID}"
+		return 1
+	}
+	get_result_count "$results_directory/fib40_con.csv" || {
+		printf "\tfib40_con: [ERR]\n"
+		error_msg "fib40_con has zero requests."
+		return 1
+	}
+	printf "\tfib40_con: [OK]\n"

 	return 0
 }

+# Process the experimental results and generate human-friendly results for success rate, throughput, and latency
 process_results() {
 	if (($# != 1)); then
-		echo "${FUNCNAME[0]} error: invalid number of arguments \"$1\""
-		exit
+		error_msg "invalid number of arguments ($#, expected 1)"
+		return 1
 	elif ! [[ -d "$1" ]]; then
-		echo "${FUNCNAME[0]} error: \"$1\" does not exist"
-		exit
+		error_msg "directory $1 does not exist"
+		return 1
 	fi

 	local -r results_directory="$1"
@@ -227,6 +302,7 @@ process_results() {
 	local -ar payloads=(fib10 fib10_con fib40 fib40_con)

 	# The deadlines for each of the workloads
+	# TODO: Scrape these from spec.json
 	local -Ar deadlines_ms=(
 		[fib10]=2
 		[fib40]=3000
@@ -257,8 +333,11 @@ process_results() {
 		oks=$(wc -l < "$results_directory/$payload-response.csv")
 		((oks == 0)) && continue # If all errors, skip line

-		# Get Latest Timestamp
+		# We determine duration by looking at the timestamp of the last complete request
+		# TODO: Should this instead just use the client-side synthetic duration_sec value?
 		duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
+		# Throughput is calculated as the mean number of successful requests per second
 		throughput=$(echo "$oks/$duration" | bc)
 		printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
@@ -290,122 +369,177 @@ process_results() {
 }

 run_server() {
-	if (($# != 2)); then
-		echo "${FUNCNAME[0]} error: invalid number of arguments \"$1\""
-		exit
+	if (($# != 1)); then
+		error_msg "invalid number of arguments \"$1\""
+		return 1
 	elif ! [[ $1 =~ ^(EDF|FIFO)$ ]]; then
-		echo "${FUNCNAME[0]} error: expected EDF or FIFO was \"$1\""
-		exit
-	elif ! [[ -d "$2" ]]; then
-		echo "${FUNCNAME[0]} error: \"$2\" does not exist"
-		exit
+		error_msg "expected EDF or FIFO was \"$1\""
+		return 1
 	fi

 	local -r scheduler="$1"
-	local -r results_directory="$2"

-	start_runtime "$scheduler" "$log" || {
-		echo "${FUNCNAME[0]} error"
+	if [[ "$role" == "both" ]]; then
+		local results_directory="$experiment_directory/res/$timestamp/$scheduler"
+	elif [[ "$role" == "server" ]]; then
+		local results_directory="$experiment_directory/res/$timestamp"
+	else
+		error_msg "Unexpected $role"
+		return 1
+	fi
+
+	mkdir -p "$results_directory"
+
+	start_runtime "$scheduler" "$results_directory" || {
+		echo "start_runtime RC: $?"
+		error_msg "Error calling start_runtime $scheduler $results_directory"
 		return 1
 	}
+
+	return 0
 }

-run_client() {
-	results_directory="$experiment_directory/res/$timestamp"
-	mkdir -p "$results_directory"
-
-	run_samples "$target" || {
-		echo "${FUNCNAME[0]} error"
-		exit 1
-	}
-
-	sleep 5
-
-	run_experiments "$target" || {
-		echo "${FUNCNAME[0]} error"
-		exit 1
-	}
-
-	sleep 1
-
-	process_results "$results_directory" || {
-		echo "${FUNCNAME[0]} error"
-		exit 1
-	}
-
-	echo "[DONE]"
-	exit 0
-}
+run_perf() {
+	if (($# != 1)); then
+		printf "[ERR]\n"
+		error_msg "invalid number of arguments \"$1\""
+		return 1
+	elif ! [[ $1 =~ ^(EDF|FIFO)$ ]]; then
+		printf "[ERR]\n"
+		error_msg "expected EDF or FIFO was \"$1\""
+		return 1
+	fi
+
+	[[ ! -x perf ]] && {
+		echo "perf is not present"
+		exit 1
+	}
+
+	SLEDGE_SCHEDULER="$scheduler" perf record -g -s sledgert "$experiment_directory/spec.json"
+}
+
+# Starts the Sledge Runtime under GDB
+run_debug() {
+	# shellcheck disable=SC2155
+	local project_directory=$(cd ../.. && pwd)
+	if (($# != 1)); then
+		printf "[ERR]\n"
+		error_msg "invalid number of arguments \"$1\""
+		return 1
+	elif ! [[ $1 =~ ^(EDF|FIFO)$ ]]; then
+		printf "[ERR]\n"
+		error_msg "expected EDF or FIFO was \"$1\""
+		return 1
+	fi
+
+	local -r scheduler="$1"
+
+	if [[ "$project_directory" != "/sledge/runtime" ]]; then
+		printf "It appears that you are not running in the container. Substituting path to match host environment\n"
+		SLEDGE_SCHEDULER="$scheduler" gdb \
+			--eval-command="handle SIGUSR1 nostop" \
+			--eval-command="handle SIGPIPE nostop" \
+			--eval-command="set pagination off" \
+			--eval-command="set substitute-path /sledge/runtime $project_directory" \
+			--eval-command="run $experiment_directory/spec.json" \
+			sledgert
+	else
+		SLEDGE_SCHEDULER="$scheduler" gdb \
+			--eval-command="handle SIGUSR1 nostop" \
+			--eval-command="handle SIGPIPE nostop" \
+			--eval-command="set pagination off" \
+			--eval-command="run $experiment_directory/spec.json" \
+			sledgert
+	fi
+	return 0
+}
+
+run_client() {
+	if [[ "$role" == "both" ]]; then
+		local results_directory="$experiment_directory/res/$timestamp/$scheduler"
+	elif [[ "$role" == "client" ]]; then
+		local results_directory="$experiment_directory/res/$timestamp"
+	else
+		error_msg "${FUNCNAME[0]} Unexpected $role"
+		return 1
+	fi
+
+	mkdir -p "$results_directory"
+
+	run_samples "$target" || {
+		error_msg "Error calling run_samples $target"
+		return 1
+	}
+
+	run_experiments "$results_directory" || {
+		error_msg "Error calling run_experiments $results_directory"
+		return 1
+	}
+
+	process_results "$results_directory" || {
+		error_msg "Error calling process_results $results_directory"
+		return 1
+	}
+
+	echo "[OK]"
+	return 0
+}

 run_both() {
 	local -ar schedulers=(EDF FIFO)
 	for scheduler in "${schedulers[@]}"; do
-		results_directory="$experiment_directory/res/$timestamp/$scheduler"
-		mkdir -p "$results_directory"
-		start_runtime "$scheduler" "$results_directory" || {
-			echo "${FUNCNAME[0]} Error"
-			exit 1
-		}
-		sleep 1
+		printf "Running %s\n" "$scheduler"

-		run_samples || {
-			echo "${FUNCNAME[0]} Error"
-			kill_runtime
-			exit 1
+		run_server "$scheduler" || {
+			error_msg "Error calling run_server"
+			return 1
 		}
-		sleep 1

-		run_experiments "$results_directory" || {
-			echo "${FUNCNAME[0]} Error"
+		run_client || {
+			error_msg "Error calling run_client"
 			kill_runtime
-			exit 1
+			return 1
 		}
-		sleep 1

 		kill_runtime || {
-			echo "${FUNCNAME[0]} Error"
-			exit 1
-		}
-
-		process_results "$results_directory" || {
-			echo "${FUNCNAME[0]} Error"
-			exit 1
+			error_msg "Error calling kill_runtime"
+			return 1
 		}
-
-		echo "[DONE]"
-		exit 0
 	done
+
+	return 0
 }

 main() {
 	initialize_globals
-	parse_arguments "$@"
-
-	echo "$timestamp"
-	echo "Target: $target"
-	echo "Policy: $policy"
-	echo "Role: $role"
+	parse_arguments "$@" || {
+		exit 1
+	}

 	case $role in
 		both)
 			run_both
 			;;
 		server)
-			results_directory="$experiment_directory/res/$timestamp"
-			mkdir -p "$results_directory"
-			start_runtime "$target" "$results_directory"
-			exit 0
+			run_server "$policy"
+			;;
+		debug)
+			run_debug "$policy"
+			;;
+		perf)
+			run_perf "$policy"
+			;;
+		client)
+			run_client
 			;;
-		client) ;;
 		*)
 			echo "Invalid state"
-			exit 1
+			false
 			;;
 	esac
+
+	exit "$?"
 }

 main "$@"

@@ -1,124 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
schedulers=(EDF FIFO)
for scheduler in ${schedulers[*]}; do
results_directory="$experiment_directory/res/$timestamp/$scheduler"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
# Start the runtime
if [ "$1" != "-d" ]; then
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >> "$results_directory/$log" 2>> "$results_directory/$log" &
sleep 1
else
echo "Running under gdb"
echo "Running under gdb" >> "$results_directory/$log"
fi
inputs=(40 10)
duration_sec=15
offset=5
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
for input in ${inputs[*]}; do
hey -z ${duration_sec}s -cpus 3 -t 0 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
done
echo "[DONE]"
sleep 5
echo "Running Experiments"
# Run each separately
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 > "$results_directory/fib40.csv"
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 > "$results_directory/fib10.csv"
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 > "$results_directory/fib40-con.csv" &
sleep $offset
hey -z ${duration_sec}s -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 > "$results_directory/fib10-con.csv" &
sleep $((duration_sec + offset + 15))
# Stop the runtime if not in debug mode
[ "$1" != "-d" ] && kill_runtime
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(2 2 3000 3000)
payloads=(fib10 fib10-con fib40 fib40-con)
for ((i = 0; i < 4; i++)); do
# for payload in ${payloads[*]}; do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
NR==p90 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
NR==p99 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
NR==p100 {printf "%1.4f%\n", $0 / '"$deadline"' * 100}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"
done

@@ -1,5 +0,0 @@
hey -n 200 -c 200 -t 0 -m GET -d "40\n" http://localhost:10040
hey -n 500 -c 500 -t 0 -m GET -d "10\n" http://localhost:10010

@@ -1,8 +0,0 @@
#!/bin/bash
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
# Start the runtime
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json"