From a5670610476302607b81257a03ef3cb21964cdbe Mon Sep 17 00:00:00 2001
From: Sean McBride
Date: Sat, 12 Dec 2020 12:28:44 -0500
Subject: [PATCH] chore: driver script fiddling

---
 runtime/experiments/deadline/client.sh        |  24 ++--
 runtime/experiments/deadline/client2.sh       | 110 +++++++++++++++++
 runtime/experiments/deadline/client3.sh       | 108 +++++++++++++++++
 runtime/experiments/deadline/fix_calcs.sh     |  77 ++++++++++++
 runtime/experiments/deadline/fix_calcs2.sh    | 111 ++++++++++++++++++
 runtime/experiments/preemption/client.sh      |  21 ++--
 runtime/experiments/preemption/fix_results.sh |  81 +++++++++++++
 7 files changed, 513 insertions(+), 19 deletions(-)
 create mode 100755 runtime/experiments/deadline/client2.sh
 create mode 100755 runtime/experiments/deadline/client3.sh
 create mode 100755 runtime/experiments/deadline/fix_calcs.sh
 create mode 100755 runtime/experiments/deadline/fix_calcs2.sh
 create mode 100755 runtime/experiments/preemption/fix_results.sh

diff --git a/runtime/experiments/deadline/client.sh b/runtime/experiments/deadline/client.sh
index 37e267a..2449ce3 100755
--- a/runtime/experiments/deadline/client.sh
+++ b/runtime/experiments/deadline/client.sh
@@ -4,7 +4,6 @@ source ../common.sh
 # This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
 # Use -d flag if running under gdb
 
-# host=192.168.1.13
 host=localhost
 timestamp=$(date +%s)
 experiment_directory=$(pwd)
@@ -17,13 +16,13 @@ mkdir -p "$results_directory"
 log_environment >>"$results_directory/$log"
 
 inputs=(40 10)
-duration_sec=15
+duration_sec=60
 offset=5
 
 # Execute workloads long enough for runtime to learn excepted execution time
 echo -n "Running Samples: "
 for input in ${inputs[*]}; do
-	hey -z ${duration_sec}s -cpus 3 -t 0 -o csv -m GET -d "$input\n" http://${host}:$((10000 + input))
+	hey -n 16 -c 4 -t 0 -o csv -m GET -d "$input\n" http://${host}:$((10000 + input))
 done
 echo "[DONE]"
 sleep 5
@@ -31,10 +30,10 @@ sleep 5
 echo "Running Experiments"
 
 # Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
-hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 200 -t 0 -o csv -m GET -d "40\n" http://${host}:10040 >"$results_directory/fib40-con.csv" &
-sleep $offset
-hey -z ${duration_sec}s -cpus 3 -c 200 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 >"$results_directory/fib10-con.csv" &
-sleep $((duration_sec + offset + 15))
+hey -n 1000 -c 1000 -cpus 6 -t 0 -o csv -m GET -d "40\n" http://${host}:10040 >"$results_directory/fib40-con.csv"
+# sleep $offset
+# hey -n 25000 -c 1000000 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 >"$results_directory/fib10-con.csv" &
+# sleep $((duration_sec + offset + 45))
 
 # Generate *.csv and *.dat results
 echo -n "Parsing Results: "
@@ -44,11 +43,13 @@ printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
 printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
 printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
 deadlines_ms=(20 20000)
+# durations_s=(60 70)
 payloads=(fib10-con fib40-con)
 
-for ((i = 0; i < 2; i++)); do
+for ((i = 1; i < 2; i++)); do
 	payload=${payloads[$i]}
 	deadline=${deadlines_ms[$i]}
+	# duration=${durations_s[$i]}
 
 	# Get Number of Requests
 	requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
@@ -69,9 +70,8 @@ for ((i = 0; i < 2; i++)); do
 	((oks == 0)) && continue # If all errors, skip line
 
 	# Get Latest Timestamp
-	duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
-	throughput=$(echo "$oks/$duration" | bc)
-	printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
+	# throughput=$(echo "$oks/$duration" | bc)
+	# printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
 
 	# Generate Latency Data for csv
 	awk '
@@ -94,7 +94,7 @@ for ((i = 0; i < 2; i++)); do
 done
 
 # Transform csvs to dat files for gnuplot
-for file in success latency throughput; do
+for file in success latency; do
 	echo -n "#" >"$results_directory/$file.dat"
 	tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
 done
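A side note on the throughput lines that this diff comments out: echo "$oks/$duration" | bc divides with bc's default scale of 0, so the ratio is truncated to an integer before printf formats it as a float. If the calculation comes back, a hedged sketch of a fractional variant (same variables as the scripts; the scale value is my own choice, not something in the patch):

    # Requests per second with fractional precision instead of truncated integer division
    throughput=$(echo "scale=4; $oks/$duration" | bc)
    printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"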
diff --git a/runtime/experiments/deadline/client2.sh b/runtime/experiments/deadline/client2.sh
new file mode 100755
index 0000000..e5909ea
--- /dev/null
+++ b/runtime/experiments/deadline/client2.sh
@@ -0,0 +1,110 @@
+#!/bin/bash
+source ../common.sh
+
+# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
+# Use -d flag if running under gdb
+
+host=192.168.1.13
+# host=localhost
+timestamp=$(date +%s)
+experiment_directory=$(pwd)
+binary_directory=$(cd ../../bin && pwd)
+
+results_directory="$experiment_directory/res/$timestamp"
+log=log.txt
+
+mkdir -p "$results_directory"
+log_environment >>"$results_directory/$log"
+
+inputs=(40 10)
+duration_sec=30
+offset=5
+
+# Execute workloads long enough for runtime to learn excepted execution time
+echo -n "Running Samples: "
+for input in ${inputs[*]}; do
+	hey -n 16 -c 4 -t 0 -o csv -m GET -d "$input\n" http://${host}:$((10000 + input))
+done
+echo "[DONE]"
+sleep 5
+
+echo "Running Experiments"
+
+# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
+hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 200 -t 0 -o csv -m GET -d "40\n" http://${host}:10040 >"$results_directory/fib40-con.csv" &
+sleep $offset
+hey -z ${duration_sec}s -cpus 3 -c 200 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 >"$results_directory/fib10-con.csv" &
+sleep $((duration_sec + offset + 15))
+sleep 30
+
+# Generate *.csv and *.dat results
+echo -n "Parsing Results: "
+
+printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
+printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
+printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
+
+deadlines_ms=(20 20000)
+payloads=(fib10-con fib40-con)
+durations_s=(30 40)
+
+for ((i = 0; i < 2; i++)); do
+	payload=${payloads[$i]}
+	deadline=${deadlines_ms[$i]}
+	duration=${durations_s[$i]}
+
+	# Get Number of Requests
+	requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
+	((requests == 0)) && continue
+
+	# Calculate Success Rate for csv
+	awk -F, '
+		$7 == 200 {denom++}
+		$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
+		END{printf "'"$payload"',%3.5f%\n", (ok / denom * 100)}
+	' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"
+
+	# Filter on 200s, convery from s to ms, and sort
+	awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
+		sort -g >"$results_directory/$payload-response.csv"
+
+	# Get Number of 200s
+	oks=$(wc -l <"$results_directory/$payload-response.csv")
+	((oks == 0)) && continue # If all errors, skip line
+
+	# Get Latest Timestamp
+	duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
+	throughput=$(echo "$oks/$duration" | bc)
+	printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
+
+	# Generate Latency Data for csv
+	awk '
+		BEGIN {
+			sum = 0
+			p50 = int('"$oks"' * 0.5)
+			p90 = int('"$oks"' * 0.9)
+			p99 = int('"$oks"' * 0.99)
+			p100 = '"$oks"'
+			printf "'"$payload"',"
+		}
+		NR==p50 {printf "%1.4f,", $0}
+		NR==p90 {printf "%1.4f,", $0}
+		NR==p99 {printf "%1.4f,", $0}
+		NR==p100 {printf "%1.4f\n", $0}
+	' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"
+
+	# Delete scratch file used for sorting/counting
+	# rm -rf "$results_directory/$payload-response.csv"
+done
+
+# Transform csvs to dat files for gnuplot
+for file in success latency throughput; do
+	echo -n "#" >"$results_directory/$file.dat"
+	tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
+done
+
+# Generate gnuplots. Commented out because we don't have *.gnuplots defined
+# generate_gnuplots
+
+# Cleanup, if requires
+echo "[DONE]"
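client2.sh restores the backgrounded fib40/fib10 pattern and then pads the wait with sleep arithmetic plus an extra sleep 30. Since both hey invocations run in the background, an alternative is to block on them directly; a hedged sketch under that assumption (this is not what the script does):

    # Start the low-priority load, give it a head start, then start the high-priority load.
    hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 200 -t 0 -o csv -m GET -d "40\n" "http://${host}:10040" >"$results_directory/fib40-con.csv" &
    sleep "$offset"
    hey -z ${duration_sec}s -cpus 3 -c 200 -t 0 -o csv -m GET -d "10\n" "http://${host}:10010" >"$results_directory/fib10-con.csv" &

    # Block until both background hey processes exit instead of estimating a sleep.
    wait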
diff --git a/runtime/experiments/deadline/client3.sh b/runtime/experiments/deadline/client3.sh
new file mode 100755
index 0000000..391c53f
--- /dev/null
+++ b/runtime/experiments/deadline/client3.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+source ../common.sh
+
+# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
+# Use -d flag if running under gdb
+
+host=192.168.1.13
+# host=localhost
+timestamp=$(date +%s)
+experiment_directory=$(pwd)
+binary_directory=$(cd ../../bin && pwd)
+
+results_directory="$experiment_directory/res/$timestamp"
+log=log.txt
+
+mkdir -p "$results_directory"
+log_environment >>"$results_directory/$log"
+
+inputs=(10)
+duration_sec=30
+offset=5
+
+# Execute workloads long enough for runtime to learn excepted execution time
+echo -n "Running Samples: "
+hey -n 16 -c 4 -t 0 -o csv -m GET -d "10\n" http://${host}:10010
+echo "[DONE]"
+sleep 5
+
+echo "Running Experiments"
+
+# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
+# hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 200 -t 0 -o csv -m GET -d "40\n" http://${host}:10040 >"$results_directory/fib40-con.csv" &
+# sleep $offset
+hey -z ${duration_sec}s -cpus 6 -c 400 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 >"$results_directory/fib10-con.csv"
+# sleep $((duration_sec + offset + 15))
+# sleep 30
+
+# Generate *.csv and *.dat results
+echo -n "Parsing Results: "
+
+printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
+printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
+printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
+
+deadlines_ms=(20 20000)
+payloads=(fib10-con fib40-con)
+durations_s=(30 40)
+
+for ((i = 0; i < 1; i++)); do
+	payload=${payloads[$i]}
+	deadline=${deadlines_ms[$i]}
+	duration=${durations_s[$i]}
+
+	# Get Number of Requests
+	requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
+	((requests == 0)) && continue
+
+	# Calculate Success Rate for csv
+	awk -F, '
+		$7 == 200 {denom++}
+		$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
+		END{printf "'"$payload"',%3.5f%\n", (ok / denom * 100)}
+	' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"
+
+	# Filter on 200s, convery from s to ms, and sort
+	awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
+		sort -g >"$results_directory/$payload-response.csv"
+
+	# Get Number of 200s
+	oks=$(wc -l <"$results_directory/$payload-response.csv")
+	((oks == 0)) && continue # If all errors, skip line
+
+	# Get Latest Timestamp
+	duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
+	throughput=$(echo "$oks/$duration" | bc)
+	printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
+
+	# Generate Latency Data for csv
+	awk '
+		BEGIN {
+			sum = 0
+			p50 = int('"$oks"' * 0.5)
+			p90 = int('"$oks"' * 0.9)
+			p99 = int('"$oks"' * 0.99)
+			p100 = '"$oks"'
+			printf "'"$payload"',"
+		}
+		NR==p50 {printf "%1.4f,", $0}
+		NR==p90 {printf "%1.4f,", $0}
+		NR==p99 {printf "%1.4f,", $0}
+		NR==p100 {printf "%1.4f\n", $0}
+	' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"
+
+	# Delete scratch file used for sorting/counting
+	# rm -rf "$results_directory/$payload-response.csv"
+done
+
+# Transform csvs to dat files for gnuplot
+for file in success latency throughput; do
+	echo -n "#" >"$results_directory/$file.dat"
+	tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
+done
+
+# Generate gnuplots. Commented out because we don't have *.gnuplots defined
+# generate_gnuplots
+
+# Cleanup, if requires
+echo "[DONE]"
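All of these clients share the same percentile block: response times are sorted ascending into a scratch file, and the record whose line number equals int(oks * fraction) is taken as that percentile. A self-contained toy run of the idea, with invented values (not data from any experiment):

    # Five sorted response times in milliseconds
    printf '10.0\n12.5\n14.0\n20.0\n95.0\n' >/tmp/sorted-response.csv

    oks=5
    awk '
        BEGIN {
            p50 = int('"$oks"' * 0.5)  # line 2 of 5
            p99 = int('"$oks"' * 0.99) # line 4 of 5
        }
        NR == p50 {printf "p50=%s ", $0}
        NR == p99 {printf "p99=%s\n", $0}
    ' /tmp/sorted-response.csv
    # Prints: p50=12.5 p99=20.0

One caveat visible from the int() call: for very small samples the computed index can be 0, in which case no line matches and that column is silently left empty.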
diff --git a/runtime/experiments/deadline/fix_calcs.sh b/runtime/experiments/deadline/fix_calcs.sh
new file mode 100755
index 0000000..cf13246
--- /dev/null
+++ b/runtime/experiments/deadline/fix_calcs.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+source ../common.sh
+
+# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
+# Use -d flag if running under gdb
+
+experiment_directory=$(pwd)
+results_directory="$experiment_directory/res/1606615320-fifo-adm"
+
+# Generate *.csv and *.dat results
+echo -n "Parsing Results: "
+
+printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
+printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
+printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
+
+deadlines_ms=(20 20000)
+payloads=(fib10-con fib40-con)
+
+for ((i = 0; i < 2; i++)); do
+	payload=${payloads[$i]}
+	deadline=${deadlines_ms[$i]}
+
+	# Get Number of Requests
+	requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
+	((requests == 0)) && continue
+
+	# Calculate Success Rate for csv
+	awk -F, '
+		$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
+		END{printf "'"$payload"',%3.5f%\n", (ok / (NR - 1) * 100)}
+	' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"
+
+	# Filter on 200s, convery from s to ms, and sort
+	awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
+		sort -g >"$results_directory/$payload-response.csv"
+
+	# Get Number of 200s
+	oks=$(wc -l <"$results_directory/$payload-response.csv")
+	((oks == 0)) && continue # If all errors, skip line
+
+	# Get Latest Timestamp
+	duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
+	throughput=$(echo "$oks/$duration" | bc)
+	printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
+
+	# Generate Latency Data for csv
+	awk '
+		BEGIN {
+			sum = 0
+			p50 = int('"$oks"' * 0.5)
+			p90 = int('"$oks"' * 0.9)
+			p99 = int('"$oks"' * 0.99)
+			p100 = '"$oks"'
+			printf "'"$payload"',"
+		}
+		NR==p50 {printf "%1.4f,", $0}
+		NR==p90 {printf "%1.4f,", $0}
+		NR==p99 {printf "%1.4f,", $0}
+		NR==p100 {printf "%1.4f\n", $0}
+	' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"
+
+	# Delete scratch file used for sorting/counting
+	# rm -rf "$results_directory/$payload-response.csv"
+done
+
+# Transform csvs to dat files for gnuplot
+for file in success latency throughput; do
+	echo -n "#" >"$results_directory/$file.dat"
+	tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
+done
+
+# Generate gnuplots. Commented out because we don't have *.gnuplots defined
+# generate_gnuplots
+
+# Cleanup, if requires
+echo "[DONE]"
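The point of fix_calcs.sh is the changed denominator in the success-rate awk: client2.sh divides deadline hits by the number of 200 responses (ok / denom), while fix_calcs.sh divides by all data rows (ok / (NR - 1), the -1 dropping the CSV header). A toy comparison with a made-up CSV whose layout mimics what the scripts assume (field 1 = response time in seconds, field 7 = status code; the column names are my invention):

    # Toy CSV: header plus four requests; three returned 200, two of those within 20ms
    printf '%s\n' \
        'response-time,f2,f3,f4,f5,f6,status-code,offset' \
        '0.010,0,0,0,0,0,200,0.1' \
        '0.015,0,0,0,0,0,200,0.2' \
        '0.500,0,0,0,0,0,200,0.3' \
        '0.020,0,0,0,0,0,503,0.4' >/tmp/sample.csv

    # Share of 200s that met a 20ms deadline: 2/3, about 66.7%
    awk -F, '$7 == 200 {denom++} $7 == 200 && ($1 * 1000) <= 20 {ok++} END {printf "%3.5f%%\n", ok / denom * 100}' /tmp/sample.csv

    # Share of all requests that met the deadline: 2/4 = 50%
    awk -F, '$7 == 200 && ($1 * 1000) <= 20 {ok++} END {printf "%3.5f%%\n", ok / (NR - 1) * 100}' /tmp/sample.csv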
diff --git a/runtime/experiments/deadline/fix_calcs2.sh b/runtime/experiments/deadline/fix_calcs2.sh
new file mode 100755
index 0000000..86f0f13
--- /dev/null
+++ b/runtime/experiments/deadline/fix_calcs2.sh
@@ -0,0 +1,111 @@
+#!/bin/bash
+source ../common.sh
+
+# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
+# Use -d flag if running under gdb
+
+host=192.168.1.13
+# host=localhost
+# timestamp=$(date +%s)
+timestamp=1606697099
+experiment_directory=$(pwd)
+binary_directory=$(cd ../../bin && pwd)
+
+results_directory="$experiment_directory/res/$timestamp"
+log=log.txt
+
+mkdir -p "$results_directory"
+log_environment >>"$results_directory/$log"
+
+inputs=(40 10)
+duration_sec=60
+offset=5
+
+# Execute workloads long enough for runtime to learn excepted execution time
+# echo -n "Running Samples: "
+# for input in ${inputs[*]}; do
+# 	hey -n 16 -c 4 -t 0 -o csv -m GET -d "$input\n" http://${host}:$((10000 + input))
+# done
+# echo "[DONE]"
+# sleep 5
+
+# echo "Running Experiments"
+
+# # Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
+# hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 200 -t 0 -o csv -m GET -d "40\n" http://${host}:10040 >"$results_directory/fib40-con.csv" &
+# sleep $offset
+# hey -z ${duration_sec}s -cpus 3 -c 200 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 >"$results_directory/fib10-con.csv" &
+# sleep $((duration_sec + offset + 15))
+# sleep 30
+
+# Generate *.csv and *.dat results
+echo -n "Parsing Results: "
+
+printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
+printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
+printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
+
+deadlines_ms=(20 20000)
+payloads=(fib10-con fib40-con)
+durations_s=(60 70)
+
+for ((i = 0; i < 2; i++)); do
+	payload=${payloads[$i]}
+	deadline=${deadlines_ms[$i]}
+	duration=${durations_s[$i]}
+
+	# Get Number of Requests
+	requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
+	((requests == 0)) && continue
+
+	# Calculate Success Rate for csv
+	awk -F, '
+		$7 == 200 {denom++}
+		$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
+		END{printf "'"$payload"',%3.5f%\n", (ok / denom * 100)}
+	' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"
+
+	# Filter on 200s, convery from s to ms, and sort
+	awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
+		sort -g >"$results_directory/$payload-response.csv"
+
+	# Get Number of 200s
+	oks=$(wc -l <"$results_directory/$payload-response.csv")
+	((oks == 0)) && continue # If all errors, skip line
+
+	# Get Latest Timestamp
+	# duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
+	throughput=$(echo "$oks/$duration" | bc)
+	printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
+
+	# Generate Latency Data for csv
+	awk '
+		BEGIN {
+			sum = 0
+			p50 = int('"$oks"' * 0.5)
+			p90 = int('"$oks"' * 0.9)
+			p99 = int('"$oks"' * 0.99)
+			p100 = '"$oks"'
+			printf "'"$payload"',"
+		}
+		NR==p50 {printf "%1.4f,", $0}
+		NR==p90 {printf "%1.4f,", $0}
+		NR==p99 {printf "%1.4f,", $0}
+		NR==p100 {printf "%1.4f\n", $0}
+	' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"
+
+	# Delete scratch file used for sorting/counting
+	# rm -rf "$results_directory/$payload-response.csv"
+done
+
+# Transform csvs to dat files for gnuplot
+for file in success latency throughput; do
+	echo -n "#" >"$results_directory/$file.dat"
+	tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
+done
+
+# Generate gnuplots. Commented out because we don't have *.gnuplots defined
+# generate_gnuplots
+
+# Cleanup, if requires
+echo "[DONE]"
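fix_calcs.sh and fix_calcs2.sh re-run only the parsing stage against an existing results directory, with the run id hard-coded (res/1606615320-fifo-adm and timestamp=1606697099). If these scripts stick around, one option is to take the run id as an argument; a hedged sketch, where the argument handling is my addition rather than anything in the patch:

    #!/bin/bash
    # Hypothetical variant: accept the results timestamp on the command line
    # so old runs can be re-parsed without editing the script.
    timestamp="${1:?usage: $0 <results-timestamp>}"
    experiment_directory=$(pwd)
    results_directory="$experiment_directory/res/$timestamp"

    if [[ ! -d "$results_directory" ]]; then
        echo "No such results directory: $results_directory" >&2
        exit 1
    fi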
diff --git a/runtime/experiments/preemption/client.sh b/runtime/experiments/preemption/client.sh
index d8cf193..d6ffff6 100755
--- a/runtime/experiments/preemption/client.sh
+++ b/runtime/experiments/preemption/client.sh
@@ -9,34 +9,38 @@ experiment_directory=$(pwd)
 host=192.168.1.13
 
 results_directory="$experiment_directory/res/$timestamp"
-log=log.txt
 
 mkdir -p "$results_directory"
-log_environment >>"$results_directory/$log"
 
 # Start the runtime
 inputs=(40 10)
-duration_sec=15
+duration_sec=30
 offset=5
 
 # Execute workloads long enough for runtime to learn excepted execution time
 echo -n "Running Samples: "
 for input in ${inputs[*]}; do
-	hey -z ${duration_sec}s -cpus 6 -t 0 -o csv -m GET -d "$input\n" http://"$host":$((10000 + input))
+	hey -n 45 -c 4 -t 0 -o csv -m GET -d "$input\n" http://"$host":$((10000 + input))
 done
 echo "[DONE]"
-sleep 5
+sleep 30
 
 echo "Running Experiments"
 
 # Run each separately
-hey -z ${duration_sec}s -cpus 6 -c 100 -t 0 -o csv -m GET -d "40\n" "http://$host:10040" >"$results_directory/fib40.csv"
 hey -z ${duration_sec}s -cpus 6 -c 100 -t 0 -o csv -m GET -d "10\n" "http://$host:10010" >"$results_directory/fib10.csv"
+echo "fib(10) Complete"
+sleep 60
+
+hey -z ${duration_sec}s -cpus 6 -c 100 -t 0 -o csv -m GET -d "40\n" "http://$host:10040" >"$results_directory/fib40.csv"
+echo "fib(40) Complete"
+sleep 120
 # Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
 hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 100 -t 0 -o csv -m GET -d "40\n" "http://$host:10040" >"$results_directory/fib40-con.csv" &
 sleep $offset
 hey -z ${duration_sec}s -cpus 3 -c 100 -t 0 -o csv -m GET -d "10\n" "http://$host:10010" >"$results_directory/fib10-con.csv" &
 sleep $((duration_sec + offset + 15))
+echo "fib(10) & fib(40) Complete"
 
 # Generate *.csv and *.dat results
 echo -n "Parsing Results: "
@@ -45,6 +49,7 @@ printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
 printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
 printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
 
+durations_s=(15 15 15 25)
 payloads=(fib10 fib10-con fib40 fib40-con)
 
 for payload in ${payloads[*]}; do
@@ -52,6 +57,8 @@ for payload in ${payloads[*]}; do
 	requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
 	((requests == 0)) && continue
 
+	duration=${durations_s[$i]}
+
 	# Calculate Success Rate for csv
 	awk -F, '
 		$7 == 200 {ok++}
@@ -67,7 +74,7 @@ for payload in ${payloads[*]}; do
 	((oks == 0)) && continue # If all errors, skip line
 
 	# Get Latest Timestamp
-	duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
+	# duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
 	throughput=$(echo "$oks/$duration" | bc)
 	printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
 
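One thing to flag in this hunk (and in fix_results.sh below): duration=${durations_s[$i]} is added inside for payload in ${payloads[*]}, but i is never assigned in that loop, so unless it leaks in from the environment the subscript evaluates to 0 and every payload's throughput is computed against the first duration. A hedged sketch of an index-based loop that keeps the two arrays paired (same variable names, but the restructuring is mine):

    durations_s=(15 15 15 25)
    payloads=(fib10 fib10-con fib40 fib40-con)

    # Iterate by index so payloads[i] and durations_s[i] stay aligned
    for ((i = 0; i < ${#payloads[@]}; i++)); do
        payload=${payloads[$i]}
        duration=${durations_s[$i]}
        printf 'parsing %s over a %ss window\n' "$payload" "$duration"
    done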
diff --git a/runtime/experiments/preemption/fix_results.sh b/runtime/experiments/preemption/fix_results.sh
new file mode 100755
index 0000000..83ef32d
--- /dev/null
+++ b/runtime/experiments/preemption/fix_results.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+source ../common.sh
+
+# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
+# Modified to target a remote host
+
+timestamp=1606608313-FIFO
+experiment_directory=$(pwd)
+results_directory="$experiment_directory/res/$timestamp"
+
+# Generate *.csv and *.dat results
+echo -n "Parsing Results: "
+
+printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
+printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
+printf "Payload,p50,p90,p99,p998,p999,p100\n" >>"$results_directory/latency.csv"
+
+durations_s=(15 15 15 25)
+payloads=(fib10 fib10-con fib40 fib40-con)
+
+for payload in ${payloads[*]}; do
+	# Get Number of Requests
+	requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
+	((requests == 0)) && continue
+
+	duration=${durations_s[$i]}
+
+	# Calculate Success Rate for csv
+	awk -F, '
+		$7 == 200 {ok++}
+		END{printf "'"$payload"',%3.5f%\n", (ok / (NR - 1) * 100)}
+' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"
+
+	# Filter on 200s, convery from s to ms, and sort
+	awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
+		sort -g >"$results_directory/$payload-response.csv"
+
+	# Get Number of 200s
+	oks=$(wc -l <"$results_directory/$payload-response.csv")
+	((oks == 0)) && continue # If all errors, skip line
+
+	# Get Latest Timestamp
+	# duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
+	throughput=$(echo "$oks/$duration" | bc)
+	printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
+
+	# Generate Latency Data for csv
+	awk '
+		BEGIN {
+			sum = 0
+			p50 = int('"$oks"' * 0.5)
+			p90 = int('"$oks"' * 0.9)
+			p99 = int('"$oks"' * 0.99)
+			p998 = int('"$oks"' * 0.998)
+			p999 = int('"$oks"' * 0.999)
+			p100 = '"$oks"'
+			printf "'"$payload"',"
+		}
+		NR==p50 {printf "%1.4f,", $0}
+		NR==p90 {printf "%1.4f,", $0}
+		NR==p99 {printf "%1.4f,", $0}
+		NR==p998 {printf "%1.4f,", $0}
+		NR==p999 {printf "%1.4f,", $0}
+		NR==p100 {printf "%1.4f\n", $0}
+' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"
+
+	# Delete scratch file used for sorting/counting
+	# rm -rf "$results_directory/$payload-response.csv"
+done
+
+# Transform csvs to dat files for gnuplot
+for file in success latency throughput; do
+	echo -n "#" >"$results_directory/$file.dat"
+	tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
+done
+
+# Generate gnuplots. Commented out because we don't have *.gnuplots defined
+# generate_gnuplots
+
+# Cleanup, if required
+echo "[DONE]"
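A final observation across the five new scripts: the success/latency/throughput parsing block is duplicated nearly verbatim in each of them. Since they all already source ../common.sh, a shared helper would be a natural refactor; the function below is a hypothetical sketch of the latency part only (the name and placement are my assumptions, not part of this patch):

    # Print "payload,p50,p90,p99,p100" for a file of sorted response times.
    percentiles_row() {
        local payload="$1" sorted_file="$2"
        local oks
        oks=$(wc -l <"$sorted_file")
        ((oks == 0)) && return 1
        awk -v payload="$payload" -v oks="$oks" '
            BEGIN {
                p50 = int(oks * 0.5); p90 = int(oks * 0.9)
                p99 = int(oks * 0.99); p100 = oks
                printf "%s,", payload
            }
            NR == p50 {printf "%1.4f,", $0}
            NR == p90 {printf "%1.4f,", $0}
            NR == p99 {printf "%1.4f,", $0}
            NR == p100 {printf "%1.4f\n", $0}
        ' "$sorted_file"
    }

    # Usage sketch:
    # percentiles_row fib10-con "$results_directory/fib10-con-response.csv" >>"$results_directory/latency.csv"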