From cf29da6517c51ecd050ecb93ee14135836e740bf Mon Sep 17 00:00:00 2001
From: Sean McBride
Date: Sun, 11 Oct 2020 15:27:48 -0400
Subject: [PATCH] chore: update preemption experiment

---
 runtime/experiments/preemption/run.sh         | 163 ++++++++++++++++++
 .../{test_mixed_preemption.json => spec.json} |  18 +-
 2 files changed, 179 insertions(+), 2 deletions(-)
 create mode 100755 runtime/experiments/preemption/run.sh
 rename runtime/experiments/preemption/{test_mixed_preemption.json => spec.json} (79%)

diff --git a/runtime/experiments/preemption/run.sh b/runtime/experiments/preemption/run.sh
new file mode 100755
index 0000000..92f62c2
--- /dev/null
+++ b/runtime/experiments/preemption/run.sh
@@ -0,0 +1,163 @@
+#!/bin/bash
+
+# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
+# Use -d flag if running under gdb
+
+timestamp=$(date +%s)
+experiment_directory=$(pwd)
+binary_directory=$(cd ../../bin && pwd)
+
+schedulers=(EDF FIFO)
+for scheduler in ${schedulers[*]}; do
+
+	results_directory="$experiment_directory/res/$timestamp/$scheduler"
+	log=log.txt
+
+	mkdir -p "$results_directory"
+
+	{
+		echo "*******"
+		echo "* Git *"
+		echo "*******"
+		git log | head -n 1 | cut -d' ' -f2
+		git status
+		echo ""
+
+		echo "************"
+		echo "* Makefile *"
+		echo "************"
+		cat ../../Makefile
+		echo ""
+
+		echo "**********"
+		echo "* Run.sh *"
+		echo "**********"
+		cat run.sh
+		echo ""
+
+		echo "************"
+		echo "* Hardware *"
+		echo "************"
+		lscpu
+		echo ""
+
+		echo "*************"
+		echo "* Execution *"
+		echo "*************"
+	} >>"$results_directory/$log"
+
+	# Start the runtime
+	if [ "$1" != "-d" ]; then
+		SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
+		sleep 1
+	else
+		echo "Running under gdb"
+		echo "Running under gdb" >>"$results_directory/$log"
+	fi
+
+	inputs=(40 10)
+	duration_sec=15
+	offset=5
+
+	# Execute workloads long enough for the runtime to learn the expected execution time
+	echo -n "Running Samples: "
+	for input in ${inputs[*]}; do
+		hey -z ${duration_sec}s -c 3 -q 1000 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
+	done
+	echo "[DONE]"
+	sleep 5
+
+	echo "Running Experiments"
+	# Run each workload separately
+	hey -z ${duration_sec}s -cpus 3 -c 3 -q 1000 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40.csv"
+	hey -z ${duration_sec}s -cpus 3 -c 3 -q 1000 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10.csv"
+
+	# Run the lower priority workload first, then the higher priority one. The lower priority run is offset so that it is executing for the entire time the higher priority workload runs
+	hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 3 -q 1000 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40-con.csv" &
+	sleep $offset
+	hey -z ${duration_sec}s -cpus 3 -c 3 -q 1000 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10-con.csv" &
+	sleep $((duration_sec + offset))
+
+	# Stop the runtime
+	if [ "$1" != "-d" ]; then
+		sleep $offset
+		echo -n "Running Cleanup: "
+		pkill sledgert >/dev/null 2>/dev/null
+		pkill wrk >/dev/null 2>/dev/null
+		echo "[DONE]"
+	fi
+
+	# Generate *.csv and *.dat results
+	echo -n "Parsing Results: "
+
+	printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
+	printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
+	printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
+
+	deadlines_ms=(1.5 1.5 660 660)
+	payloads=(fib10 fib10-con fib40 fib40-con)
+
+	for ((i = 0; i < 4; i++)); do
+		# for payload in ${payloads[*]}; do
+		payload=${payloads[$i]}
+		deadline=${deadlines_ms[$i]}
+
+		# Get Number of Requests
+		requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
+		((requests == 0)) && continue
+
+		# Calculate Success Rate for csv (percentage of requests that returned 200 within the payload's deadline)
+		awk -F, '
+			$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
+			END{printf "'"$payload"',%3.5f%\n", (ok / (NR - 1) * 100)}
+		' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"
+
+		# Filter on 200s, convert from s to ms, and sort
+		awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
+			sort -g >"$results_directory/$payload-response.csv"
+
+		# Get Number of 200s
+		oks=$(wc -l <"$results_directory/$payload-response.csv")
+		((oks == 0)) && continue # If all errors, skip line
+
+		# Get Latest Timestamp
+		duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
+		throughput=$(echo "$oks/$duration" | bc)
+		printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
+
+		# Generate Latency Data for csv
+		awk '
+			BEGIN {
+				sum = 0
+				p50 = int('"$oks"' * 0.5)
+				p90 = int('"$oks"' * 0.9)
+				p99 = int('"$oks"' * 0.99)
+				p100 = '"$oks"'
+				printf "'"$payload"',"
+			}
+			NR==p50 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
+			NR==p90 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
+			NR==p99 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
+			NR==p100 {printf "%1.4f%\n", $0 / '"$deadline"' * 100}
+		' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"
+
+		# Delete scratch file used for sorting/counting
+		# rm -rf "$results_directory/$payload-response.csv"
+	done
+
+	# Transform csvs to dat files for gnuplot
+	for file in success latency throughput; do
+		echo -n "#" >"$results_directory/$file.dat"
+		tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
+	done
+
+	# Generate gnuplots
+	cd "$results_directory" || exit
+	gnuplot ../../latency.gnuplot
+	gnuplot ../../success.gnuplot
+	gnuplot ../../throughput.gnuplot
+	cd "$experiment_directory" || exit
+
+	# Cleanup, if required
+	echo "[DONE]"
+done
diff --git a/runtime/experiments/preemption/test_mixed_preemption.json b/runtime/experiments/preemption/spec.json
similarity index 79%
rename from runtime/experiments/preemption/test_mixed_preemption.json
rename to runtime/experiments/preemption/spec.json
index cbd3b0c..a802a18 100644
--- a/runtime/experiments/preemption/test_mixed_preemption.json
+++ b/runtime/experiments/preemption/spec.json
@@ -3,7 +3,7 @@
 	"name": "fibonacci_10",
"path": "fibonacci_wasm.so", "port": 10010, - "relative-deadline-us": 4000, + "relative-deadline-us": 1500, "argsize": 1, "http-req-headers": [], "http-req-content-type": "text/plain", @@ -31,7 +31,7 @@ "name": "fibonacci_25", "path": "fibonacci_wasm.so", "port": 10025, - "relative-deadline-us": 6000, + "relative-deadline-us": 200000, "argsize": 1, "http-req-headers": [], "http-req-content-type": "text/plain", @@ -67,4 +67,18 @@ "http-resp-headers": [], "http-resp-size": 1024, "http-resp-content-type": "text/plain" +}, +{ + "active": "yes", + "name": "fibonacci_40", + "path": "fibonacci_wasm.so", + "port": 10040, + "relative-deadline-us": 660000, + "argsize": 1, + "http-req-headers": [], + "http-req-content-type": "text/plain", + "http-req-size": 1024, + "http-resp-headers": [], + "http-resp-size": 1024, + "http-resp-content-type": "text/plain" }