chore: Update preemption with new keys

Branch: main
Author: Sean McBride (4 years ago)
Parent: 10d4cba9b0
Commit: 3d91c104f3

@@ -4,16 +4,16 @@
 # This allows debugging outside of the Docker container
 # Also disables pagination and stopping on SIGUSR1
-declare project_path="$(
-	cd "$(dirname "$1")/../.."
-	pwd
-)"
-echo $project_path
-cd ../../bin
-export LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH"
+experiment_directory=$(pwd)
+project_directory=$(cd ../.. && pwd)
+binary_directory=$(cd "$project_directory"/bin && pwd)
+
+export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
+export PATH="$binary_directory:$PATH"
 
 gdb --eval-command="handle SIGUSR1 nostop" \
+	--eval-command="handle SIGPIPE nostop" \
 	--eval-command="set pagination off" \
-	--eval-command="set substitute-path /sledge/runtime $project_path" \
-	--eval-command="run ../tests/mixed_preemption/test_mixed_preemption.json" \
-	./sledgert
-cd ../../tests
+	--eval-command="set substitute-path /sledge/runtime $project_directory" \
+	--eval-command="run $experiment_directory/spec.json" \
+	sledgert

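For orientation, the rewritten gdb wrapper no longer takes the spec path as an argument: it expects to be started from inside an experiment directory that contains spec.json, with sledgert and its shared objects two directory levels up in bin/. A hedged usage sketch follows; the directory and script names are assumptions, not shown in this commit.

# Assumed layout: <project>/experiments/preemption/ holds spec.json and this wrapper,
# while <project>/bin/ holds sledgert and the *.so modules.
cd experiments/preemption   # hypothetical experiment directory
./debug.sh                  # hypothetical wrapper name; it puts bin/ on PATH and LD_LIBRARY_PATH, then runs sledgert under gdb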
@@ -1,4 +1,5 @@
 #!/bin/bash
+source ../common.sh
 
 # This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
 # Use -d flag if running under gdb
@@ -14,41 +15,11 @@ for scheduler in ${schedulers[*]}; do
 	log=log.txt
 	mkdir -p "$results_directory"
 
-	{
-		echo "*******"
-		echo "* Git *"
-		echo "*******"
-		git log | head -n 1 | cut -d' ' -f2
-		git status
-		echo ""
-
-		echo "************"
-		echo "* Makefile *"
-		echo "************"
-		cat ../../Makefile
-		echo ""
-
-		echo "**********"
-		echo "* Run.sh *"
-		echo "**********"
-		cat run.sh
-		echo ""
-
-		echo "************"
-		echo "* Hardware *"
-		echo "************"
-		lscpu
-		echo ""
-
-		echo "*************"
-		echo "* Execution *"
-		echo "*************"
-	} >>"$results_directory/$log"
+	log_environment >>"$results_directory/$log"
 
 	# Start the runtime
 	if [ "$1" != "-d" ]; then
-		SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
+		SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
 		sleep 1
 	else
 		echo "Running under gdb"
@@ -62,32 +33,26 @@ for scheduler in ${schedulers[*]}; do
 	# Execute workloads long enough for runtime to learn expected execution time
 	echo -n "Running Samples: "
 	for input in ${inputs[*]}; do
-		hey -z ${duration_sec}s -c 3 -q 1000 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
+		hey -z ${duration_sec}s -c 3 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
 	done
 	echo "[DONE]"
 	sleep 5
 
 	echo "Running Experiments"
 	# Run each separately
-	hey -z ${duration_sec}s -cpus 3 -c 3 -q 1000 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40.csv"
-	hey -z ${duration_sec}s -cpus 3 -c 3 -q 1000 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10.csv"
+	hey -z ${duration_sec}s -cpus 3 -c 3 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40.csv"
+	hey -z ${duration_sec}s -cpus 3 -c 3 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10.csv"
 
 	# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
-	hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 3 -q 1000 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40-con.csv" &
+	hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 3 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40-con.csv" &
 	sleep $offset
-	hey -z ${duration_sec}s -cpus 3 -c 3 -q 1000 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10-con.csv" &
-	sleep $((duration_sec + offset))
+	hey -z ${duration_sec}s -cpus 3 -c 3 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10-con.csv" &
+	sleep $((duration_sec + offset + 5))
 
-	# Stop the runtime
-	if [ "$1" != "-d" ]; then
-		sleep $offset
-		echo -n "Running Cleanup: "
-		pkill sledgert >/dev/null 2>/dev/null
-		pkill wrk >/dev/null 2>/dev/null
-		echo "[DONE]"
-	fi
+	# Stop the runtime if not in debug mode
+	[ "$1" != "-d" ] && kill_runtime
 
-	# # Generate *.csv and *.dat results
+	# Generate *.csv and *.dat results
 	echo -n "Parsing Results: "
 	printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
@@ -151,12 +116,8 @@ for scheduler in ${schedulers[*]}; do
 		tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
 	done
 
-	# Generate gnuplots
-	cd "$results_directory" || exit
-	gnuplot ../../latency.gnuplot
-	gnuplot ../../success.gnuplot
-	gnuplot ../../throughput.gnuplot
-	cd "$experiment_directory" || exit
+	# Generate gnuplots. Commented out because we don't have *.gnuplots defined
+	# generate_gnuplots
 
 	# Cleanup, if required
 	echo "[DONE]"

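run.sh now leans on helpers sourced from ../common.sh (log_environment, kill_runtime, generate_gnuplots), none of which appear in this diff. As a rough sketch only, the two helpers that replace inline code above could look like the following, reconstructed from the deleted environment dump and the deleted pkill cleanup (the Makefile and run.sh dumps are omitted for brevity); the real common.sh may differ.

log_environment() {
	# Record roughly what the deleted inline block captured: git revision/status and hardware info.
	echo "*******"
	echo "* Git *"
	echo "*******"
	git log | head -n 1 | cut -d' ' -f2
	git status
	echo ""
	echo "************"
	echo "* Hardware *"
	echo "************"
	lscpu
	echo ""
	echo "*************"
	echo "* Execution *"
	echo "*************"
}

kill_runtime() {
	# Mirrors the cleanup branch removed above; hey is assumed here because it is the
	# load generator this script actually runs (the deleted code killed wrk).
	echo -n "Running Cleanup: "
	pkill sledgert >/dev/null 2>/dev/null
	pkill hey >/dev/null 2>/dev/null
	echo "[DONE]"
}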
@@ -3,7 +3,8 @@
 	"name": "fibonacci_10",
 	"path": "fibonacci_wasm.so",
 	"port": 10010,
-	"relative-deadline-us": 1500,
+	"expected-execution-us": 600,
+	"relative-deadline-us": 2000,
 	"argsize": 1,
 	"http-req-headers": [],
 	"http-req-content-type": "text/plain",
@@ -17,6 +18,7 @@
 	"name": "fibonacci_20",
 	"path": "fibonacci_wasm.so",
 	"port": 10020,
+	"expected-execution-us": 900,
 	"relative-deadline-us": 5000,
 	"argsize": 1,
 	"http-req-headers": [],
@@ -31,6 +33,7 @@
 	"name": "fibonacci_25",
 	"path": "fibonacci_wasm.so",
 	"port": 10025,
+	"expected-execution-us": 90000,
 	"relative-deadline-us": 200000,
 	"argsize": 1,
 	"http-req-headers": [],
@@ -45,7 +48,8 @@
 	"name": "fibonacci_30",
 	"path": "fibonacci_wasm.so",
 	"port": 10030,
-	"relative-deadline-us": 8000,
+	"expected-execution-us": 9000,
+	"relative-deadline-us": 80000,
 	"argsize": 1,
 	"http-req-headers": [],
 	"http-req-content-type": "text/plain",
@@ -59,6 +63,7 @@
 	"name": "fibonacci_35",
 	"path": "fibonacci_wasm.so",
 	"port": 10035,
+	"expected-execution-us": 9000,
 	"relative-deadline-us": 53000,
 	"argsize": 1,
 	"http-req-headers": [],
@@ -73,7 +78,8 @@
 	"name": "fibonacci_40",
 	"path": "fibonacci_wasm.so",
 	"port": 10040,
-	"relative-deadline-us": 660000,
+	"expected-execution-us": 550000,
+	"relative-deadline-us": 1000000,
 	"argsize": 1,
 	"http-req-headers": [],
 	"http-req-content-type": "text/plain",

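Every module in spec.json now carries an expected-execution-us hint alongside relative deadlines that were loosened for several modules; how the runtime consumes the new key is not shown in this diff. A quick, hedged way to sanity-check that each expected execution stays strictly below its deadline, assuming jq is available and spec.json parses as ordinary JSON (an array or stream of module objects; neither assumption is confirmed by this commit):

# Print any module whose expected execution time is not below its relative deadline.
jq -r '.. | objects
	| select(has("expected-execution-us"))
	| select(.["expected-execution-us"] >= .["relative-deadline-us"])
	| "\(.name): expected \(.["expected-execution-us"]) us >= deadline \(.["relative-deadline-us"]) us"' spec.json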