commit
1b09846e70
@ -0,0 +1 @@
|
||||
res
|
@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
# Executes the runtime in GDB
|
||||
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
|
||||
# This allows debugging outside of the Docker container
|
||||
# Also disables pagination and stopping on SIGUSR1
|
||||
|
||||
experiment_directory=$(pwd)
|
||||
project_directory=$(cd ../.. && pwd)
|
||||
binary_directory=$(cd "$project_directory"/bin && pwd)
|
||||
|
||||
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
|
||||
export PATH="$binary_directory:$PATH"
|
||||
|
||||
gdb --eval-command="handle SIGUSR1 nostop" \
|
||||
--eval-command="handle SIGPIPE nostop" \
|
||||
--eval-command="set pagination off" \
|
||||
--eval-command="set substitute-path /sledge/runtime $project_directory" \
|
||||
--eval-command="run $experiment_directory/spec.json" \
|
||||
sledgert
|
After Width: | Height: | Size: 1.3 KiB |
File diff suppressed because one or more lines are too long
@ -0,0 +1,2 @@
|
||||
pnm file was generated as follows
|
||||
`pngtopnm 5x8.png >5x8.pnm`
|
@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
# Executes the runtime in GDB
|
||||
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
|
||||
# This allows debugging outside of the Docker container
|
||||
# Also disables pagination and stopping on SIGUSR1
|
||||
|
||||
experiment_directory=$(pwd)
|
||||
project_directory=$(cd ../../.. && pwd)
|
||||
binary_directory=$(cd "$project_directory"/bin && pwd)
|
||||
|
||||
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
|
||||
export PATH="$binary_directory:$PATH"
|
||||
|
||||
gdb --eval-command="handle SIGUSR1 nostop" \
|
||||
--eval-command="handle SIGPIPE nostop" \
|
||||
--eval-command="set pagination off" \
|
||||
--eval-command="set substitute-path /sledge/runtime $project_directory" \
|
||||
--eval-command="run $experiment_directory/spec.json" \
|
||||
sledgert
|
@ -0,0 +1,2 @@
|
||||
ABCD abcd EFGHI efghi JKLMN jklmn OPQR opqr
|
||||
STUVW stuvw XYZ xyz 12345 67890 !?'&/\=+-*#(){}[]<>
|
@ -0,0 +1,47 @@
|
||||
#!/bin/bash
|
||||
# Executes the runtime in GDB
|
||||
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
|
||||
# This allows debugging outside of the Docker container
|
||||
# Also disables pagination and stopping on SIGUSR1
|
||||
|
||||
experiment_directory=$(pwd)
|
||||
project_directory=$(cd ../../.. && pwd)
|
||||
binary_directory=$(cd "$project_directory"/bin && pwd)
|
||||
|
||||
if [ "$1" != "-d" ]; then
|
||||
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" &
|
||||
sleep 1
|
||||
else
|
||||
echo "Running under gdb"
|
||||
fi
|
||||
|
||||
expected_result="$(cat ./expected_result.txt)"
|
||||
|
||||
success_count=0
|
||||
total_count=50
|
||||
|
||||
for ((i = 0; i < total_count; i++)); do
|
||||
echo "$i"
|
||||
result=$(curl -H 'Expect:' -H "Content-Type: text/plain" --data-binary "@5x8.pnm" localhost:10000 2>/dev/null)
|
||||
# echo "$result"
|
||||
if [[ "$result" == "$expected_result" ]]; then
|
||||
success_count=$((success_count + 1))
|
||||
else
|
||||
echo "FAIL"
|
||||
echo "Expected:"
|
||||
echo "$expected_result"
|
||||
echo "==============================================="
|
||||
echo "Was:"
|
||||
echo "$result"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "$success_count / $total_count"
|
||||
|
||||
if [ "$1" != "-d" ]; then
|
||||
sleep 5
|
||||
echo -n "Running Cleanup: "
|
||||
pkill sledgert >/dev/null 2>/dev/null
|
||||
pkill wrk >/dev/null 2>/dev/null
|
||||
echo "[DONE]"
|
||||
fi
|
@ -0,0 +1,15 @@
|
||||
{
|
||||
"active": "yes",
|
||||
"name": "gocr",
|
||||
"path": "gocr.aso",
|
||||
"port": 10000,
|
||||
"relative-deadline-us": 500000000,
|
||||
"expected-execution-us": 5000000,
|
||||
"argsize": 1,
|
||||
"http-req-headers": [],
|
||||
"http-req-content-type": "text/plain",
|
||||
"http-req-size": 1024000,
|
||||
"http-resp-headers": [],
|
||||
"http-resp-size": 1024000,
|
||||
"http-resp-content-type": "text/plain"
|
||||
}
|
@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
# Executes the runtime in GDB
|
||||
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
|
||||
# This allows debugging outside of the Docker container
|
||||
# Also disables pagination and stopping on SIGUSR1
|
||||
|
||||
experiment_directory=$(pwd)
|
||||
project_directory=$(cd ../../.. && pwd)
|
||||
binary_directory=$(cd "$project_directory"/bin && pwd)
|
||||
|
||||
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
|
||||
export PATH="$binary_directory:$PATH"
|
||||
|
||||
gdb --eval-command="handle SIGUSR1 nostop" \
|
||||
--eval-command="handle SIGPIPE nostop" \
|
||||
--eval-command="set pagination off" \
|
||||
--eval-command="set substitute-path /sledge/runtime $project_directory" \
|
||||
--eval-command="run $experiment_directory/spec.json" \
|
||||
sledgert
|
@ -0,0 +1,3 @@
|
||||
This is q handw__tten
|
||||
examp(e for _0CR,
|
||||
Write as good as yo_ c4n.
|
After Width: | Height: | Size: 42 KiB |
File diff suppressed because one or more lines are too long
@ -0,0 +1,47 @@
|
||||
#!/bin/bash
|
||||
# Executes the runtime in GDB
|
||||
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
|
||||
# This allows debugging outside of the Docker container
|
||||
# Also disables pagination and stopping on SIGUSR1
|
||||
|
||||
experiment_directory=$(pwd)
|
||||
project_directory=$(cd ../../.. && pwd)
|
||||
binary_directory=$(cd "$project_directory"/bin && pwd)
|
||||
|
||||
if [ "$1" != "-d" ]; then
|
||||
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" &
|
||||
sleep 1
|
||||
else
|
||||
echo "Running under gdb"
|
||||
fi
|
||||
|
||||
expected_result="$(cat ./expected_result.txt)"
|
||||
|
||||
success_count=0
|
||||
total_count=50
|
||||
|
||||
for ((i = 0; i < total_count; i++)); do
|
||||
echo "$i"
|
||||
result=$(curl -H 'Expect:' -H "Content-Type: text/plain" --data-binary "@handwrt1.pnm" localhost:10000 2>/dev/null)
|
||||
# echo "$result"
|
||||
if [[ "$result" == "$expected_result" ]]; then
|
||||
success_count=$((success_count + 1))
|
||||
else
|
||||
echo "FAIL"
|
||||
echo "Expected:"
|
||||
echo "$expected_result"
|
||||
echo "==============================================="
|
||||
echo "Was:"
|
||||
echo "$result"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "$success_count / $total_count"
|
||||
|
||||
if [ "$1" != "-d" ]; then
|
||||
sleep 5
|
||||
echo -n "Running Cleanup: "
|
||||
pkill sledgert >/dev/null 2>/dev/null
|
||||
pkill wrk >/dev/null 2>/dev/null
|
||||
echo "[DONE]"
|
||||
fi
|
@ -0,0 +1,14 @@
|
||||
{
|
||||
"active": "yes",
|
||||
"name": "gocr",
|
||||
"path": "gocr.aso",
|
||||
"port": 10000,
|
||||
"relative-deadline-us": 50000000000,
|
||||
"argsize": 1,
|
||||
"http-req-headers": [],
|
||||
"http-req-content-type": "text/plain",
|
||||
"http-req-size": 1024000,
|
||||
"http-resp-headers": [],
|
||||
"http-resp-size": 1024000,
|
||||
"http-resp-content-type": "text/plain"
|
||||
}
|
@ -0,0 +1,5 @@
|
||||
# A Page from Dr. Jekyll and Mister Hyde
|
||||
|
||||
This is a larger OCR example. The \*.pnm file is 5mb or so.
|
||||
|
||||
In the future, this OCR example might be a good use case for a "step-wise" function, as the file is substantially smaller before conversion to the \*.pnm format.
|
@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
# Executes the runtime in GDB
|
||||
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
|
||||
# This allows debugging outside of the Docker container
|
||||
# Also disables pagination and stopping on SIGUSR1
|
||||
|
||||
experiment_directory=$(pwd)
|
||||
project_directory=$(cd ../../.. && pwd)
|
||||
binary_directory=$(cd "$project_directory"/bin && pwd)
|
||||
|
||||
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
|
||||
export PATH="$binary_directory:$PATH"
|
||||
|
||||
gdb --eval-command="handle SIGUSR1 nostop" \
|
||||
--eval-command="handle SIGPIPE nostop" \
|
||||
--eval-command="set pagination off" \
|
||||
--eval-command="set substitute-path /sledge/runtime $project_directory" \
|
||||
--eval-command="run $experiment_directory/spec.json" \
|
||||
sledgert
|
@ -0,0 +1,33 @@
|
||||
at his to4ch of a certain icy pang along my blood. "Come, sir;' said I.
|
||||
"Yo4 forget that I have not yet the pleas4re ofyo4r acq4aintance. Be
|
||||
seated, if yo4 please: And I showed him an example, and sat down
|
||||
myself in my c4stomary seat and with as fair an imitation of my or-
|
||||
dinary manner to a patient, as the lateness of the ho4r, the nat4re of
|
||||
my preocc4pations, and the horror I had of my visitor, wo4ld s4_er
|
||||
me to m4ster.
|
||||
"I beg yo4r pardon, Dr. Lanyon;' he replied civilly eno4gh. "What
|
||||
yo4 say is very well fo4nded; and my impatience has shown its heels
|
||||
to my politeness. I come here at the instance of yo4r colleag4e, Dr.
|
||||
Henry _ekyll, on a piece of b4siness of some moment; and I 4nder-
|
||||
stood.. ." He pa4sed and p4t his hand to his throat, and I co4ld see,
|
||||
in spite of his collected manner, that he was wrestling against the
|
||||
approaches of the hysteria-"I 4nderstood, a drawer.. ."
|
||||
B4t here I took pity on my visito(0xed)s s4spense, and some perhaps
|
||||
on my own growing c4riosity.
|
||||
"mere it is, sir;' said I, pointing to the drawer, where it lay on the
|
||||
noor behind a table and still covered with the sheet.
|
||||
He sprang to it, and then pa4sed, and laid his hand 4pon his
|
||||
heart; I co4ld hear his teeth grate with the conv4lsive action of his
|
||||
jaws; and his face was so ghastly to see that I grew alarmed both for
|
||||
his life and reason.
|
||||
"Compose yo4rself_' said I.
|
||||
He t4rned a dreadf4l smile to me, and as if with the decision of
|
||||
despair, pl4cked away the sheet. At sight of the contents, he 4ttered
|
||||
one lo4d sob of s4ch immense relief that I sat petri_ed. And the
|
||||
next moment, in a voice that was already fairly well 4nder control,
|
||||
"Have yo4 a grad4ated glass? " he asked.
|
||||
I rose from my place with something of an e_ort and gave him
|
||||
what he asked.
|
||||
He thanked me with a smiling nod, meas4red o4t a few min-
|
||||
ims of the red tinct4re and added one of the powders. me mix-
|
||||
t4re, which was at _rst of a reddish h4e, began, in proportion as the
|
After Width: | Height: | Size: 823 KiB |
File diff suppressed because one or more lines are too long
@ -0,0 +1,46 @@
|
||||
#!/bin/bash
|
||||
# Executes the runtime in GDB
|
||||
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
|
||||
# This allows debugging outside of the Docker container
|
||||
# Also disables pagination and stopping on SIGUSR1
|
||||
|
||||
experiment_directory=$(pwd)
|
||||
project_directory=$(cd ../../.. && pwd)
|
||||
binary_directory=$(cd "$project_directory"/bin && pwd)
|
||||
|
||||
if [ "$1" != "-d" ]; then
|
||||
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" &
|
||||
sleep 1
|
||||
else
|
||||
echo "Running under gdb"
|
||||
fi
|
||||
|
||||
expected_result="$(cat ./expected_result.txt)"
|
||||
success_count=0
|
||||
total_count=50
|
||||
|
||||
for ((i = 0; i < total_count; i++)); do
|
||||
echo "$i"
|
||||
result=$(curl -H 'Expect:' -H "Content-Type: text/plain" --data-binary "@hyde.pnm" localhost:10000 2>/dev/null)
|
||||
# echo "$result"
|
||||
if [[ "$result" == "$expected_result" ]]; then
|
||||
success_count=$((success_count + 1))
|
||||
else
|
||||
echo "FAIL"
|
||||
echo "Expected:"
|
||||
echo "$expected_result"
|
||||
echo "==============================================="
|
||||
echo "Was:"
|
||||
echo "$result"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "$success_count / $total_count"
|
||||
|
||||
if [ "$1" != "-d" ]; then
|
||||
sleep 5
|
||||
echo -n "Running Cleanup: "
|
||||
pkill sledgert >/dev/null 2>/dev/null
|
||||
pkill wrk >/dev/null 2>/dev/null
|
||||
echo "[DONE]"
|
||||
fi
|
@ -0,0 +1,14 @@
|
||||
{
|
||||
"active": "yes",
|
||||
"name": "gocr",
|
||||
"path": "gocr.aso",
|
||||
"port": 10000,
|
||||
"relative-deadline-us": 50000000000,
|
||||
"argsize": 1,
|
||||
"http-req-headers": [],
|
||||
"http-req-content-type": "text/plain",
|
||||
"http-req-size": 5335057,
|
||||
"http-resp-headers": [],
|
||||
"http-resp-size": 5335057,
|
||||
"http-resp-content-type": "text/plain"
|
||||
}
|
@ -0,0 +1,20 @@
|
||||
reset
|
||||
|
||||
set term jpeg
|
||||
set output "latency.jpg"
|
||||
|
||||
set xlabel "Payload (bytes)"
|
||||
set xrange [-5:1050000]
|
||||
|
||||
set ylabel "Latency (ms)"
|
||||
set yrange [0:]
|
||||
|
||||
set key left top
|
||||
|
||||
|
||||
set style histogram columnstacked
|
||||
|
||||
plot 'latency.dat' using 1:2 title 'p50', \
|
||||
'latency.dat' using 1:3 title 'p90', \
|
||||
'latency.dat' using 1:4 title 'p99', \
|
||||
'latency.dat' using 1:5 title 'p100', \
|
@ -0,0 +1,111 @@
|
||||
#!/bin/bash
|
||||
source ../common.sh
|
||||
|
||||
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
|
||||
# Use -d flag if running under gdb
|
||||
|
||||
timestamp=$(date +%s)
|
||||
experiment_directory=$(pwd)
|
||||
binary_directory=$(cd ../../bin && pwd)
|
||||
results_directory="$experiment_directory/res/$timestamp"
|
||||
log=log.txt
|
||||
|
||||
mkdir -p "$results_directory"
|
||||
|
||||
log_environment >>"$results_directory/$log"
|
||||
|
||||
# Start the runtime
|
||||
if [ "$1" != "-d" ]; then
|
||||
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
|
||||
sleep 1
|
||||
else
|
||||
echo "Running under gdb"
|
||||
echo "Running under gdb" >>"$results_directory/$log"
|
||||
fi
|
||||
payloads=(fivebyeight/5x8 handwriting/handwrt1 hyde/hyde)
|
||||
ports=(10000 10001 10002)
|
||||
iterations=1000
|
||||
|
||||
# Execute workloads long enough for runtime to learn expected execution time
|
||||
echo -n "Running Samples: "
|
||||
for i in {0..2}; do
|
||||
hey -n 200 -c 3 -q 200 -o csv -m GET -D "$experiment_directory/${payloads[$i]}.pnm" "http://localhost:${ports[$i]}"
|
||||
done
|
||||
sleep 1
|
||||
echo "[DONE]"
|
||||
|
||||
# Execute the experiments
|
||||
echo "Running Experiments"
|
||||
for i in {0..2}; do
|
||||
printf "\t%s Payload: " "${payloads[$i]}"
|
||||
file=$(echo "${payloads[$i]}" | awk -F/ '{print $2}').csv
|
||||
hey -n "$iterations" -c 3 -cpus 2 -o csv -m GET -D "$experiment_directory/${payloads[$i]}.pnm" "http://localhost:${ports[$i]}" >"$results_directory/$file"
|
||||
echo "[DONE]"
|
||||
done
|
||||
|
||||
# Stop the runtime
|
||||
|
||||
if [ "$1" != "-d" ]; then
|
||||
sleep 5
|
||||
kill_runtime
|
||||
fi
|
||||
|
||||
# Generate *.csv and *.dat results
|
||||
echo -n "Parsing Results: "
|
||||
|
||||
printf "Concurrency,Success_Rate\n" >>"$results_directory/success.csv"
|
||||
printf "Concurrency,Throughput\n" >>"$results_directory/throughput.csv"
|
||||
printf "Con,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
|
||||
|
||||
for payload in ${payloads[*]}; do
|
||||
# Calculate Success Rate for csv
|
||||
file=$(echo "$payload" | awk -F/ '{print $2}')
|
||||
awk -F, '
|
||||
$7 == 200 {ok++}
|
||||
END{printf "'"$file"',%3.5f\n", (ok / '"$iterations"' * 100)}
|
||||
' <"$results_directory/$file.csv" >>"$results_directory/success.csv"
|
||||
|
||||
# Filter on 200s, convert from s to ms, and sort
|
||||
awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$file.csv" |
|
||||
sort -g >"$results_directory/$file-response.csv"
|
||||
|
||||
# Get Number of 200s
|
||||
oks=$(wc -l <"$results_directory/$file-response.csv")
|
||||
((oks == 0)) && continue # If all errors, skip line
|
||||
|
||||
# Get Latest Timestamp
|
||||
duration=$(tail -n1 "$results_directory/$file.csv" | cut -d, -f8)
|
||||
throughput=$(echo "$oks/$duration" | bc)
|
||||
printf "%s,%f\n" "$file" "$throughput" >>"$results_directory/throughput.csv"
|
||||
|
||||
# Generate Latency Data for csv
|
||||
awk '
|
||||
BEGIN {
|
||||
sum = 0
|
||||
p50 = int('"$oks"' * 0.5)
|
||||
p90 = int('"$oks"' * 0.9)
|
||||
p99 = int('"$oks"' * 0.99)
|
||||
p100 = '"$oks"'
|
||||
printf "'"$file"',"
|
||||
}
|
||||
NR==p50 {printf "%1.4f,", $0}
|
||||
NR==p90 {printf "%1.4f,", $0}
|
||||
NR==p99 {printf "%1.4f,", $0}
|
||||
NR==p100 {printf "%1.4f\n", $0}
|
||||
' <"$results_directory/$file-response.csv" >>"$results_directory/latency.csv"
|
||||
|
||||
# Delete scratch file used for sorting/counting
|
||||
rm -rf "$results_directory/$file-response.csv"
|
||||
done
|
||||
|
||||
# Transform csvs to dat files for gnuplot
|
||||
for file in success latency throughput; do
|
||||
echo -n "#" >"$results_directory/$file.dat"
|
||||
tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
|
||||
done
|
||||
|
||||
# Generate gnuplots
|
||||
generate_gnuplots
|
||||
|
||||
# Cleanup, if required
|
||||
echo "[DONE]"
|
@ -0,0 +1,43 @@
|
||||
{
|
||||
"active": "yes",
|
||||
"name": "gocr",
|
||||
"path": "gocr.aso",
|
||||
"port": 10000,
|
||||
"relative-deadline-us": 50000000000,
|
||||
"argsize": 1,
|
||||
"http-req-headers": [],
|
||||
"http-req-content-type": "text/plain",
|
||||
"http-req-size": 1024000,
|
||||
"http-resp-headers": [],
|
||||
"http-resp-size": 1024000,
|
||||
"http-resp-content-type": "text/plain"
|
||||
},
|
||||
{
|
||||
"active": "yes",
|
||||
"name": "gocr",
|
||||
"path": "gocr.aso",
|
||||
"port": 10001,
|
||||
"relative-deadline-us": 50000000000,
|
||||
"argsize": 1,
|
||||
"http-req-headers": [],
|
||||
"http-req-content-type": "text/plain",
|
||||
"http-req-size": 1024000,
|
||||
"http-resp-headers": [],
|
||||
"http-resp-size": 1024000,
|
||||
"http-resp-content-type": "text/plain"
|
||||
},
|
||||
{
|
||||
"active": "yes",
|
||||
"name": "gocr",
|
||||
"path": "gocr.aso",
|
||||
"port": 10002,
|
||||
"relative-deadline-us": 50000000000,
|
||||
"argsize": 1,
|
||||
"http-req-headers": [],
|
||||
"http-req-content-type": "text/plain",
|
||||
"http-req-size": 5335057,
|
||||
"http-resp-headers": [],
|
||||
"http-resp-size": 5335057,
|
||||
"http-resp-content-type": "text/plain"
|
||||
}
|
||||
|
@ -0,0 +1,12 @@
|
||||
reset
|
||||
|
||||
set term jpeg
|
||||
set output "success.jpg"
|
||||
|
||||
set xlabel "Connections"
|
||||
set xrange [-5:105]
|
||||
|
||||
set ylabel "% 2XX"
|
||||
set yrange [0:110]
|
||||
|
||||
plot 'success.dat' using 1:2 title '2XX'
|
@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
cd handwriting || exit
|
||||
./run.sh
|
||||
cd .. || exit
|
||||
cd hyde || exit
|
||||
./run.sh
|
||||
cd .. || exit
|
||||
cd fivebyeight || exit
|
||||
./run.sh
|
||||
cd ..
|
@ -0,0 +1,13 @@
|
||||
reset
|
||||
|
||||
set term jpeg
|
||||
set output "throughput.jpg"
|
||||
|
||||
# TODO: Axis shouldn't be linear
|
||||
set xlabel "Connections"
|
||||
set xrange [-5:105]
|
||||
|
||||
set ylabel "Requests/sec"
|
||||
set yrange [0:]
|
||||
|
||||
plot 'throughput.dat' using 1:2 title 'Reqs/sec'
|
@ -0,0 +1,47 @@
|
||||
#!/bin/bash
|
||||
|
||||
log_environment() {
|
||||
echo "*******"
|
||||
echo "* Git *"
|
||||
echo "*******"
|
||||
git log | head -n 1 | cut -d' ' -f2
|
||||
git status
|
||||
echo ""
|
||||
|
||||
echo "************"
|
||||
echo "* Makefile *"
|
||||
echo "************"
|
||||
cat ../../Makefile
|
||||
echo ""
|
||||
|
||||
echo "**********"
|
||||
echo "* Run.sh *"
|
||||
echo "**********"
|
||||
cat run.sh
|
||||
echo ""
|
||||
|
||||
echo "************"
|
||||
echo "* Hardware *"
|
||||
echo "************"
|
||||
lscpu
|
||||
echo ""
|
||||
|
||||
echo "*************"
|
||||
echo "* Execution *"
|
||||
echo "*************"
|
||||
}
|
||||
|
||||
kill_runtime() {
|
||||
echo -n "Running Cleanup: "
|
||||
pkill sledgert >/dev/null 2>/dev/null
|
||||
pkill hey >/dev/null 2>/dev/null
|
||||
echo "[DONE]"
|
||||
}
|
||||
|
||||
generate_gnuplots() {
|
||||
cd "$results_directory" || exit
|
||||
gnuplot ../../latency.gnuplot
|
||||
gnuplot ../../success.gnuplot
|
||||
gnuplot ../../throughput.gnuplot
|
||||
cd "$experiment_directory" || exit
|
||||
}
|
@ -0,0 +1 @@
|
||||
res
|
@ -0,0 +1,36 @@
|
||||
# Concurrency
|
||||
|
||||
## Question
|
||||
|
||||
_How does increasing levels of concurrent client requests affect tail latency, throughput, and the success/error rate of sandbox execution?_
|
||||
|
||||
## Independent Variable
|
||||
|
||||
- The number of concurrent client requests made at a given time
|
||||
|
||||
## Dependent Variables
|
||||
|
||||
- p50, p90, p99, and p100 latency measured in ms
|
||||
- throughput, measured in requests/second
|
||||
- success rate, measured in % of requests that return a 200
|
||||
|
||||
## Assumptions about test environment
|
||||
|
||||
- You have a modern bash shell. My Linux environment shows version 4.4.20(1)-release
|
||||
- `hey` (https://github.com/rakyll/hey) is available in your PATH
|
||||
- You have compiled `sledgert` and the `empty.so` test workload
|
||||
|
||||
## To Execute
|
||||
|
||||
1. Run `./run.sh`
|
||||
2. View the results in the newest timestamped directory in `./res`
|
||||
|
||||
## To Debug
|
||||
|
||||
1. Run `./debug.sh` in a tab
|
||||
2. Run `./run.sh -d` in a second tab
|
||||
|
||||
## TODO
|
||||
|
||||
- Harden scripts to validate assumptions
|
||||
- Improve error handling in scripts. If `sledgert` crashes, this charges forward until it hits a divide-by-zero error when attempting to clean data that doesn't exist
|
@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
# Executes the runtime in GDB
|
||||
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
|
||||
# This allows debugging outside of the Docker container
|
||||
# Also disables pagination and stopping on SIGUSR1
|
||||
|
||||
experiment_directory=$(pwd)
|
||||
project_directory=$(cd ../.. && pwd)
|
||||
binary_directory=$(cd "$project_directory"/bin && pwd)
|
||||
|
||||
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
|
||||
export PATH="$binary_directory:$PATH"
|
||||
|
||||
gdb --eval-command="handle SIGUSR1 nostop" \
|
||||
--eval-command="handle SIGPIPE nostop" \
|
||||
--eval-command="set pagination off" \
|
||||
--eval-command="set substitute-path /sledge/runtime $project_directory" \
|
||||
--eval-command="run $experiment_directory/spec.json" \
|
||||
sledgert
|
@ -0,0 +1,19 @@
|
||||
reset
|
||||
|
||||
set term jpeg
|
||||
set output "latency.jpg"
|
||||
|
||||
set xlabel "Concurrency"
|
||||
set ylabel "Latency (ms)"
|
||||
|
||||
set key left top
|
||||
|
||||
set xrange [-5:105]
|
||||
set yrange [0:]
|
||||
|
||||
set style histogram columnstacked
|
||||
|
||||
plot 'latency.dat' using 1:2 title 'p50', \
|
||||
'latency.dat' using 1:3 title 'p90', \
|
||||
'latency.dat' using 1:4 title 'p99', \
|
||||
'latency.dat' using 1:5 title 'p100', \
|
@ -0,0 +1,107 @@
|
||||
#!/bin/bash
|
||||
source ../common.sh
|
||||
|
||||
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
|
||||
# Use -d flag if running under gdb
|
||||
|
||||
timestamp=$(date +%s)
|
||||
experiment_directory=$(pwd)
|
||||
binary_directory=$(cd ../../bin && pwd)
|
||||
results_directory="$experiment_directory/res/$timestamp"
|
||||
log=log.txt
|
||||
|
||||
mkdir -p "$results_directory"
|
||||
|
||||
log_environment >>"$results_directory/$log"
|
||||
|
||||
# Start the runtime
|
||||
if [ "$1" != "-d" ]; then
|
||||
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
|
||||
sleep 1
|
||||
else
|
||||
echo "Running under gdb"
|
||||
echo "Running under gdb" >>"$results_directory/$log"
|
||||
fi
|
||||
|
||||
iterations=10000
|
||||
|
||||
# Execute workloads long enough for runtime to learn expected execution time
|
||||
echo -n "Running Samples: "
|
||||
hey -n "$iterations" -c 3 -q 200 -o csv -m GET http://localhost:10000
|
||||
sleep 5
|
||||
echo "[DONE]"
|
||||
|
||||
# Execute the experiments
|
||||
concurrency=(1 20 40 60 80 100)
|
||||
echo "Running Experiments"
|
||||
for conn in ${concurrency[*]}; do
|
||||
printf "\t%d Concurrency: " "$conn"
|
||||
hey -n "$iterations" -c "$conn" -cpus 2 -o csv -m GET http://localhost:10000 >"$results_directory/con$conn.csv"
|
||||
echo "[DONE]"
|
||||
done
|
||||
|
||||
# Stop the runtime
|
||||
|
||||
if [ "$1" != "-d" ]; then
|
||||
sleep 5
|
||||
kill_runtime
|
||||
fi
|
||||
|
||||
# Generate *.csv and *.dat results
|
||||
echo -n "Parsing Results: "
|
||||
|
||||
printf "Concurrency,Success_Rate\n" >>"$results_directory/success.csv"
|
||||
printf "Concurrency,Throughput\n" >>"$results_directory/throughput.csv"
|
||||
printf "Con,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
|
||||
|
||||
for conn in ${concurrency[*]}; do
|
||||
# Calculate Success Rate for csv
|
||||
awk -F, '
|
||||
$7 == 200 {ok++}
|
||||
END{printf "'"$conn"',%3.5f\n", (ok / '"$iterations"' * 100)}
|
||||
' <"$results_directory/con$conn.csv" >>"$results_directory/success.csv"
|
||||
|
||||
# Filter on 200s, convert from s to ms, and sort
|
||||
awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/con$conn.csv" |
|
||||
sort -g >"$results_directory/con$conn-response.csv"
|
||||
|
||||
# Get Number of 200s
|
||||
oks=$(wc -l <"$results_directory/con$conn-response.csv")
|
||||
((oks == 0)) && continue # If all errors, skip line
|
||||
|
||||
# Get Latest Timestamp
|
||||
duration=$(tail -n1 "$results_directory/con$conn.csv" | cut -d, -f8)
|
||||
throughput=$(echo "$oks/$duration" | bc)
|
||||
printf "%d,%f\n" "$conn" "$throughput" >>"$results_directory/throughput.csv"
|
||||
|
||||
# Generate Latency Data for csv
|
||||
awk '
|
||||
BEGIN {
|
||||
sum = 0
|
||||
p50 = int('"$oks"' * 0.5)
|
||||
p90 = int('"$oks"' * 0.9)
|
||||
p99 = int('"$oks"' * 0.99)
|
||||
p100 = '"$oks"'
|
||||
printf "'"$conn"',"
|
||||
}
|
||||
NR==p50 {printf "%1.4f,", $0}
|
||||
NR==p90 {printf "%1.4f,", $0}
|
||||
NR==p99 {printf "%1.4f,", $0}
|
||||
NR==p100 {printf "%1.4f\n", $0}
|
||||
' <"$results_directory/con$conn-response.csv" >>"$results_directory/latency.csv"
|
||||
|
||||
# Delete scratch file used for sorting/counting
|
||||
rm -rf "$results_directory/con$conn-response.csv"
|
||||
done
|
||||
|
||||
# Transform csvs to dat files for gnuplot
|
||||
for file in success latency throughput; do
|
||||
echo -n "#" >"$results_directory/$file.dat"
|
||||
tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
|
||||
done
|
||||
|
||||
# Generate gnuplots
|
||||
generate_gnuplots
|
||||
|
||||
# Cleanup, if required
|
||||
echo "[DONE]"
|
@ -0,0 +1,14 @@
|
||||
{
|
||||
"active": "yes",
|
||||
"name": "empty",
|
||||
"path": "empty_wasm.so",
|
||||
"port": 10000,
|
||||
"relative-deadline-us": 50000,
|
||||
"argsize": 1,
|
||||
"http-req-headers": [],
|
||||
"http-req-content-type": "text/plain",
|
||||
"http-req-size": 1024,
|
||||
"http-resp-headers": [],
|
||||
"http-resp-size": 1024,
|
||||
"http-resp-content-type": "text/plain"
|
||||
}
|
@ -0,0 +1,12 @@
|
||||
reset
|
||||
|
||||
set term jpeg
|
||||
set output "success.jpg"
|
||||
|
||||
set xlabel "Concurrency"
|
||||
set ylabel "% 2XX"
|
||||
|
||||
set xrange [-5:105]
|
||||
set yrange [0:110]
|
||||
|
||||
plot 'success.dat' using 1:2 title '2XX'
|
@ -0,0 +1,12 @@
|
||||
reset
|
||||
|
||||
set term jpeg
|
||||
set output "throughput.jpg"
|
||||
|
||||
set xlabel "Concurrency"
|
||||
set ylabel "Requests/sec"
|
||||
|
||||
set xrange [-5:105]
|
||||
set yrange [0:]
|
||||
|
||||
plot 'throughput.dat' using 1:2 title 'Reqs/sec'
|
@ -0,0 +1 @@
|
||||
res
|
@ -0,0 +1,48 @@
|
||||
# Admissions Control
|
||||
|
||||
## Discussion of Implementation
|
||||
|
||||
The admissions control subsystem seeks to ensure that the system does not accept more work than it can execute while meeting the relative deadline defined in a module's JSON specification.
|
||||
|
||||
The system maintains an integral value expressing the capacity of the system as millionths of a worker core. This assumes that the runtime has "pinned" these workers to underlying processors and has no contention with other workloads.
|
||||
|
||||
The system maintains a second integral value expressing the total accepted work.
|
||||
|
||||
The module specification provides a relative deadline, an expected execution time, and a percentile target expressing the pXX latency that the admissions control system should use when making admissions decisions (tunable from 50% to 99%). Tuning this percentile expresses how conservative the system should be with regard to scheduling. Selecting a lower value, such as 50%, reserves less processor time and results in a higher likelihood that the relative deadline is not met. Selecting a higher value, such as 99%, reserves more processor time and provides a higher likelihood that the relative deadline will be met. The provided expected execution time is assumed to match the percentile provided.
|
||||
|
||||
Dividing the expected execution time by the relative deadline yields the fraction of a worker needed to meet the deadline.
|
||||
|
||||
If the existing accepted workload plus the required work of this new workload is less than the system capacity, the workload is accepted, and the integral value expressing the total accepted work is increased. The resulting sandbox request is tagged with the fraction of a worker it was calculated to use, and when the request completes, the total accepted work is decreased by this amount.
|
||||
|
||||
If the existing accepted workload plus the required work of this new workload is greater than the system capacity, the request is rejected and the runtime sends the client an HTTP 503 response.
|
||||
|
||||
While the module specification provides an expected execution time, the system does not trust this value and only uses it in the absence of better information. Each sandbox is profiled as it runs through the system, and the end-to-end execution times of successful sandbox requests are added to a specialized performance window data structure that stores the last N execution times sorted in order of execution time. This structure optimizes for quick lookups of a specific pXX percentile.
|
||||
|
||||
Once data is seeded into this data structure, the initial execution estimate provided in the module specification is ignored, and the pXX target is instead used to lookup the actual pXX performance metric.
|
||||
|
||||
Future Work:
|
||||
|
||||
Currently, the scheduler takes no action when an executing sandbox exceeds its pXX execution time or deadline.
|
||||
|
||||
In the case of the pXX workload, this means that a workload configured to target p50 during admissions control decisions with exceptionally poor p99 performance causes system-wide overheads that can cause other systems to miss their deadlines.
|
||||
|
||||
Even worse, when executing beyond the relative deadline, the request might be too stale for the client.
|
||||
|
||||
In the absolute worst case, one can imagine a client workload caught in an infinite loop that causes permanent head of line blocking because its deadline is earlier than the current time, such that nothing can possibly preempt the executing workload.
|
||||
|
||||
## Question
|
||||
|
||||
- Does Admissions Control guarantee that deadlines are met?
|
||||
|
||||
## Independent Variable
|
||||
|
||||
Deadline is disabled versus deadline is enabled
|
||||
|
||||
## Invariants
|
||||
|
||||
Single workload
|
||||
Use FIFO policy
|
||||
|
||||
## Dependent Variables
|
||||
|
||||
End-to-end execution time of a workload measured from a client measured relative to its deadline
|
@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
# Executes the runtime in GDB
|
||||
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
|
||||
# This allows debugging outside of the Docker container
|
||||
# Also disables pagination and stopping on SIGUSR1
|
||||
|
||||
experiment_directory=$(pwd)
|
||||
project_directory=$(cd ../.. && pwd)
|
||||
binary_directory=$(cd "$project_directory"/bin && pwd)
|
||||
|
||||
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
|
||||
export PATH="$binary_directory:$PATH"
|
||||
|
||||
gdb --eval-command="handle SIGUSR1 nostop" \
|
||||
--eval-command="handle SIGPIPE nostop" \
|
||||
--eval-command="set pagination off" \
|
||||
--eval-command="set substitute-path /sledge/runtime $project_directory" \
|
||||
--eval-command="run $experiment_directory/spec.json" \
|
||||
sledgert
|
@ -0,0 +1,14 @@
|
||||
#!/bin/bash
|
||||
# Executes the runtime in GDB
|
||||
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
|
||||
# This allows debugging outside of the Docker container
|
||||
# Also disables pagination and stopping on SIGUSR1
|
||||
|
||||
experiment_directory=$(pwd)
|
||||
project_directory=$(cd ../.. && pwd)
|
||||
binary_directory=$(cd "$project_directory"/bin && pwd)
|
||||
|
||||
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
|
||||
export PATH="$binary_directory:$PATH"
|
||||
|
||||
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=EDF perf record -g -s sledgert "$experiment_directory/spec.json"
|
@ -0,0 +1,124 @@
|
||||
#!/bin/bash
|
||||
source ../common.sh
|
||||
|
||||
# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
|
||||
# Use -d flag if running under gdb
|
||||
|
||||
timestamp=$(date +%s)
|
||||
experiment_directory=$(pwd)
|
||||
binary_directory=$(cd ../../bin && pwd)
|
||||
|
||||
schedulers=(EDF FIFO)
|
||||
for scheduler in ${schedulers[*]}; do
|
||||
|
||||
results_directory="$experiment_directory/res/$timestamp/$scheduler"
|
||||
log=log.txt
|
||||
|
||||
mkdir -p "$results_directory"
|
||||
log_environment >>"$results_directory/$log"
|
||||
|
||||
# Start the runtime
|
||||
if [ "$1" != "-d" ]; then
|
||||
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
|
||||
sleep 1
|
||||
else
|
||||
echo "Running under gdb"
|
||||
echo "Running under gdb" >>"$results_directory/$log"
|
||||
fi
|
||||
|
||||
inputs=(40 10)
|
||||
duration_sec=15
|
||||
offset=5
|
||||
|
||||
# Execute workloads long enough for runtime to learn expected execution time
|
||||
echo -n "Running Samples: "
|
||||
for input in ${inputs[*]}; do
|
||||
hey -z ${duration_sec}s -cpus 3 -t 0 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
|
||||
done
|
||||
echo "[DONE]"
|
||||
sleep 5
|
||||
|
||||
echo "Running Experiments"
|
||||
# Run each separately
|
||||
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40.csv"
|
||||
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10.csv"
|
||||
|
||||
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
|
||||
hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40-con.csv" &
|
||||
sleep $offset
|
||||
hey -z ${duration_sec}s -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10-con.csv" &
|
||||
sleep $((duration_sec + offset + 15))
|
||||
|
||||
# Stop the runtime if not in debug mode
|
||||
[ "$1" != "-d" ] && kill_runtime
|
||||
|
||||
# Generate *.csv and *.dat results
|
||||
echo -n "Parsing Results: "
|
||||
|
||||
printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
|
||||
printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
|
||||
printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
|
||||
|
||||
deadlines_ms=(2 2 3000 3000)
|
||||
payloads=(fib10 fib10-con fib40 fib40-con)
|
||||
|
||||
for ((i = 0; i < 4; i++)); do
|
||||
# for payload in ${payloads[*]}; do
|
||||
payload=${payloads[$i]}
|
||||
deadline=${deadlines_ms[$i]}
|
||||
|
||||
# Get Number of Requests
|
||||
requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
|
||||
((requests == 0)) && continue
|
||||
|
||||
# Calculate Success Rate for csv
|
||||
awk -F, '
|
||||
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
|
||||
END{printf "'"$payload"',%3.5f%\n", (ok / (NR - 1) * 100)}
|
||||
' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"
|
||||
|
||||
# Filter on 200s, convert from s to ms, and sort
|
||||
awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
|
||||
sort -g >"$results_directory/$payload-response.csv"
|
||||
|
||||
# Get Number of 200s
|
||||
oks=$(wc -l <"$results_directory/$payload-response.csv")
|
||||
((oks == 0)) && continue # If all errors, skip line
|
||||
|
||||
# Get Latest Timestamp
|
||||
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
|
||||
throughput=$(echo "$oks/$duration" | bc)
|
||||
printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
|
||||
|
||||
# Generate Latency Data for csv
|
||||
awk '
|
||||
BEGIN {
|
||||
sum = 0
|
||||
p50 = int('"$oks"' * 0.5)
|
||||
p90 = int('"$oks"' * 0.9)
|
||||
p99 = int('"$oks"' * 0.99)
|
||||
p100 = '"$oks"'
|
||||
printf "'"$payload"',"
|
||||
}
|
||||
NR==p50 {printf "%1.4f,", $0}
|
||||
NR==p90 {printf "%1.4f,", $0}
|
||||
NR==p99 {printf "%1.4f,", $0}
|
||||
NR==p100 {printf "%1.4f\n", $0}
|
||||
' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"
|
||||
|
||||
# Delete scratch file used for sorting/counting
|
||||
# rm -rf "$results_directory/$payload-response.csv"
|
||||
done
|
||||
|
||||
# Transform csvs to dat files for gnuplot
|
||||
for file in success latency throughput; do
|
||||
echo -n "#" >"$results_directory/$file.dat"
|
||||
tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
|
||||
done
|
||||
|
||||
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
|
||||
# generate_gnuplots
|
||||
|
||||
# Cleanup, if required
|
||||
echo "[DONE]"
|
||||
done
|
@ -0,0 +1,124 @@
|
||||
#!/bin/bash
|
||||
source ../common.sh
|
||||
|
||||
# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
|
||||
# Use -d flag if running under gdb
|
||||
|
||||
timestamp=$(date +%s)
|
||||
experiment_directory=$(pwd)
|
||||
binary_directory=$(cd ../../bin && pwd)
|
||||
|
||||
schedulers=(EDF FIFO)
|
||||
for scheduler in ${schedulers[*]}; do
|
||||
|
||||
results_directory="$experiment_directory/res/$timestamp/$scheduler"
|
||||
log=log.txt
|
||||
|
||||
mkdir -p "$results_directory"
|
||||
log_environment >>"$results_directory/$log"
|
||||
|
||||
# Start the runtime
|
||||
if [ "$1" != "-d" ]; then
|
||||
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
|
||||
sleep 1
|
||||
else
|
||||
echo "Running under gdb"
|
||||
echo "Running under gdb" >>"$results_directory/$log"
|
||||
fi
|
||||
|
||||
inputs=(40 10)
|
||||
duration_sec=15
|
||||
offset=5
|
||||
|
||||
# Execute workloads long enough for runtime to learn expected execution time
|
||||
echo -n "Running Samples: "
|
||||
for input in ${inputs[*]}; do
|
||||
hey -z ${duration_sec}s -cpus 3 -t 0 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
|
||||
done
|
||||
echo "[DONE]"
|
||||
sleep 5
|
||||
|
||||
echo "Running Experiments"
|
||||
# Run each separately
|
||||
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40.csv"
|
||||
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10.csv"
|
||||
|
||||
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
|
||||
hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40-con.csv" &
|
||||
sleep $offset
|
||||
hey -z ${duration_sec}s -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10-con.csv" &
|
||||
sleep $((duration_sec + offset + 15))
|
||||
|
||||
# Stop the runtime if not in debug mode
|
||||
[ "$1" != "-d" ] && kill_runtime
|
||||
|
||||
# Generate *.csv and *.dat results
|
||||
echo -n "Parsing Results: "
|
||||
|
||||
printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
|
||||
printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
|
||||
printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
|
||||
|
||||
deadlines_ms=(2 2 3000 3000)
|
||||
payloads=(fib10 fib10-con fib40 fib40-con)
|
||||
|
||||
for ((i = 0; i < 4; i++)); do
|
||||
# for payload in ${payloads[*]}; do
|
||||
payload=${payloads[$i]}
|
||||
deadline=${deadlines_ms[$i]}
|
||||
|
||||
# Get Number of Requests
|
||||
requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
|
||||
((requests == 0)) && continue
|
||||
|
||||
# Calculate Success Rate for csv
|
||||
awk -F, '
|
||||
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
|
||||
END{printf "'"$payload"',%3.5f%\n", (ok / (NR - 1) * 100)}
|
||||
' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"
|
||||
|
||||
# Filter on 200s, convert from s to ms, and sort
|
||||
awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
|
||||
sort -g >"$results_directory/$payload-response.csv"
|
||||
|
||||
# Get Number of 200s
|
||||
oks=$(wc -l <"$results_directory/$payload-response.csv")
|
||||
((oks == 0)) && continue # If all errors, skip line
|
||||
|
||||
# Get Latest Timestamp
|
||||
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
|
||||
throughput=$(echo "$oks/$duration" | bc)
|
||||
printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
|
||||
|
||||
# Generate Latency Data for csv
|
||||
awk '
|
||||
BEGIN {
|
||||
sum = 0
|
||||
p50 = int('"$oks"' * 0.5)
|
||||
p90 = int('"$oks"' * 0.9)
|
||||
p99 = int('"$oks"' * 0.99)
|
||||
p100 = '"$oks"'
|
||||
printf "'"$payload"',"
|
||||
}
|
||||
NR==p50 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
|
||||
NR==p90 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
|
||||
NR==p99 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
|
||||
NR==p100 {printf "%1.4f%\n", $0 / '"$deadline"' * 100}
|
||||
' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"
|
||||
|
||||
# Delete scratch file used for sorting/counting
|
||||
# rm -rf "$results_directory/$payload-response.csv"
|
||||
done
|
||||
|
||||
# Transform csvs to dat files for gnuplot
|
||||
for file in success latency throughput; do
|
||||
echo -n "#" >"$results_directory/$file.dat"
|
||||
tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
|
||||
done
|
||||
|
||||
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
|
||||
# generate_gnuplots
|
||||
|
||||
# Cleanup, if required
|
||||
echo "[DONE]"
|
||||
done
|
@ -0,0 +1,5 @@
|
||||
|
||||
|
||||
hey -n 200 -c 200 -t 0 -m GET -d "40\n" http://localhost:10040
|
||||
|
||||
hey -n 500 -c 500 -t 0 -m GET -d "10\n" http://localhost:10010
|
@ -0,0 +1 @@
|
||||
res
|
@ -0,0 +1 @@
|
||||
*.txt
|
@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
# Generates payloads of 1KB, 10KB, 100KB, 1MB
|
||||
for size in 1024 $((1024 * 10)) $((1024 * 100)) $((1024 * 1024)); do
|
||||
rm -rf $size.txt
|
||||
i=0
|
||||
echo -n "Generating $size:"
|
||||
while ((i < size)); do
|
||||
printf 'a' >>$size.txt
|
||||
((i++))
|
||||
done
|
||||
echo "[DONE]"
|
||||
done
|
@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
# Executes the runtime in GDB
|
||||
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
|
||||
# This allows debugging outside of the Docker container
|
||||
# Also disables pagination and stopping on SIGUSR1
|
||||
|
||||
experiment_directory=$(pwd)
|
||||
project_directory=$(cd ../.. && pwd)
|
||||
binary_directory=$(cd "$project_directory"/bin && pwd)
|
||||
|
||||
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
|
||||
export PATH="$binary_directory:$PATH"
|
||||
|
||||
gdb --eval-command="handle SIGUSR1 nostop" \
|
||||
--eval-command="handle SIGPIPE nostop" \
|
||||
--eval-command="set pagination off" \
|
||||
--eval-command="set substitute-path /sledge/runtime $project_directory" \
|
||||
--eval-command="run $experiment_directory/spec.json" \
|
||||
sledgert
|
@ -0,0 +1,20 @@
|
||||
reset
|
||||
|
||||
set term jpeg
|
||||
set output "latency.jpg"
|
||||
|
||||
set xlabel "Payload (bytes)"
|
||||
set xrange [-5:1050000]
|
||||
|
||||
set ylabel "Latency (ms)"
|
||||
set yrange [0:]
|
||||
|
||||
set key left top
|
||||
|
||||
|
||||
set style histogram columnstacked
|
||||
|
||||
plot 'latency.dat' using 1:2 title 'p50', \
|
||||
'latency.dat' using 1:3 title 'p90', \
|
||||
'latency.dat' using 1:4 title 'p99', \
|
||||
'latency.dat' using 1:5 title 'p100', \
|
@ -0,0 +1,123 @@
|
||||
#!/bin/bash
|
||||
source ../common.sh
|
||||
|
||||
# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
|
||||
# Use -d flag if running under gdb
|
||||
|
||||
timestamp=$(date +%s)
|
||||
experiment_directory=$(pwd)
|
||||
binary_directory=$(cd ../../bin && pwd)
|
||||
results_directory="$experiment_directory/res/$timestamp"
|
||||
log=log.txt
|
||||
|
||||
mkdir -p "$results_directory"
|
||||
|
||||
log_environment >>"$results_directory/$log"
|
||||
|
||||
# Start the runtime
|
||||
if [ "$1" != "-d" ]; then
|
||||
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
|
||||
sleep 1
|
||||
else
|
||||
echo "Running under gdb"
|
||||
echo "Running under gdb" >>"$results_directory/$log"
|
||||
fi
|
||||
|
||||
payloads=(1024 10240 102400 1048576)
|
||||
ports=(10000 10001 10002 10003)
|
||||
iterations=10000
|
||||
|
||||
# If one of the expected body files doesn't exist, trigger the generation script.
|
||||
for payload in ${payloads[*]}; do
|
||||
if test -f "$experiment_directory/body/$payload.txt"; then
|
||||
continue
|
||||
else
|
||||
echo "Generating Payloads: "
|
||||
{
|
||||
cd "$experiment_directory/body" && ./generate.sh
|
||||
}
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
# Execute workloads long enough for runtime to learn expected execution time
|
||||
echo -n "Running Samples: "
|
||||
hey -n "$iterations" -c 3 -q 200 -o csv -m GET -D "$experiment_directory/body/1024.txt" http://localhost:10000
|
||||
hey -n "$iterations" -c 3 -q 200 -o csv -m GET -D "$experiment_directory/body/10240.txt" http://localhost:10001
|
||||
hey -n "$iterations" -c 3 -q 200 -o csv -m GET -D "$experiment_directory/body/102400.txt" http://localhost:10002
|
||||
hey -n "$iterations" -c 3 -q 200 -o csv -m GET -D "$experiment_directory/body/1048576.txt" http://localhost:10003
|
||||
sleep 5
|
||||
echo "[DONE]"
|
||||
|
||||
# Execute the experiments
|
||||
echo "Running Experiments"
|
||||
for i in {0..3}; do
|
||||
printf "\t%d Payload: " "${payloads[$i]}"
|
||||
hey -n "$iterations" -c 1 -cpus 2 -o csv -m GET -D "$experiment_directory/body/${payloads[$i]}.txt" http://localhost:"${ports[$i]}" >"$results_directory/${payloads[$i]}.csv"
|
||||
echo "[DONE]"
|
||||
done
|
||||
|
||||
# Stop the runtime
|
||||
if [ "$1" != "-d" ]; then
|
||||
sleep 5
|
||||
kill_runtime
|
||||
fi
|
||||
|
||||
# Generate *.csv and *.dat results
|
||||
echo -n "Parsing Results: "
|
||||
|
||||
printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
|
||||
printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
|
||||
printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
|
||||
|
||||
for payload in ${payloads[*]}; do
|
||||
# Calculate Success Rate for csv
|
||||
awk -F, '
|
||||
$7 == 200 {ok++}
|
||||
END{printf "'"$payload"',%3.5f\n", (ok / '"$iterations"' * 100)}
|
||||
' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"
|
||||
|
||||
# Filter on 200s, convert from s to ms, and sort
|
||||
awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
|
||||
sort -g >"$results_directory/$payload-response.csv"
|
||||
|
||||
# Get Number of 200s
|
||||
oks=$(wc -l <"$results_directory/$payload-response.csv")
|
||||
((oks == 0)) && continue # If all errors, skip line
|
||||
|
||||
# Get Latest Timestamp
|
||||
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
|
||||
throughput=$(echo "$oks/$duration" | bc)
|
||||
printf "%d,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
|
||||
|
||||
# Generate Latency Data for csv
|
||||
awk '
|
||||
BEGIN {
|
||||
sum = 0
|
||||
p50 = int('"$oks"' * 0.5)
|
||||
p90 = int('"$oks"' * 0.9)
|
||||
p99 = int('"$oks"' * 0.99)
|
||||
p100 = '"$oks"'
|
||||
printf "'"$payload"',"
|
||||
}
|
||||
NR==p50 {printf "%1.4f,", $0}
|
||||
NR==p90 {printf "%1.4f,", $0}
|
||||
NR==p99 {printf "%1.4f,", $0}
|
||||
NR==p100 {printf "%1.4f\n", $0}
|
||||
' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"
|
||||
|
||||
# Delete scratch file used for sorting/counting
|
||||
rm -rf "$results_directory/$payload-response.csv"
|
||||
done
|
||||
|
||||
# Transform csvs to dat files for gnuplot
|
||||
for file in success latency throughput; do
|
||||
echo -n "#" >"$results_directory/$file.dat"
|
||||
tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
|
||||
done
|
||||
|
||||
# Generate gnuplots
|
||||
generate_gnuplots
|
||||
|
||||
# Cleanup, if required
|
||||
echo "[DONE]"
|
@ -0,0 +1,60 @@
|
||||
{
|
||||
"active": "yes",
|
||||
"name": "work1k",
|
||||
"path": "work1k_wasm.so",
|
||||
"port": 10000,
|
||||
"expected-execution-us": 400,
|
||||
"relative-deadline-us": 2000,
|
||||
"argsize": 1,
|
||||
"http-req-headers": [],
|
||||
"http-req-content-type": "text/plain",
|
||||
"http-req-size": 1548,
|
||||
"http-resp-headers": [],
|
||||
"http-resp-size": 1548,
|
||||
"http-resp-content-type": "text/plain"
|
||||
},
|
||||
{
|
||||
"active": "yes",
|
||||
"name": "work10k",
|
||||
"path": "work10k_wasm.so",
|
||||
"port": 10001,
|
||||
"expected-execution-us": 600,
|
||||
"relative-deadline-us": 2000,
|
||||
"argsize": 1,
|
||||
"http-req-headers": [],
|
||||
"http-req-content-type": "text/plain",
|
||||
"http-req-size": 10480,
|
||||
"http-resp-headers": [],
|
||||
"http-resp-size": 10480,
|
||||
"http-resp-content-type": "text/plain"
|
||||
},
|
||||
{
|
||||
"active": "yes",
|
||||
"name": "work100k",
|
||||
"path": "work100k_wasm.so",
|
||||
"port": 10002,
|
||||
"expected-execution-us": 700,
|
||||
"relative-deadline-us": 2000,
|
||||
"argsize": 1,
|
||||
"http-req-headers": [],
|
||||
"http-req-content-type": "text/plain",
|
||||
"http-req-size": 104800,
|
||||
"http-resp-headers": [],
|
||||
"http-resp-size": 104800,
|
||||
"http-resp-content-type": "text/plain"
|
||||
},
|
||||
{
|
||||
"active": "yes",
|
||||
"name": "work1m",
|
||||
"path": "work1m_wasm.so",
|
||||
"port": 10003,
|
||||
"expected-execution-us": 2000,
|
||||
"relative-deadline-us": 6000,
|
||||
"argsize": 1,
|
||||
"http-req-headers": [],
|
||||
"http-req-content-type": "text/plain",
|
||||
"http-req-size": 1048776,
|
||||
"http-resp-headers": [],
|
||||
"http-resp-size": 1048776,
|
||||
"http-resp-content-type": "text/plain"
|
||||
}
|
@ -0,0 +1,12 @@
|
||||
reset
|
||||
|
||||
set term jpeg
|
||||
set output "success.jpg"
|
||||
|
||||
set xlabel "Payload (bytes)"
|
||||
set xrange [-5:1050000]
|
||||
|
||||
set ylabel "% 2XX"
|
||||
set yrange [0:110]
|
||||
|
||||
plot 'success.dat' using 1:2 title '2XX'
|
@ -0,0 +1,3 @@
|
||||
#!/bin/bash
|
||||
|
||||
hey -n 100 -c 3 -q 100 -m GET -D "./body/1024.txt" http://localhost:10000
|
@ -0,0 +1,13 @@
|
||||
reset
|
||||
|
||||
set term jpeg
|
||||
set output "throughput.jpg"
|
||||
|
||||
# TODO: Axis shouldn't be linear
|
||||
set xlabel "Payload (bytes)"
|
||||
set xrange [-5:1050000]
|
||||
|
||||
set ylabel "Requests/sec"
|
||||
set yrange [0:]
|
||||
|
||||
plot 'throughput.dat' using 1:2 title 'Reqs/sec'
|
@ -0,0 +1 @@
|
||||
res
|
@ -0,0 +1,28 @@
|
||||
# Preemption
|
||||
|
||||
## Question
|
||||
|
||||
- How do mixed criticality workloads perform under the Sledge scheduler policies?
|
||||
- How does the latency of a high criticality workload that triggers preemption on a system under load compare to being the only workload on the system?
|
||||
- What is the slowdown on the low priority workload?
|
||||
- How does this affect aggregate throughput?
|
||||
|
||||
## Setup
|
||||
|
||||
The system is configured with admission control disabled.
|
||||
|
||||
The driver script drives a bimodal distribution of long-running low-priority and short-running high-priority workloads
|
||||
|
||||
Relative Deadlines are tuned such that the scheduler should always preempt the low-priority workload for the high-priority workload.
|
||||
|
||||
A driver script runs the two workloads separately as a baseline
|
||||
|
||||
It then runs them concurrently, starting the low-priority long-running workload first such that the system begins execution and accumulates requests in the data structures. The high-priority short-running workload then begins.
|
||||
|
||||
## Independent Variable
|
||||
|
||||
The Scheduling Policy: EDF versus FIFO
|
||||
|
||||
## Dependent Variables
|
||||
|
||||
Latency of high priority workload
|
@ -0,0 +1,104 @@
|
||||
#!/bin/bash
|
||||
source ../common.sh
|
||||
|
||||
# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
|
||||
# Modified to target a remote host
|
||||
|
||||
timestamp=$(date +%s)
|
||||
experiment_directory=$(pwd)
|
||||
host=192.168.1.13
|
||||
|
||||
results_directory="$experiment_directory/res/$timestamp"
|
||||
log=log.txt
|
||||
|
||||
mkdir -p "$results_directory"
|
||||
log_environment >>"$results_directory/$log"
|
||||
|
||||
# Start the runtime
|
||||
inputs=(40 10)
|
||||
duration_sec=15
|
||||
offset=5
|
||||
|
||||
# Execute workloads long enough for runtime to learn expected execution time
|
||||
echo -n "Running Samples: "
|
||||
for input in ${inputs[*]}; do
|
||||
hey -z ${duration_sec}s -cpus 6 -t 0 -o csv -m GET -d "$input\n" http://"$host":$((10000 + input))
|
||||
done
|
||||
echo "[DONE]"
|
||||
sleep 5
|
||||
|
||||
echo "Running Experiments"
|
||||
# Run each separately
|
||||
hey -z ${duration_sec}s -cpus 6 -c 100 -t 0 -o csv -m GET -d "40\n" "http://$host:10040" >"$results_directory/fib40.csv"
|
||||
hey -z ${duration_sec}s -cpus 6 -c 100 -t 0 -o csv -m GET -d "10\n" "http://$host:10010" >"$results_directory/fib10.csv"
|
||||
|
||||
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
|
||||
hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 100 -t 0 -o csv -m GET -d "40\n" "http://$host:10040" >"$results_directory/fib40-con.csv" &
|
||||
sleep $offset
|
||||
hey -z ${duration_sec}s -cpus 3 -c 100 -t 0 -o csv -m GET -d "10\n" "http://$host:10010" >"$results_directory/fib10-con.csv" &
|
||||
sleep $((duration_sec + offset + 15))
|
||||
|
||||
# Generate *.csv and *.dat results
|
||||
echo -n "Parsing Results: "
|
||||
|
||||
printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
|
||||
printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
|
||||
printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
|
||||
|
||||
payloads=(fib10 fib10-con fib40 fib40-con)
|
||||
|
||||
for payload in ${payloads[*]}; do
|
||||
# Get Number of Requests
|
||||
requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
|
||||
((requests == 0)) && continue
|
||||
|
||||
# Calculate Success Rate for csv
|
||||
awk -F, '
|
||||
$7 == 200 {ok++}
|
||||
END{printf "'"$payload"',%3.5f%\n", (ok / (NR - 1) * 100)}
|
||||
' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"
|
||||
|
||||
# Filter on 200s, convert from s to ms, and sort
|
||||
awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
|
||||
sort -g >"$results_directory/$payload-response.csv"
|
||||
|
||||
# Get Number of 200s
|
||||
oks=$(wc -l <"$results_directory/$payload-response.csv")
|
||||
((oks == 0)) && continue # If all errors, skip line
|
||||
|
||||
# Get Latest Timestamp
|
||||
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
|
||||
throughput=$(echo "$oks/$duration" | bc)
|
||||
printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
|
||||
|
||||
# Generate Latency Data for csv
|
||||
awk '
|
||||
BEGIN {
|
||||
sum = 0
|
||||
p50 = int('"$oks"' * 0.5)
|
||||
p90 = int('"$oks"' * 0.9)
|
||||
p99 = int('"$oks"' * 0.99)
|
||||
p100 = '"$oks"'
|
||||
printf "'"$payload"',"
|
||||
}
|
||||
NR==p50 {printf "%1.4f,", $0}
|
||||
NR==p90 {printf "%1.4f,", $0}
|
||||
NR==p99 {printf "%1.4f,", $0}
|
||||
NR==p100 {printf "%1.4f\n", $0}
|
||||
' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"
|
||||
|
||||
# Delete scratch file used for sorting/counting
|
||||
# rm -rf "$results_directory/$payload-response.csv"
|
||||
done
|
||||
|
||||
# Transform csvs to dat files for gnuplot
|
||||
for file in success latency throughput; do
|
||||
echo -n "#" >"$results_directory/$file.dat"
|
||||
tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
|
||||
done
|
||||
|
||||
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
|
||||
# generate_gnuplots
|
||||
|
||||
# Cleanup, if required
|
||||
echo "[DONE]"
|
@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
# Executes the runtime in GDB
|
||||
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
|
||||
# This allows debugging outside of the Docker container
|
||||
# Also disables pagination and stopping on SIGUSR1
|
||||
|
||||
experiment_directory=$(pwd)
|
||||
project_directory=$(cd ../.. && pwd)
|
||||
binary_directory=$(cd "$project_directory"/bin && pwd)
|
||||
|
||||
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
|
||||
export PATH="$binary_directory:$PATH"
|
||||
|
||||
gdb --eval-command="handle SIGUSR1 nostop" \
|
||||
--eval-command="handle SIGPIPE nostop" \
|
||||
--eval-command="set pagination off" \
|
||||
--eval-command="set substitute-path /sledge/runtime $project_directory" \
|
||||
--eval-command="run $experiment_directory/spec.json" \
|
||||
sledgert
|
@ -0,0 +1,34 @@
|
||||
#!/bin/bash
|
||||
cd ../../bin
|
||||
LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/mixed_preemption/test_mixed_preemption.json &
|
||||
cd ../tests/mixed_preemption/
|
||||
|
||||
# Run small samples on each port to let the runtime figure out the execution time
|
||||
sleep 10
|
||||
echo "Running Samples"
|
||||
wrk -d 20s -t1 -s post.lua http://localhost:10010 -- --delay 500 10\n
|
||||
wrk -d 20s -t1 -s post.lua http://localhost:10020 -- --delay 500 20\n
|
||||
wrk -d 20s -t1 -s post.lua http://localhost:10030 -- --delay 500 25\n
|
||||
|
||||
# Run in Parallel
|
||||
sleep 10
|
||||
echo "Running Experiments"
|
||||
wrk -d 1m -t1 -s post.lua http://localhost:10010 -- --delay 125 10\n >./res/fib10.txt &
|
||||
wrk -d 2m -t1 -s post.lua http://localhost:10020 -- --delay 250 20\n >./res/fib20.txt &
|
||||
wrk -d 3m -t1 -s post.lua http://localhost:10025 -- --delay 500 25\n >./res/fib25.txt
|
||||
|
||||
# Kill the Background Sledge processes
|
||||
sleep 10
|
||||
echo "Running Cleanup"
|
||||
pkill sledgert
|
||||
pkill wrk
|
||||
|
||||
# Extract the Latency CSV Data from the Log
|
||||
|
||||
echo 'Fib10, Fib10' >./res/fib10.csv
|
||||
grep -A200 -m1 -e 'Percentile, Latency' ./res/fib10.txt >>./res/fib10.csv
|
||||
echo 'Fib20, Fib20' >./res/fib20.csv
|
||||
grep -A200 -m1 -e 'Percentile, Latency' ./res/fib20.txt >>./res/fib20.csv
|
||||
echo 'Fib25, Fib25' >./res/fib25.csv
|
||||
grep -A200 -m1 -e 'Percentile, Latency' ./res/fib25.txt >>./res/fib25.csv
|
||||
paste -d, ./res/fib10.csv ./res/fib20.csv ./res/fib25.csv >./res/merged.csv
|
@ -0,0 +1,14 @@
|
||||
#!/bin/bash
|
||||
# Executes the runtime in GDB
|
||||
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
|
||||
# This allows debugging outside of the Docker container
|
||||
# Also disables pagination and stopping on SIGUSR1
|
||||
|
||||
experiment_directory=$(pwd)
|
||||
project_directory=$(cd ../.. && pwd)
|
||||
binary_directory=$(cd "$project_directory"/bin && pwd)
|
||||
|
||||
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
|
||||
export PATH="$binary_directory:$PATH"
|
||||
|
||||
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=EDF perf record -g -s sledgert "$experiment_directory/spec.json"
|
@ -0,0 +1,124 @@
|
||||
#!/bin/bash
|
||||
source ../common.sh
|
||||
|
||||
# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
|
||||
# Use -d flag if running under gdb
|
||||
|
||||
timestamp=$(date +%s)
|
||||
experiment_directory=$(pwd)
|
||||
binary_directory=$(cd ../../bin && pwd)
|
||||
|
||||
schedulers=(EDF FIFO)
|
||||
for scheduler in ${schedulers[*]}; do
|
||||
|
||||
results_directory="$experiment_directory/res/$timestamp/$scheduler"
|
||||
log=log.txt
|
||||
|
||||
mkdir -p "$results_directory"
|
||||
log_environment >>"$results_directory/$log"
|
||||
|
||||
# Start the runtime
|
||||
if [ "$1" != "-d" ]; then
|
||||
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
|
||||
sleep 1
|
||||
else
|
||||
echo "Running under gdb"
|
||||
echo "Running under gdb" >>"$results_directory/$log"
|
||||
fi
|
||||
|
||||
inputs=(40 10)
|
||||
duration_sec=15
|
||||
offset=5
|
||||
|
||||
# Execute workloads long enough for runtime to learn excepted execution time
|
||||
echo -n "Running Samples: "
|
||||
for input in ${inputs[*]}; do
|
||||
hey -z ${duration_sec}s -cpus 3 -t 0 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
|
||||
done
|
||||
echo "[DONE]"
|
||||
sleep 5
|
||||
|
||||
echo "Running Experiments"
|
||||
# Run each separately
|
||||
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40.csv"
|
||||
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10.csv"
|
||||
|
||||
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
|
||||
hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40-con.csv" &
|
||||
sleep $offset
|
||||
hey -z ${duration_sec}s -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10-con.csv" &
|
||||
sleep $((duration_sec + offset + 15))
|
||||
|
||||
# Stop the runtime if not in debug mode
|
||||
[ "$1" != "-d" ] && kill_runtime
|
||||
|
||||
# Generate *.csv and *.dat results
|
||||
echo -n "Parsing Results: "
|
||||
|
||||
printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
|
||||
printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
|
||||
printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
|
||||
|
||||
deadlines_ms=(2 2 3000 3000)
|
||||
payloads=(fib10 fib10-con fib40 fib40-con)
|
||||
|
||||
for ((i = 0; i < 4; i++)); do
|
||||
# for payload in ${payloads[*]}; do
|
||||
payload=${payloads[$i]}
|
||||
deadline=${deadlines_ms[$i]}
|
||||
|
||||
# Get Number of Requests
|
||||
requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
|
||||
((requests == 0)) && continue
|
||||
|
||||
# Calculate Success Rate for csv
|
||||
awk -F, '
|
||||
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
|
||||
END{printf "'"$payload"',%3.5f%\n", (ok / (NR - 1) * 100)}
|
||||
' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"
|
||||
|
||||
# Filter on 200s, convery from s to ms, and sort
|
||||
awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
|
||||
sort -g >"$results_directory/$payload-response.csv"
|
||||
|
||||
# Get Number of 200s
|
||||
oks=$(wc -l <"$results_directory/$payload-response.csv")
|
||||
((oks == 0)) && continue # If all errors, skip line
|
||||
|
||||
# Get Latest Timestamp
|
||||
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
|
||||
throughput=$(echo "$oks/$duration" | bc)
|
||||
printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
|
||||
|
||||
# Generate Latency Data for csv
|
||||
awk '
|
||||
BEGIN {
|
||||
sum = 0
|
||||
p50 = int('"$oks"' * 0.5)
|
||||
p90 = int('"$oks"' * 0.9)
|
||||
p99 = int('"$oks"' * 0.99)
|
||||
p100 = '"$oks"'
|
||||
printf "'"$payload"',"
|
||||
}
|
||||
NR==p50 {printf "%1.4f,", $0}
|
||||
NR==p90 {printf "%1.4f,", $0}
|
||||
NR==p99 {printf "%1.4f,", $0}
|
||||
NR==p100 {printf "%1.4f\n", $0}
|
||||
' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"
|
||||
|
||||
# Delete scratch file used for sorting/counting
|
||||
# rm -rf "$results_directory/$payload-response.csv"
|
||||
done
|
||||
|
||||
# Transform csvs to dat files for gnuplot
|
||||
for file in success latency throughput; do
|
||||
echo -n "#" >"$results_directory/$file.dat"
|
||||
tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
|
||||
done
|
||||
|
||||
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
|
||||
# generate_gnuplots
|
||||
|
||||
# Cleanup, if requires
|
||||
echo "[DONE]"
|
||||
done
|
@ -0,0 +1,124 @@
|
||||
#!/bin/bash
|
||||
source ../common.sh
|
||||
|
||||
# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
|
||||
# Use -d flag if running under gdb
|
||||
|
||||
timestamp=$(date +%s)
|
||||
experiment_directory=$(pwd)
|
||||
binary_directory=$(cd ../../bin && pwd)
|
||||
|
||||
schedulers=(EDF FIFO)
|
||||
for scheduler in ${schedulers[*]}; do
|
||||
|
||||
results_directory="$experiment_directory/res/$timestamp/$scheduler"
|
||||
log=log.txt
|
||||
|
||||
mkdir -p "$results_directory"
|
||||
log_environment >>"$results_directory/$log"
|
||||
|
||||
# Start the runtime
|
||||
if [ "$1" != "-d" ]; then
|
||||
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
|
||||
sleep 1
|
||||
else
|
||||
echo "Running under gdb"
|
||||
echo "Running under gdb" >>"$results_directory/$log"
|
||||
fi
|
||||
|
||||
inputs=(40 10)
|
||||
duration_sec=15
|
||||
offset=5
|
||||
|
||||
# Execute workloads long enough for runtime to learn excepted execution time
|
||||
echo -n "Running Samples: "
|
||||
for input in ${inputs[*]}; do
|
||||
hey -z ${duration_sec}s -cpus 3 -t 0 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
|
||||
done
|
||||
echo "[DONE]"
|
||||
sleep 5
|
||||
|
||||
echo "Running Experiments"
|
||||
# Run each separately
|
||||
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40.csv"
|
||||
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10.csv"
|
||||
|
||||
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
|
||||
hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40-con.csv" &
|
||||
sleep $offset
|
||||
hey -z ${duration_sec}s -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10-con.csv" &
|
||||
sleep $((duration_sec + offset + 15))
|
||||
|
||||
# Stop the runtime if not in debug mode
|
||||
[ "$1" != "-d" ] && kill_runtime
|
||||
|
||||
# Generate *.csv and *.dat results
|
||||
echo -n "Parsing Results: "
|
||||
|
||||
printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
|
||||
printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
|
||||
printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
|
||||
|
||||
deadlines_ms=(2 2 3000 3000)
|
||||
payloads=(fib10 fib10-con fib40 fib40-con)
|
||||
|
||||
for ((i = 0; i < 4; i++)); do
|
||||
# for payload in ${payloads[*]}; do
|
||||
payload=${payloads[$i]}
|
||||
deadline=${deadlines_ms[$i]}
|
||||
|
||||
# Get Number of Requests
|
||||
requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
|
||||
((requests == 0)) && continue
|
||||
|
||||
# Calculate Success Rate for csv
|
||||
awk -F, '
|
||||
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
|
||||
END{printf "'"$payload"',%3.5f%\n", (ok / (NR - 1) * 100)}
|
||||
' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"
|
||||
|
||||
# Filter on 200s, convery from s to ms, and sort
|
||||
awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
|
||||
sort -g >"$results_directory/$payload-response.csv"
|
||||
|
||||
# Get Number of 200s
|
||||
oks=$(wc -l <"$results_directory/$payload-response.csv")
|
||||
((oks == 0)) && continue # If all errors, skip line
|
||||
|
||||
# Get Latest Timestamp
|
||||
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
|
||||
throughput=$(echo "$oks/$duration" | bc)
|
||||
printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
|
||||
|
||||
# Generate Latency Data for csv
|
||||
awk '
|
||||
BEGIN {
|
||||
sum = 0
|
||||
p50 = int('"$oks"' * 0.5)
|
||||
p90 = int('"$oks"' * 0.9)
|
||||
p99 = int('"$oks"' * 0.99)
|
||||
p100 = '"$oks"'
|
||||
printf "'"$payload"',"
|
||||
}
|
||||
NR==p50 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
|
||||
NR==p90 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
|
||||
NR==p99 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
|
||||
NR==p100 {printf "%1.4f%\n", $0 / '"$deadline"' * 100}
|
||||
' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"
|
||||
|
||||
# Delete scratch file used for sorting/counting
|
||||
# rm -rf "$results_directory/$payload-response.csv"
|
||||
done
|
||||
|
||||
# Transform csvs to dat files for gnuplot
|
||||
for file in success latency throughput; do
|
||||
echo -n "#" >"$results_directory/$file.dat"
|
||||
tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
|
||||
done
|
||||
|
||||
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
|
||||
# generate_gnuplots
|
||||
|
||||
# Cleanup, if requires
|
||||
echo "[DONE]"
|
||||
done
|
@ -0,0 +1,5 @@
|
||||
|
||||
|
||||
hey -n 200 -c 200 -t 0 -m GET -d "40\n" http://localhost:10040
|
||||
|
||||
hey -n 500 -c 500 -t 0 -m GET -d "10\n" http://localhost:10010
|
@ -0,0 +1,30 @@
|
||||
{
|
||||
"active": "yes",
|
||||
"name": "fibonacci_10",
|
||||
"path": "fibonacci_wasm.so",
|
||||
"port": 10010,
|
||||
"expected-execution-us": 600,
|
||||
"relative-deadline-us": 2000,
|
||||
"argsize": 1,
|
||||
"http-req-headers": [],
|
||||
"http-req-content-type": "text/plain",
|
||||
"http-req-size": 1024,
|
||||
"http-resp-headers": [],
|
||||
"http-resp-size": 1024,
|
||||
"http-resp-content-type": "text/plain"
|
||||
},
|
||||
{
|
||||
"active": "yes",
|
||||
"name": "fibonacci_40",
|
||||
"path": "fibonacci_wasm.so",
|
||||
"port": 10040,
|
||||
"expected-execution-us": 550000,
|
||||
"relative-deadline-us": 300000000,
|
||||
"argsize": 1,
|
||||
"http-req-headers": [],
|
||||
"http-req-content-type": "text/plain",
|
||||
"http-req-size": 1024,
|
||||
"http-resp-headers": [],
|
||||
"http-resp-size": 1024,
|
||||
"http-resp-content-type": "text/plain"
|
||||
}
|
@ -0,0 +1,123 @@
|
||||
#pragma once
|
||||
|
||||
#include <stdatomic.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "debuglog.h"
|
||||
#include "client_socket.h"
|
||||
|
||||
#define ADMISSIONS_CONTROL_GRANULARITY 1000000
|
||||
|
||||
/*
|
||||
* Unitless estimate of the instantaneous fraction of system capacity required to complete all previously
|
||||
* admitted work. This is used to calculate free capacity as part of admissions control
|
||||
*
|
||||
* The estimated requirements of a single admitted request is calculated as
|
||||
* estimated execution time (cycles) / relative deadline (cycles)
|
||||
*
|
||||
* These estimates are incremented on request acceptance and decremented on request completion (either
|
||||
* success or failure)
|
||||
*/
|
||||
extern _Atomic uint64_t admissions_control_admitted;
|
||||
extern uint64_t admissions_control_capacity;
|
||||
|
||||
static inline void
|
||||
admissions_control_initialize()
|
||||
{
|
||||
#ifdef ADMISSIONS_CONTROL
|
||||
atomic_init(&admissions_control_admitted, 0);
|
||||
admissions_control_capacity = runtime_worker_threads_count * ADMISSIONS_CONTROL_GRANULARITY;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
admissions_control_add(uint64_t admissions_estimate)
|
||||
{
|
||||
#ifdef ADMISSIONS_CONTROL
|
||||
assert(admissions_estimate > 0);
|
||||
atomic_fetch_add(&admissions_control_admitted, admissions_estimate);
|
||||
|
||||
#ifdef LOG_ADMISSIONS_CONTROL
|
||||
debuglog("Runtime Admitted: %lu / %lu\n", admissions_control_admitted, admissions_control_capacity);
|
||||
#endif
|
||||
|
||||
#endif /* ADMISSIONS_CONTROL */
|
||||
}
|
||||
|
||||
static inline void
|
||||
admissions_control_substract(uint64_t admissions_estimate)
|
||||
{
|
||||
#ifdef ADMISSIONS_CONTROL
|
||||
/* Assumption: Should never underflow */
|
||||
if (unlikely(admissions_estimate > admissions_control_admitted)) panic("Admissions Estimate underflow\n");
|
||||
|
||||
atomic_fetch_sub(&admissions_control_admitted, admissions_estimate);
|
||||
|
||||
#ifdef LOG_ADMISSIONS_CONTROL
|
||||
debuglog("Runtime Admitted: %lu / %lu\n", admissions_control_admitted, admissions_control_capacity);
|
||||
#endif
|
||||
|
||||
#endif /* ADMISSIONS_CONTROL */
|
||||
}
|
||||
|
||||
|
||||
static inline uint64_t
|
||||
admissions_control_calculate_estimate(uint64_t estimated_execution, uint64_t relative_deadline)
|
||||
{
|
||||
#ifdef ADMISSIONS_CONTROL
|
||||
assert(relative_deadline != 0);
|
||||
uint64_t admissions_estimate = (estimated_execution * (uint64_t)ADMISSIONS_CONTROL_GRANULARITY)
|
||||
/ relative_deadline;
|
||||
if (admissions_estimate == 0)
|
||||
panic("Ration of Deadline to Execution time cannot exceed %d\n", ADMISSIONS_CONTROL_GRANULARITY);
|
||||
|
||||
return admissions_estimate;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
admissions_control_calculate_estimate_us(uint32_t estimated_execution_us, uint32_t relative_deadline_us)
|
||||
{
|
||||
#ifdef ADMISSIONS_CONTROL
|
||||
assert(relative_deadline_us != 0);
|
||||
return (uint64_t)((uint64_t)(estimated_execution_us * ADMISSIONS_CONTROL_GRANULARITY)) / relative_deadline_us;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
admissions_control_log_decision(uint64_t admissions_estimate, bool admitted)
|
||||
{
|
||||
#ifdef LOG_ADMISSIONS_CONTROL
|
||||
debuglog("Admitted: %lu, Capacity: %lu, Estimate: %lu, Admitted? %s\n", admissions_control_admitted,
|
||||
admissions_control_capacity, admissions_estimate, admitted ? "yes" : "no");
|
||||
#endif /* LOG_ADMISSIONS_CONTROL */
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
admissions_control_decide(uint64_t admissions_estimate)
|
||||
{
|
||||
uint64_t work_admitted = 1; /* Nominal non-zero value in case admissions control is disabled */
|
||||
|
||||
#ifdef ADMISSIONS_CONTROL
|
||||
if (unlikely(admissions_estimate == 0)) panic("Admissions estimate should never be zero");
|
||||
|
||||
uint64_t total_admitted = atomic_load(&admissions_control_admitted);
|
||||
|
||||
if (total_admitted + admissions_estimate >= admissions_control_capacity) {
|
||||
admissions_control_log_decision(admissions_estimate, false);
|
||||
work_admitted = 0;
|
||||
} else {
|
||||
admissions_control_log_decision(admissions_estimate, true);
|
||||
admissions_control_add(admissions_estimate);
|
||||
work_admitted = admissions_estimate;
|
||||
}
|
||||
#endif /* ADMISSIONS_CONTROL */
|
||||
|
||||
return work_admitted;
|
||||
}
|
@ -0,0 +1,56 @@
|
||||
#pragma once
|
||||
|
||||
#include "debuglog.h"
|
||||
#include "perf_window.h"
|
||||
|
||||
struct admissions_info {
|
||||
struct perf_window perf_window;
|
||||
int percentile; /* 50 - 99 */
|
||||
int control_index; /* Precomputed Lookup index when perf_window is full */
|
||||
uint64_t estimate; /* cycles */
|
||||
uint64_t relative_deadline; /* Relative deadline in cycles. This is duplicated state */
|
||||
};
|
||||
|
||||
/**
|
||||
* Initializes perf window
|
||||
* @param self
|
||||
*/
|
||||
static inline void
|
||||
admissions_info_initialize(struct admissions_info *self, int percentile, uint64_t expected_execution,
|
||||
uint64_t relative_deadline)
|
||||
{
|
||||
#ifdef ADMISSIONS_CONTROL
|
||||
|
||||
self->relative_deadline = relative_deadline;
|
||||
self->estimate = admissions_control_calculate_estimate(expected_execution, relative_deadline);
|
||||
debuglog("Initial Estimate: %lu\n", self->estimate);
|
||||
assert(self != NULL);
|
||||
|
||||
perf_window_initialize(&self->perf_window);
|
||||
|
||||
if (unlikely(percentile < 50 || percentile > 99)) panic("Invalid admissions percentile");
|
||||
self->percentile = percentile;
|
||||
|
||||
self->control_index = PERF_WINDOW_BUFFER_SIZE * percentile / 100;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Adds an execution value to the perf window and calculates and caches and updated estimate
|
||||
* @param self
|
||||
* @param execution_duration
|
||||
*/
|
||||
static inline void
|
||||
admissions_info_update(struct admissions_info *self, uint64_t execution_duration)
|
||||
{
|
||||
#ifdef ADMISSIONS_CONTROL
|
||||
assert(!software_interrupt_is_enabled());
|
||||
struct perf_window *perf_window = &self->perf_window;
|
||||
|
||||
LOCK_LOCK(&self->perf_window.lock);
|
||||
perf_window_add(perf_window, execution_duration);
|
||||
uint64_t estimated_execution = perf_window_get_percentile(perf_window, self->percentile, self->control_index);
|
||||
self->estimate = admissions_control_calculate_estimate(estimated_execution, self->relative_deadline);
|
||||
LOCK_UNLOCK(&self->perf_window.lock);
|
||||
#endif
|
||||
}
|
@ -0,0 +1,65 @@
|
||||
#pragma once
|
||||
|
||||
#include <errno.h>
|
||||
#include <unistd.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "panic.h"
|
||||
#include "debuglog.h"
|
||||
#include "http_response.h"
|
||||
#include "http_total.h"
|
||||
#include "runtime.h"
|
||||
#include "worker_thread.h"
|
||||
|
||||
|
||||
static inline void
|
||||
client_socket_close(int client_socket)
|
||||
{
|
||||
if (close(client_socket) < 0) debuglog("Error closing client socket - %s", strerror(errno));
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Rejects request due to admission control or error
|
||||
* @param client_socket - the client we are rejecting
|
||||
* @param status_code - either 503 or 400
|
||||
*/
|
||||
static inline int
|
||||
client_socket_send(int client_socket, int status_code)
|
||||
{
|
||||
const char *response;
|
||||
int rc;
|
||||
switch (status_code) {
|
||||
case 503:
|
||||
response = HTTP_RESPONSE_503_SERVICE_UNAVAILABLE;
|
||||
http_total_increment_5XX();
|
||||
break;
|
||||
case 400:
|
||||
response = HTTP_RESPONSE_400_BAD_REQUEST;
|
||||
http_total_increment_4XX();
|
||||
break;
|
||||
default:
|
||||
panic("%d is not a valid status code\n", status_code);
|
||||
}
|
||||
|
||||
int sent = 0;
|
||||
int to_send = strlen(response);
|
||||
|
||||
while (sent < to_send) {
|
||||
rc = write(client_socket, &response[sent], to_send - sent);
|
||||
if (rc < 0) {
|
||||
if (errno == EAGAIN) { debuglog("Unexpectedly blocking on write of %s\n", response); }
|
||||
|
||||
goto send_err;
|
||||
}
|
||||
sent += rc;
|
||||
};
|
||||
|
||||
rc = 0;
|
||||
done:
|
||||
return rc;
|
||||
send_err:
|
||||
debuglog("Error sending to client: %s", strerror(errno));
|
||||
rc = -1;
|
||||
goto done;
|
||||
}
|
@ -0,0 +1,57 @@
|
||||
#pragma once
|
||||
|
||||
#include <stdatomic.h>
|
||||
#include <stdint.h>
|
||||
|
||||
/*
|
||||
* Counts to track requests and responses
|
||||
* requests and 5XX (admissions control rejections) are only tracked by the listener core, so they are not
|
||||
* behind a compiler flag. 2XX and 4XX can be incremented by worker cores, so they are behind a flag because
|
||||
* of concerns about contention
|
||||
*/
|
||||
extern _Atomic uint32_t http_total_requests;
|
||||
extern _Atomic uint32_t http_total_5XX;
|
||||
|
||||
#ifdef LOG_TOTAL_REQS_RESPS
|
||||
extern _Atomic uint32_t http_total_2XX;
|
||||
extern _Atomic uint32_t http_total_4XX;
|
||||
#endif
|
||||
|
||||
static inline void
|
||||
http_total_init()
|
||||
{
|
||||
atomic_init(&http_total_requests, 0);
|
||||
atomic_init(&http_total_5XX, 0);
|
||||
#ifdef LOG_TOTAL_REQS_RESPS
|
||||
atomic_init(&http_total_2XX, 0);
|
||||
atomic_init(&http_total_4XX, 0);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
http_total_increment_request()
|
||||
{
|
||||
atomic_fetch_add(&http_total_requests, 1);
|
||||
}
|
||||
|
||||
static inline void
|
||||
http_total_increment_2xx()
|
||||
{
|
||||
#ifdef LOG_TOTAL_REQS_RESPS
|
||||
atomic_fetch_add(&http_total_2XX, 1);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
http_total_increment_4XX()
|
||||
{
|
||||
#ifdef LOG_TOTAL_REQS_RESPS
|
||||
atomic_fetch_add(&http_total_4XX, 1);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
http_total_increment_5XX()
|
||||
{
|
||||
atomic_fetch_add(&http_total_5XX, 1);
|
||||
}
|
@ -1,89 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <assert.h>
|
||||
#include <sys/mman.h>
|
||||
#include <signal.h>
|
||||
#include <uv.h>
|
||||
|
||||
#include "http_request.h"
|
||||
#include "runtime.h"
|
||||
#include "sandbox.h"
|
||||
/**
|
||||
* Parses data read by the libuv stream chunk-by-chunk until the message is complete
|
||||
* Then stops the stream and wakes up the sandbox
|
||||
* @param stream
|
||||
* @param number_read bytes read
|
||||
* @param buffer unused
|
||||
*
|
||||
* FIXME: is there some weird edge case where a UNICODE character might be split between reads? Do we care?
|
||||
* Called after libuv has read a chunk of data. Issue #100
|
||||
*/
|
||||
static inline void
|
||||
libuv_callbacks_on_read_parse_http_request(uv_stream_t *stream, ssize_t number_read, const uv_buf_t *buffer)
|
||||
{
|
||||
struct sandbox *sandbox = stream->data;
|
||||
|
||||
/* Parse the chunks libuv has read on our behalf until we've parse to message end */
|
||||
if (number_read > 0) {
|
||||
// FIXME: Broken by refactor to sandbox_parse_http_request changes to return code
|
||||
if (sandbox_parse_http_request(sandbox, number_read) != 0) return;
|
||||
sandbox->request_response_data_length += number_read;
|
||||
struct http_request *rh = &sandbox->http_request;
|
||||
if (!rh->message_end) return;
|
||||
}
|
||||
|
||||
/* When the entire message has been read, stop the stream and wakeup the sandbox */
|
||||
uv_read_stop(stream);
|
||||
worker_thread_wakeup_sandbox(sandbox);
|
||||
}
|
||||
|
||||
/**
|
||||
* On libuv close, executes this callback to wake the blocked sandbox back up
|
||||
* @param stream
|
||||
*/
|
||||
static inline void
|
||||
libuv_callbacks_on_close_wakeup_sakebox(uv_handle_t *stream)
|
||||
{
|
||||
struct sandbox *sandbox = stream->data;
|
||||
worker_thread_wakeup_sandbox(sandbox);
|
||||
}
|
||||
|
||||
/**
|
||||
* On libuv shutdown, executes this callback to wake the blocked sandbox back up
|
||||
* @param req shutdown request
|
||||
* @param status unused in callback
|
||||
*/
|
||||
static inline void
|
||||
libuv_callbacks_on_shutdown_wakeup_sakebox(uv_shutdown_t *req, int status)
|
||||
{
|
||||
struct sandbox *sandbox = req->data;
|
||||
worker_thread_wakeup_sandbox(sandbox);
|
||||
}
|
||||
|
||||
/**
|
||||
* On libuv write, executes this callback to wake the blocked sandbox back up
|
||||
* In case of error, shutdown the sandbox
|
||||
* @param write shutdown request
|
||||
* @param status status code
|
||||
*/
|
||||
static inline void
|
||||
libuv_callbacks_on_write_wakeup_sandbox(uv_write_t *write, int status)
|
||||
{
|
||||
struct sandbox *sandbox = write->data;
|
||||
if (status < 0) {
|
||||
sandbox->client_libuv_shutdown_request.data = sandbox;
|
||||
uv_shutdown(&sandbox->client_libuv_shutdown_request, (uv_stream_t *)&sandbox->client_libuv_stream,
|
||||
libuv_callbacks_on_shutdown_wakeup_sakebox);
|
||||
return;
|
||||
}
|
||||
worker_thread_wakeup_sandbox(sandbox);
|
||||
}
|
||||
|
||||
static inline void
|
||||
libuv_callbacks_on_allocate_setup_request_response_data(uv_handle_t *h, size_t suggested, uv_buf_t *buf)
|
||||
{
|
||||
struct sandbox *sandbox = h->data;
|
||||
size_t l = (sandbox->module->max_request_or_response_size - sandbox->request_response_data_length);
|
||||
buf->base = (sandbox->request_response_data + sandbox->request_response_data_length);
|
||||
buf->len = l > suggested ? suggested : l;
|
||||
}
|
@ -0,0 +1,4 @@
|
||||
#pragma once
|
||||
|
||||
#define likely(x) __builtin_expect(!!(x), 1)
|
||||
#define unlikely(x) __builtin_expect(!!(x), 0)
|
@ -0,0 +1,81 @@
|
||||
#pragma once
|
||||
|
||||
#include <stdatomic.h>
|
||||
|
||||
#include "debuglog.h"
|
||||
#include "likely.h"
|
||||
#include "panic.h"
|
||||
|
||||
typedef enum
|
||||
{
|
||||
SANDBOX_UNINITIALIZED = 0, /* Assumption: mmap zeros out structure */
|
||||
SANDBOX_ALLOCATED,
|
||||
SANDBOX_SET_AS_INITIALIZED,
|
||||
SANDBOX_INITIALIZED,
|
||||
SANDBOX_SET_AS_RUNNABLE,
|
||||
SANDBOX_RUNNABLE,
|
||||
SANDBOX_SET_AS_RUNNING,
|
||||
SANDBOX_RUNNING,
|
||||
SANDBOX_SET_AS_PREEMPTED,
|
||||
SANDBOX_PREEMPTED,
|
||||
SANDBOX_SET_AS_BLOCKED,
|
||||
SANDBOX_BLOCKED,
|
||||
SANDBOX_SET_AS_RETURNED,
|
||||
SANDBOX_RETURNED,
|
||||
SANDBOX_SET_AS_COMPLETE,
|
||||
SANDBOX_COMPLETE,
|
||||
SANDBOX_SET_AS_ERROR,
|
||||
SANDBOX_ERROR,
|
||||
SANDBOX_STATE_COUNT
|
||||
} sandbox_state_t;
|
||||
|
||||
extern const bool sandbox_state_is_terminal[SANDBOX_STATE_COUNT];
|
||||
|
||||
extern const char *sandbox_state_labels[SANDBOX_STATE_COUNT];
|
||||
|
||||
static inline const char *
|
||||
sandbox_state_stringify(sandbox_state_t state)
|
||||
{
|
||||
if (unlikely(state >= SANDBOX_STATE_COUNT)) panic("%d is an unrecognized sandbox state\n", state);
|
||||
return sandbox_state_labels[state];
|
||||
}
|
||||
|
||||
|
||||
static inline void
|
||||
sandbox_state_log_transition(uint64_t sandbox_id, sandbox_state_t last_state, sandbox_state_t current_state)
|
||||
{
|
||||
#ifdef LOG_STATE_CHANGES
|
||||
debuglog("Sandbox %lu | %s => %s\n", sandbox_id, sandbox_state_stringify(last_state),
|
||||
sandbox_state_stringify(current_state));
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef LOG_SANDBOX_COUNT
|
||||
extern _Atomic uint32_t sandbox_state_count[SANDBOX_STATE_COUNT];
|
||||
#endif
|
||||
|
||||
static inline void
|
||||
sandbox_count_initialize()
|
||||
{
|
||||
#ifdef LOG_SANDBOX_COUNT
|
||||
for (int i = 0; i < SANDBOX_STATE_COUNT; i++) atomic_init(&sandbox_state_count[i], 0);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
runtime_sandbox_total_increment(sandbox_state_t state)
|
||||
{
|
||||
#ifdef LOG_SANDBOX_COUNT
|
||||
if (!sandbox_state_is_terminal[state]) panic("Unexpectedly logging intermediate transition state");
|
||||
atomic_fetch_add(&sandbox_state_count[state], 1);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
runtime_sandbox_total_decrement(sandbox_state_t state)
|
||||
{
|
||||
#ifdef LOG_SANDBOX_COUNT
|
||||
if (atomic_load(&sandbox_state_count[state]) == 0) panic("Underflow of %s\n", sandbox_state_stringify(state));
|
||||
atomic_fetch_sub(&sandbox_state_count[state], 1);
|
||||
#endif
|
||||
}
|
@ -0,0 +1,4 @@
|
||||
#include "admissions_control.h"
|
||||
|
||||
_Atomic uint64_t admissions_control_admitted;
|
||||
uint64_t admissions_control_capacity;
|
@ -0,0 +1,34 @@
|
||||
#include <stdint.h>
|
||||
|
||||
#include "debuglog.h"
|
||||
#include "http_total.h"
|
||||
|
||||
/* 2XX + 4XX should equal sandboxes */
|
||||
/* Listener Core Bookkeeping */
|
||||
_Atomic uint32_t http_total_requests = 0;
|
||||
_Atomic uint32_t http_total_5XX = 0;
|
||||
|
||||
#ifdef LOG_TOTAL_REQS_RESPS
|
||||
_Atomic uint32_t http_total_2XX = 0;
|
||||
_Atomic uint32_t http_total_4XX = 0;
|
||||
#endif
|
||||
|
||||
void
|
||||
http_total_log()
|
||||
{
|
||||
uint32_t total_reqs = atomic_load(&http_total_requests);
|
||||
uint32_t total_5XX = atomic_load(&http_total_5XX);
|
||||
|
||||
#ifdef LOG_TOTAL_REQS_RESPS
|
||||
uint32_t total_2XX = atomic_load(&http_total_2XX);
|
||||
uint32_t total_4XX = atomic_load(&http_total_4XX);
|
||||
|
||||
int64_t total_responses = total_2XX + total_4XX + total_5XX;
|
||||
int64_t outstanding_requests = (int64_t)total_reqs - total_responses;
|
||||
|
||||
debuglog("Requests: %u (%ld outstanding)\n\tResponses: %ld\n\t\t2XX: %u\n\t\t4XX: %u\n\t\t5XX: %u\n",
|
||||
total_reqs, outstanding_requests, total_responses, total_2XX, total_4XX, total_5XX);
|
||||
#else
|
||||
debuglog("Requests: %u\n\tResponses:\n\t\t\t5XX: %u\n", total_reqs, total_5XX);
|
||||
#endif
|
||||
};
|
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue