Merge pull request #1 from gwsystems/mixed-preemption

Mixed preemption
main
Emil 4 years ago committed by GitHub
commit 1b09846e70
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

3
.gitignore vendored

@ -53,6 +53,9 @@ dkms.conf
runtime/tags
runtime/bin
runtime/tests/tmp/
runtime/tests/**/*.csv
runtime/tests/**/*.txt
runtime/tests/**/*.xlsx
# Swap Files
*.swp

3
.gitmodules vendored

@ -11,3 +11,6 @@ url = https://github.com/gwsystems/ck.git
[submodule "jsmn"]
path = runtime/thirdparty/jsmn
url = https://github.com/gwsystems/jsmn.git
[submodule "runtime/tests/gocr"]
path = runtime/tests/gocr
url = https://github.com/gwsystems/gocr

@ -1,17 +1,10 @@
# SLEdge
**SLEdge** is a lightweight serverless solution suitable for edge computing. It combines WebAssembly sandboxing provided by the [aWsm compiler](https://github.com/gwsystems/aWsm) with asynchronous I/O provided by [libuv](https://github.com/libuv/libuv).
**SLEdge** is a lightweight serverless solution suitable for edge computing. It builds on WebAssembly sandboxing provided by the [aWsm compiler](https://github.com/gwsystems/aWsm).
## Host Dependencies
- Docker - [Installation Instructions](https://docs.docker.com/install/)
- libuv
If on Debian, you can install libuv with the following:
```bash
sudo apt-get install libuv1-dev
```
## Setting up the environment

@ -11,62 +11,53 @@ CC_OPTIONS = -O3 -flto -g -pthread -D_GNU_SOURCE
BINARY_NAME=sledgert
# Number of Cores. Options: {2...N or TOTAL_CORES}
# NCORES = ${TOTAL_CORES}
NCORES = 2
# Options: {USE_MEM_GENERIC, USE_MEM_VM}
USE_MEM = USE_MEM_VM
# Feature Toggles
# CFLAGS += -DADMISSIONS_CONTROL
# Debugging Flags
# Strips out calls to assert()
# Strips out calls to assert() and disables debuglog
# CFLAGS += -DNDEBUG
# Turns on debuglog and other assorted printfs in third party libs
CFLAGS += -DDEBUG
# Redirects debuglogs to /runtime/bin/awesome.log
#FIXME This log should be changed to sledge.log (and likely to a user defined path)
# Redirects debuglogs to /runtime/bin/sledge.log
# CFLAGS += -DLOG_TO_FILE
# Various Informational Logs for Debugging
CFLAGS += -DLOG_STATE_CHANGES
# CFLAGS += -DLOG_HTTP_PARSER
# CFLAGS += -DLOG_STATE_CHANGES
# CFLAGS += -DLOG_LOCK_OVERHEAD
CFLAGS += -DLOG_CONTEXT_SWITCHES
# CFLAGS += -DLOG_LISTENER_LOCK_OVERHEAD
# CFLAGS += -DLOG_CONTEXT_SWITCHES
# CFLAGS += -DLOG_ADMISSIONS_CONTROL
# CFLAGS += -DLOG_SANDBOX_PERF
# CFLAGS += -DLOG_REQUEST_ALLOCATION
# CFLAGS += -DLOG_PREEMPTION
# CFLAGS += -DLOG_MODULE_LOADING
# This flag dumps totals of incoming requests and outgoing responses, broken out by status code
# family, such as 2XX, 4XX, 5XX. It is useful to debug clients hanging waiting for a response.
# To log, run `call runtime_log_requests_responses()` while in GDB
CFLAGS += -DLOG_TOTAL_REQS_RESPS
# To log, run `call http_total_log()` while in GDB
# CFLAGS += -DLOG_TOTAL_REQS_RESPS
# This flag logs the total number of sandboxes in the various states
# It is useful to debug if sandboxes are "getting caught" in a particular state
# To log, run `call runtime_log_sandbox_states()` while in GDB
CFLAGS += -DLOG_SANDBOX_TOTALS
# CFLAGS += -DLOG_SANDBOX_COUNT
# This flag enables an per-worker atomic count of sandbox's local runqueue count in thread local storage
# Useful to debug if sandboxes are "getting caught" or "leaking" while in a local runqueue
CFLAGS += -DLOG_LOCAL_RUNQUEUE
# Debug the HTTP Parser
CFLAGS += -DLOG_HTTP_PARSER
# CFLAGS += -DLOG_LOCAL_RUNQUEUE
# System Configuraiton Flags
# System Configuration Flags
# Sets a flag equal to the processor architecture
CFLAGS += -D${ARCH}
CFLAGS += -DNCORES=${NCORES}
CFLAGS += -DNCORES=${TOTAL_CORES}
CFLAGS += -DPAGE_SIZE=$(PAGE_SIZE)
#CFLAGS += -DPREEMPT_DISABLE
#CFLAGS += -DUSE_HTTP_UVIO #-DUSE_HTTP_SYNC
CFLAGS += -D${USE_MEM}
#CFLAGS += -DUSE_SYSCALL
# Preprocessor
LDFLAGS += -Wl,--export-dynamic -ldl -lm

@ -0,0 +1,19 @@
#!/bin/bash
# Launches sledgert under GDB using this directory's spec.json.
# Remaps the container build path /sledge/runtime to this checkout so the
# runtime can be debugged outside of the Docker container; also disables
# pagination and stopping on SIGUSR1/SIGPIPE.
here=$(pwd)
root=$(cd ../.. && pwd)
bin_dir=$(cd "$root"/bin && pwd)
export LD_LIBRARY_PATH="$bin_dir:$LD_LIBRARY_PATH"
export PATH="$bin_dir:$PATH"
# Collect the GDB startup commands in an array so the invocation stays readable
gdb_flags=(
	--eval-command="handle SIGUSR1 nostop"
	--eval-command="handle SIGPIPE nostop"
	--eval-command="set pagination off"
	--eval-command="set substitute-path /sledge/runtime $root"
	--eval-command="run $here/spec.json"
)
gdb "${gdb_flags[@]}" sledgert

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.3 KiB

File diff suppressed because one or more lines are too long

@ -0,0 +1,2 @@
pnm file was generated as follows
`pngtopnm 5x8.png >5x8.pnm`

@ -0,0 +1,19 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
# Run from this test's directory; expects spec.json here and sledgert in <root>/bin
experiment_directory=$(pwd)
# Three levels up to the runtime root — assumes runtime/tests/<suite>/<test> layout; TODO confirm
project_directory=$(cd ../../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
# Make sledgert and its shared objects resolvable from the bin directory
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
# SIGUSR1 must not stop GDB; substitute-path remaps the container build path to this checkout
gdb --eval-command="handle SIGUSR1 nostop" \
--eval-command="handle SIGPIPE nostop" \
--eval-command="set pagination off" \
--eval-command="set substitute-path /sledge/runtime $project_directory" \
--eval-command="run $experiment_directory/spec.json" \
sledgert

@ -0,0 +1,2 @@
ABCD abcd EFGHI efghi JKLMN jklmn OPQR opqr
STUVW stuvw XYZ xyz 12345 67890 !?'&/\=+-*#(){}[]<>

@ -0,0 +1,47 @@
#!/bin/bash
# Smoke test for the gocr workload using the 5x8 font sample.
# Starts sledgert with this directory's spec.json (unless invoked with -d,
# in which case the runtime is assumed to already be running under GDB via
# debug.sh), POSTs 5x8.pnm 50 times, compares each response against
# expected_result.txt, and prints a success count.
experiment_directory=$(pwd)
project_directory=$(cd ../../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
# -d: debug mode — do not launch the runtime here
if [ "$1" != "-d" ]; then
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" &
sleep 1
else
echo "Running under gdb"
fi
expected_result="$(cat ./expected_result.txt)"
success_count=0
total_count=50
for ((i = 0; i < total_count; i++)); do
echo "$i"
result=$(curl -H 'Expect:' -H "Content-Type: text/plain" --data-binary "@5x8.pnm" localhost:10000 2>/dev/null)
# echo "$result"
if [[ "$result" == "$expected_result" ]]; then
success_count=$((success_count + 1))
else
echo "FAIL"
echo "Expected:"
echo "$expected_result"
echo "==============================================="
echo "Was:"
echo "$result"
fi
done
echo "$success_count / $total_count"
if [ "$1" != "-d" ]; then
sleep 5
echo -n "Running Cleanup: "
pkill sledgert >/dev/null 2>/dev/null
# NOTE(review): this script uses curl, not wrk — the pkill below looks vestigial
pkill wrk >/dev/null 2>/dev/null
echo "[DONE]"
fi

@ -0,0 +1,15 @@
{
"active": "yes",
"name": "gocr",
"path": "gocr.aso",
"port": 10000,
"relative-deadline-us": 500000000,
"expected-execution-us": 5000000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1024000,
"http-resp-headers": [],
"http-resp-size": 1024000,
"http-resp-content-type": "text/plain"
}

@ -0,0 +1,19 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
experiment_directory=$(pwd)
project_directory=$(cd ../../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
gdb --eval-command="handle SIGUSR1 nostop" \
--eval-command="handle SIGPIPE nostop" \
--eval-command="set pagination off" \
--eval-command="set substitute-path /sledge/runtime $project_directory" \
--eval-command="run $experiment_directory/spec.json" \
sledgert

@ -0,0 +1,3 @@
This is q handw__tten
examp(e for _0CR,
Write as good as yo_ c4n.

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

File diff suppressed because one or more lines are too long

@ -0,0 +1,47 @@
#!/bin/bash
# Smoke test for the gocr workload using the handwriting sample.
# Starts sledgert with this directory's spec.json (unless invoked with -d,
# in which case the runtime is assumed to already be running under GDB via
# debug.sh), POSTs handwrt1.pnm 50 times, compares each response against
# expected_result.txt, and prints a success count.
experiment_directory=$(pwd)
project_directory=$(cd ../../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
# -d: debug mode — do not launch the runtime here
if [ "$1" != "-d" ]; then
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" &
sleep 1
else
echo "Running under gdb"
fi
expected_result="$(cat ./expected_result.txt)"
success_count=0
total_count=50
for ((i = 0; i < total_count; i++)); do
echo "$i"
result=$(curl -H 'Expect:' -H "Content-Type: text/plain" --data-binary "@handwrt1.pnm" localhost:10000 2>/dev/null)
# echo "$result"
if [[ "$result" == "$expected_result" ]]; then
success_count=$((success_count + 1))
else
echo "FAIL"
echo "Expected:"
echo "$expected_result"
echo "==============================================="
echo "Was:"
echo "$result"
fi
done
echo "$success_count / $total_count"
if [ "$1" != "-d" ]; then
sleep 5
echo -n "Running Cleanup: "
pkill sledgert >/dev/null 2>/dev/null
# NOTE(review): this script uses curl, not wrk — the pkill below looks vestigial
pkill wrk >/dev/null 2>/dev/null
echo "[DONE]"
fi

@ -0,0 +1,14 @@
{
"active": "yes",
"name": "gocr",
"path": "gocr.aso",
"port": 10000,
"relative-deadline-us": 50000000000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1024000,
"http-resp-headers": [],
"http-resp-size": 1024000,
"http-resp-content-type": "text/plain"
}

@ -0,0 +1,5 @@
# A Page from Dr. Jekyll and Mister Hyde
This is a larger OCR example. The \*.pnm file is about 5 MB.
In the future, this OCR example might be a good use case for a "step-wise" function, as the file is substantially smaller before conversion to the \*.pnm format.

@ -0,0 +1,19 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
# Run from this test's directory; expects spec.json here and sledgert in <root>/bin
experiment_directory=$(pwd)
# Three levels up to the runtime root — assumes runtime/tests/<suite>/<test> layout; TODO confirm
project_directory=$(cd ../../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
# substitute-path remaps the container build path /sledge/runtime to this checkout
gdb --eval-command="handle SIGUSR1 nostop" \
--eval-command="handle SIGPIPE nostop" \
--eval-command="set pagination off" \
--eval-command="set substitute-path /sledge/runtime $project_directory" \
--eval-command="run $experiment_directory/spec.json" \
sledgert

@ -0,0 +1,33 @@
at his to4ch of a certain icy pang along my blood. "Come, sir;' said I.
"Yo4 forget that I have not yet the pleas4re ofyo4r acq4aintance. Be
seated, if yo4 please: And I showed him an example, and sat down
myself in my c4stomary seat and with as fair an imitation of my or-
dinary manner to a patient, as the lateness of the ho4r, the nat4re of
my preocc4pations, and the horror I had of my visitor, wo4ld s4_er
me to m4ster.
"I beg yo4r pardon, Dr. Lanyon;' he replied civilly eno4gh. "What
yo4 say is very well fo4nded; and my impatience has shown its heels
to my politeness. I come here at the instance of yo4r colleag4e, Dr.
Henry _ekyll, on a piece of b4siness of some moment; and I 4nder-
stood.. ." He pa4sed and p4t his hand to his throat, and I co4ld see,
in spite of his collected manner, that he was wrestling against the
approaches of the hysteria-"I 4nderstood, a drawer.. ."
B4t here I took pity on my visito(0xed)s s4spense, and some perhaps
on my own growing c4riosity.
"mere it is, sir;' said I, pointing to the drawer, where it lay on the
noor behind a table and still covered with the sheet.
He sprang to it, and then pa4sed, and laid his hand 4pon his
heart; I co4ld hear his teeth grate with the conv4lsive action of his
jaws; and his face was so ghastly to see that I grew alarmed both for
his life and reason.
"Compose yo4rself_' said I.
He t4rned a dreadf4l smile to me, and as if with the decision of
despair, pl4cked away the sheet. At sight of the contents, he 4ttered
one lo4d sob of s4ch immense relief that I sat petri_ed. And the
next moment, in a voice that was already fairly well 4nder control,
"Have yo4 a grad4ated glass? " he asked.
I rose from my place with something of an e_ort and gave him
what he asked.
He thanked me with a smiling nod, meas4red o4t a few min-
ims of the red tinct4re and added one of the powders. me mix-
t4re, which was at _rst of a reddish h4e, began, in proportion as the

Binary file not shown.

After

Width:  |  Height:  |  Size: 823 KiB

File diff suppressed because one or more lines are too long

@ -0,0 +1,46 @@
#!/bin/bash
# Smoke test for the gocr workload using the "Dr. Jekyll and Mr. Hyde" page.
# Starts sledgert with this directory's spec.json (unless invoked with -d,
# in which case the runtime is assumed to already be running under GDB via
# debug.sh), POSTs hyde.pnm 50 times, compares each response against
# expected_result.txt, and prints a success count.
experiment_directory=$(pwd)
project_directory=$(cd ../../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
# -d: debug mode — do not launch the runtime here
if [ "$1" != "-d" ]; then
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" &
sleep 1
else
echo "Running under gdb"
fi
expected_result="$(cat ./expected_result.txt)"
success_count=0
total_count=50
for ((i = 0; i < total_count; i++)); do
echo "$i"
result=$(curl -H 'Expect:' -H "Content-Type: text/plain" --data-binary "@hyde.pnm" localhost:10000 2>/dev/null)
# echo "$result"
if [[ "$result" == "$expected_result" ]]; then
success_count=$((success_count + 1))
else
echo "FAIL"
echo "Expected:"
echo "$expected_result"
echo "==============================================="
echo "Was:"
echo "$result"
fi
done
echo "$success_count / $total_count"
if [ "$1" != "-d" ]; then
sleep 5
echo -n "Running Cleanup: "
pkill sledgert >/dev/null 2>/dev/null
# NOTE(review): this script uses curl, not wrk — the pkill below looks vestigial
pkill wrk >/dev/null 2>/dev/null
echo "[DONE]"
fi

@ -0,0 +1,14 @@
{
"active": "yes",
"name": "gocr",
"path": "gocr.aso",
"port": 10000,
"relative-deadline-us": 50000000000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 5335057,
"http-resp-headers": [],
"http-resp-size": 5335057,
"http-resp-content-type": "text/plain"
}

@ -0,0 +1,20 @@
# Latency-percentile plot for the payload experiment: p50/p90/p99/p100
# latency (ms) vs. payload size, read from latency.dat, rendered to latency.jpg
reset
set term jpeg
set output "latency.jpg"
set xlabel "Payload (bytes)"
set xrange [-5:1050000]
set ylabel "Latency (ms)"
set yrange [0:]
set key left top
set style histogram columnstacked
# FIX: the final plot entry previously ended with ", \" — a trailing comma
# plus a dangling line continuation, which is a gnuplot syntax error
plot 'latency.dat' using 1:2 title 'p50', \
     'latency.dat' using 1:3 title 'p90', \
     'latency.dat' using 1:4 title 'p99', \
     'latency.dat' using 1:5 title 'p100'

@ -0,0 +1,111 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the payload size (5x8, handwrt1,
# hyde images on ports 10000-10002) influences latency, throughput, and the
# success/failure rate
# Use -d flag if running under gdb
timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
# Results land in a per-run timestamped directory
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
mkdir -p "$results_directory"
log_environment >>"$results_directory/$log"
# Start the runtime
if [ "$1" != "-d" ]; then
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
sleep 1
else
echo "Running under gdb"
echo "Running under gdb" >>"$results_directory/$log"
fi
# payloads[i] is served on ports[i]
payloads=(fivebyeight/5x8 handwriting/handwrt1 hyde/hyde)
ports=(10000 10001 10002)
iterations=1000
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
for i in {0..2}; do
hey -n 200 -c 3 -q 200 -o csv -m GET -D "$experiment_directory/${payloads[$i]}.pnm" "http://localhost:${ports[$i]}"
done
sleep 1
echo "[DONE]"
# Execute the experiments
echo "Running Experiments"
for i in {0..2}; do
printf "\t%s Payload: " "${payloads[$i]}"
file=$(echo "${payloads[$i]}" | awk -F/ '{print $2}').csv
hey -n "$iterations" -c 3 -cpus 2 -o csv -m GET -D "$experiment_directory/${payloads[$i]}.pnm" "http://localhost:${ports[$i]}" >"$results_directory/$file"
echo "[DONE]"
done
# Stop the runtime
if [ "$1" != "-d" ]; then
sleep 5
kill_runtime
fi
# Generate *.csv and *.dat results
# NOTE(review): headers say "Concurrency" but rows below are keyed by payload
# name — headers appear copy-pasted from the concurrency experiment; confirm
echo -n "Parsing Results: "
printf "Concurrency,Success_Rate\n" >>"$results_directory/success.csv"
printf "Concurrency,Throughput\n" >>"$results_directory/throughput.csv"
printf "Con,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
for payload in ${payloads[*]}; do
# Calculate Success Rate for csv
file=$(echo "$payload" | awk -F/ '{print $2}')
awk -F, '
$7 == 200 {ok++}
END{printf "'"$file"',%3.5f\n", (ok / '"$iterations"' * 100)}
' <"$results_directory/$file.csv" >>"$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$file.csv" |
sort -g >"$results_directory/$file-response.csv"
# Get Number of 200s
oks=$(wc -l <"$results_directory/$file-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/$file.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$file" "$throughput" >>"$results_directory/throughput.csv"
# Generate Latency Data for csv
# NOTE(review): if oks is small, int(oks * 0.5) can be 0 and NR==p50 never
# matches, producing a short row — acceptable for large runs, but confirm
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$file"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' <"$results_directory/$file-response.csv" >>"$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
rm -rf "$results_directory/$file-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" >"$results_directory/$file.dat"
tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
done
# Generate gnuplots
generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@ -0,0 +1,43 @@
{
"active": "yes",
"name": "gocr",
"path": "gocr.aso",
"port": 10000,
"relative-deadline-us": 50000000000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1024000,
"http-resp-headers": [],
"http-resp-size": 1024000,
"http-resp-content-type": "text/plain"
},
{
"active": "yes",
"name": "gocr",
"path": "gocr.aso",
"port": 10001,
"relative-deadline-us": 50000000000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1024000,
"http-resp-headers": [],
"http-resp-size": 1024000,
"http-resp-content-type": "text/plain"
},
{
"active": "yes",
"name": "gocr",
"path": "gocr.aso",
"port": 10002,
"relative-deadline-us": 50000000000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 5335057,
"http-resp-headers": [],
"http-resp-size": 5335057,
"http-resp-content-type": "text/plain"
}

@ -0,0 +1,12 @@
# Success-rate plot: % of 2XX responses read from success.dat (produced by
# run.sh), rendered to success.jpg
# NOTE(review): xlabel says "Connections" but this experiment's first column
# is the payload name — likely copy-pasted from the concurrency plot; confirm
reset
set term jpeg
set output "success.jpg"
set xlabel "Connections"
set xrange [-5:105]
set ylabel "% 2XX"
set yrange [0:110]
plot 'success.dat' using 1:2 title '2XX'

@ -0,0 +1,10 @@
#!/bin/bash
# Runs every gocr test suite in sequence (handwriting, hyde, fivebyeight),
# aborting if any directory is missing.
# FIX: the original repeated the cd/run/cd pattern three times and the final
# "cd .." was missing the "|| exit" guard the earlier ones had.
for suite in handwriting hyde fivebyeight; do
	cd "$suite" || exit
	./run.sh
	cd .. || exit
done

@ -0,0 +1,13 @@
# Throughput plot: requests/sec read from throughput.dat (produced by
# run.sh), rendered to throughput.jpg
reset
set term jpeg
set output "throughput.jpg"
# TODO: Axis shouldn't be linear
set xlabel "Connections"
set xrange [-5:105]
set ylabel "Requests/sec"
set yrange [0:]
plot 'throughput.dat' using 1:2 title 'Reqs/sec'

@ -0,0 +1,47 @@
#!/bin/bash
# Shared helpers sourced by the experiment run.sh scripts.
# generate_gnuplots assumes the caller has defined $results_directory and
# $experiment_directory before invoking it.
# Records the git revision and status, the Makefile, the calling run.sh, and
# CPU details so a results directory captures the exact experiment conditions
log_environment() {
echo "*******"
echo "* Git *"
echo "*******"
git log | head -n 1 | cut -d' ' -f2
git status
echo ""
echo "************"
echo "* Makefile *"
echo "************"
cat ../../Makefile
echo ""
echo "**********"
echo "* Run.sh *"
echo "**********"
cat run.sh
echo ""
echo "************"
echo "* Hardware *"
echo "************"
lscpu
echo ""
echo "*************"
echo "* Execution *"
echo "*************"
}
# Kills the runtime and any lingering hey load-generator processes
kill_runtime() {
echo -n "Running Cleanup: "
pkill sledgert >/dev/null 2>/dev/null
pkill hey >/dev/null 2>/dev/null
echo "[DONE]"
}
# Renders the latency/success/throughput plots inside $results_directory
# using the gnuplot scripts two levels above it, then returns to
# $experiment_directory
generate_gnuplots() {
cd "$results_directory" || exit
gnuplot ../../latency.gnuplot
gnuplot ../../success.gnuplot
gnuplot ../../throughput.gnuplot
cd "$experiment_directory" || exit
}

@ -0,0 +1,36 @@
# Concurrency
## Question
_How does increasing levels of concurrent client requests affect tail latency, throughput, and the success/error rate of sandbox execution?_
## Independent Variable
- The number of concurrent client requests made at a given time
## Dependent Variables
- p50, p90, p99, and p100 latency measured in ms
- throughput measured in requests/second
- success rate, measured in % of requests that return a 200
## Assumptions about test environment
- You have a modern bash shell. My Linux environment shows version 4.4.20(1)-release
- `hey` (https://github.com/rakyll/hey) is available in your PATH
- You have compiled `sledgert` and the `empty.so` test workload
## To Execute
1. Run `./run.sh`
2. View the results in the newest timestamped directory in `./res`
## To Debug
1. Run `./debug.sh` in a tab
2. Run `./run.sh -d` in a second tab
## TODO
- Harden scripts to validate assumptions
- Improve error handling in scripts. If `sledgert` crashes, this charges forward until it hits a divide-by-zero error when attempting to clean up data that doesn't exist

@ -0,0 +1,19 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
experiment_directory=$(pwd)
project_directory=$(cd ../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
gdb --eval-command="handle SIGUSR1 nostop" \
--eval-command="handle SIGPIPE nostop" \
--eval-command="set pagination off" \
--eval-command="set substitute-path /sledge/runtime $project_directory" \
--eval-command="run $experiment_directory/spec.json" \
sledgert

@ -0,0 +1,19 @@
# Latency-percentile plot for the concurrency experiment: p50/p90/p99/p100
# latency (ms) vs. concurrency, read from latency.dat, rendered to latency.jpg
reset
set term jpeg
set output "latency.jpg"
set xlabel "Concurrency"
set ylabel "Latency (ms)"
set key left top
set xrange [-5:105]
set yrange [0:]
set style histogram columnstacked
# FIX: the final plot entry previously ended with ", \" — a trailing comma
# plus a dangling line continuation, which is a gnuplot syntax error
plot 'latency.dat' using 1:2 title 'p50', \
     'latency.dat' using 1:3 title 'p90', \
     'latency.dat' using 1:4 title 'p99', \
     'latency.dat' using 1:5 title 'p100'

@ -0,0 +1,107 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
# Results land in a per-run timestamped directory
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
mkdir -p "$results_directory"
log_environment >>"$results_directory/$log"
# Start the runtime
if [ "$1" != "-d" ]; then
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
sleep 1
else
echo "Running under gdb"
echo "Running under gdb" >>"$results_directory/$log"
fi
iterations=10000
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
hey -n "$iterations" -c 3 -q 200 -o csv -m GET http://localhost:10000
sleep 5
echo "[DONE]"
# Execute the experiments
concurrency=(1 20 40 60 80 100)
echo "Running Experiments"
for conn in ${concurrency[*]}; do
printf "\t%d Concurrency: " "$conn"
hey -n "$iterations" -c "$conn" -cpus 2 -o csv -m GET http://localhost:10000 >"$results_directory/con$conn.csv"
echo "[DONE]"
done
# Stop the runtime
if [ "$1" != "-d" ]; then
sleep 5
kill_runtime
fi
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Concurrency,Success_Rate\n" >>"$results_directory/success.csv"
printf "Concurrency,Throughput\n" >>"$results_directory/throughput.csv"
printf "Con,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"
for conn in ${concurrency[*]}; do
# Calculate Success Rate for csv
awk -F, '
$7 == 200 {ok++}
END{printf "'"$conn"',%3.5f\n", (ok / '"$iterations"' * 100)}
' <"$results_directory/con$conn.csv" >>"$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/con$conn.csv" |
sort -g >"$results_directory/con$conn-response.csv"
# Get Number of 200s
oks=$(wc -l <"$results_directory/con$conn-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/con$conn.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%d,%f\n" "$conn" "$throughput" >>"$results_directory/throughput.csv"
# Generate Latency Data for csv
# NOTE(review): if oks is small, int(oks * 0.5) can be 0 and NR==p50 never
# matches, producing a short row — acceptable for large runs, but confirm
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$conn"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' <"$results_directory/con$conn-response.csv" >>"$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
rm -rf "$results_directory/con$conn-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" >"$results_directory/$file.dat"
tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
done
# Generate gnuplots
generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@ -0,0 +1,14 @@
{
"active": "yes",
"name": "empty",
"path": "empty_wasm.so",
"port": 10000,
"relative-deadline-us": 50000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1024,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain"
}

@ -0,0 +1,12 @@
# Success-rate plot for the concurrency experiment: % of 2XX responses per
# concurrency level, read from success.dat, rendered to success.jpg
reset
set term jpeg
set output "success.jpg"
set xlabel "Concurrency"
set ylabel "% 2XX"
set xrange [-5:105]
set yrange [0:110]
plot 'success.dat' using 1:2 title '2XX'

@ -0,0 +1,12 @@
# Throughput plot for the concurrency experiment: requests/sec per
# concurrency level, read from throughput.dat, rendered to throughput.jpg
reset
set term jpeg
set output "throughput.jpg"
set xlabel "Concurrency"
set ylabel "Requests/sec"
set xrange [-5:105]
set yrange [0:]
plot 'throughput.dat' using 1:2 title 'Reqs/sec'

@ -0,0 +1,48 @@
# Admissions Control
## Discussion of Implementation
The admissions control subsystem seeks to ensure that the system does not accept more work than it can execute while meeting the relative deadline defined in a module's JSON specification.
The system maintains an integral value expressing the capacity of the system as millionths of a worker core. This assumes that the runtime has "pinned" these workers to underlying processors and has no contention with other workloads.
The system maintains a second integral value expressing the total accepted work.
The module specification provides a relative deadline, an expected execution time, and a percentile target expressing the pXX latency that the admissions control system should use when making admissions decisions (tunable from 50% to 99%). Tuning this percentile expresses how conservative the system should be with regard to scheduling. Selecting a lower value, such as 50%, reserves less processor time and results in a higher likelihood that the relative deadline is not met. Selecting a higher value, such as 99%, reserves more processor time and provides a higher likelihood that the relative deadline will be met. The provided expected execution time is assumed to match the percentile provided.
Dividing the expected execution time by the relative deadline yields the fraction of a worker needed to meet the deadline.
If the existing accepted workload plus the required work of this new workload is less than the system capacity, the workload is accepted, and the integral value expressing the total accepted work is increased. The resulting sandbox request is tagged with the fraction of a worker it was calculated to use, and when the request completes, the total accepted work is decreased by this amount.
If the existing accepted workload plus the required work of this new workload is greater than the system capacity, the request is rejected and the runtime sends the client an HTTP 503 response.
While the module specification provides an expected execution time, the system does not trust this value and only uses it in the absence of better information. Each sandbox is profiled as it runs through the system, and the end-to-end execution times of successful sandbox requests are added to a specialized performance window data structure that stores the last N execution times sorted in order of execution time. This structure optimizes for quick lookups of a specific pXX percentile.
Once data is seeded into this data structure, the initial execution estimate provided in the module specification is ignored, and the pXX target is instead used to lookup the actual pXX performance metric.
Future Work:
Currently, the scheduler takes no action when an executing sandbox exceeds its pXX execution time or deadline.
In the case of the pXX workload, this means that a workload configured to target p50 during admissions control decisions with exceptionally poor p99 performance causes system-wide overheads that can cause other systems to miss their deadlines.
Even worse, when executing beyond the relative deadline, the request might be too stale for the client.
In the absolute worst case, one can imagine a client workload caught in an infinite loop that causes permanent head of line blocking because its deadline is earlier than the current time, such that nothing can possibly preempt the executing workload.
## Question
- Does Admissions Control guarantee that deadlines are met?
## Independent Variable
Deadline is disabled versus deadline is enabled
## Invariants
Single workload
Use FIFO policy
## Dependent Variables
End-to-end execution time of a workload measured from a client measured relative to its deadline

@ -0,0 +1,19 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
# Run from this experiment's directory; expects spec.json here and sledgert in <root>/bin
experiment_directory=$(pwd)
# Two levels up to the runtime root — assumes runtime/experiments/<name> layout; TODO confirm
project_directory=$(cd ../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
# substitute-path remaps the container build path /sledge/runtime to this checkout
gdb --eval-command="handle SIGUSR1 nostop" \
--eval-command="handle SIGPIPE nostop" \
--eval-command="set pagination off" \
--eval-command="set substitute-path /sledge/runtime $project_directory" \
--eval-command="run $experiment_directory/spec.json" \
sledgert

@ -0,0 +1,14 @@
#!/bin/bash
# Profiles sledgert with `perf record` (call graphs via -g, per-thread event
# counts via -s), running 5 workers under the EDF scheduler with this
# directory's spec.json.
# NOTE(review): the original header claimed this ran under GDB and disabled
# pagination/SIGUSR1 stopping — that text was copy-pasted from debug.sh and
# did not describe this script.
experiment_directory=$(pwd)
project_directory=$(cd ../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
# Make sledgert and its shared objects resolvable from the bin directory
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=EDF perf record -g -s sledgert "$experiment_directory/spec.json"

@ -0,0 +1,124 @@
#!/bin/bash
source ../common.sh

# This experiment is intended to document how the level of concurrent requests
# influences the latency, throughput, and success/failure rate under both the
# EDF and FIFO schedulers
# Use -d flag if running under gdb

timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)

schedulers=(EDF FIFO)
for scheduler in ${schedulers[*]}; do
	results_directory="$experiment_directory/res/$timestamp/$scheduler"
	log=log.txt

	mkdir -p "$results_directory"
	log_environment >>"$results_directory/$log"

	# Start the runtime unless an external debugger session is driving it
	if [ "$1" != "-d" ]; then
		SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
		sleep 1
	else
		echo "Running under gdb"
		echo "Running under gdb" >>"$results_directory/$log"
	fi

	inputs=(40 10)
	duration_sec=15
	offset=5

	# Execute workloads long enough for runtime to learn expected execution time
	echo -n "Running Samples: "
	for input in ${inputs[*]}; do
		hey -z ${duration_sec}s -cpus 3 -t 0 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
	done
	echo "[DONE]"
	sleep 5

	echo "Running Experiments"
	# Run each separately to get an uncontended baseline
	hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40.csv"
	hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10.csv"

	# Run lower priority first, then higher priority. The lower priority has offsets to
	# ensure it runs the entire time the high priority is trying to run
	hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40-con.csv" &
	sleep $offset
	hey -z ${duration_sec}s -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10-con.csv" &
	sleep $((duration_sec + offset + 15))

	# Stop the runtime if not in debug mode
	[ "$1" != "-d" ] && kill_runtime

	# Generate *.csv and *.dat results
	echo -n "Parsing Results: "

	printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
	printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
	printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"

	deadlines_ms=(2 2 3000 3000)
	payloads=(fib10 fib10-con fib40 fib40-con)

	for ((i = 0; i < 4; i++)); do
		payload=${payloads[$i]}
		deadline=${deadlines_ms[$i]}

		# Get Number of Requests (minus the csv header row)
		requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
		((requests == 0)) && continue

		# Calculate Success Rate for csv: 200s that also met their deadline
		# (%% emits a literal percent sign in awk's printf)
		awk -F, '
			$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
			END{printf "'"$payload"',%3.5f%%\n", (ok / (NR - 1) * 100)}
		' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"

		# Filter on 200s, convert from s to ms, and sort
		awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
			sort -g >"$results_directory/$payload-response.csv"

		# Get Number of 200s
		oks=$(wc -l <"$results_directory/$payload-response.csv")
		((oks == 0)) && continue # If all errors, skip line

		# Get Latest Timestamp
		duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
		throughput=$(echo "$oks/$duration" | bc)
		printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"

		# Generate Latency Data for csv (percentile indexes into the sorted responses)
		awk '
			BEGIN {
				sum = 0
				p50 = int('"$oks"' * 0.5)
				p90 = int('"$oks"' * 0.9)
				p99 = int('"$oks"' * 0.99)
				p100 = '"$oks"'
				printf "'"$payload"',"
			}
			NR==p50 {printf "%1.4f,", $0}
			NR==p90 {printf "%1.4f,", $0}
			NR==p99 {printf "%1.4f,", $0}
			NR==p100 {printf "%1.4f\n", $0}
		' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"

		# Delete scratch file used for sorting/counting
		# rm -rf "$results_directory/$payload-response.csv"
	done

	# Transform csvs to dat files for gnuplot
	for file in success latency throughput; do
		echo -n "#" >"$results_directory/$file.dat"
		tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
	done

	# Generate gnuplots. Commented out because we don't have *.gnuplot files defined
	# generate_gnuplots

	# Cleanup, if required
	echo "[DONE]"
done

@ -0,0 +1,124 @@
#!/bin/bash
source ../common.sh

# This experiment is intended to document how the level of concurrent requests
# influences the latency, throughput, and success/failure rate under both the
# EDF and FIFO schedulers. Latency percentiles are reported relative to the deadline.
# Use -d flag if running under gdb

timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)

schedulers=(EDF FIFO)
for scheduler in ${schedulers[*]}; do
	results_directory="$experiment_directory/res/$timestamp/$scheduler"
	log=log.txt

	mkdir -p "$results_directory"
	log_environment >>"$results_directory/$log"

	# Start the runtime unless an external debugger session is driving it
	if [ "$1" != "-d" ]; then
		SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
		sleep 1
	else
		echo "Running under gdb"
		echo "Running under gdb" >>"$results_directory/$log"
	fi

	inputs=(40 10)
	duration_sec=15
	offset=5

	# Execute workloads long enough for runtime to learn expected execution time
	echo -n "Running Samples: "
	for input in ${inputs[*]}; do
		hey -z ${duration_sec}s -cpus 3 -t 0 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
	done
	echo "[DONE]"
	sleep 5

	echo "Running Experiments"
	# Run each separately to get an uncontended baseline
	hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40.csv"
	hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10.csv"

	# Run lower priority first, then higher priority. The lower priority has offsets to
	# ensure it runs the entire time the high priority is trying to run
	hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40-con.csv" &
	sleep $offset
	hey -z ${duration_sec}s -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10-con.csv" &
	sleep $((duration_sec + offset + 15))

	# Stop the runtime if not in debug mode
	[ "$1" != "-d" ] && kill_runtime

	# Generate *.csv and *.dat results
	echo -n "Parsing Results: "

	printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
	printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
	printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"

	deadlines_ms=(2 2 3000 3000)
	payloads=(fib10 fib10-con fib40 fib40-con)

	for ((i = 0; i < 4; i++)); do
		payload=${payloads[$i]}
		deadline=${deadlines_ms[$i]}

		# Get Number of Requests (minus the csv header row)
		requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
		((requests == 0)) && continue

		# Calculate Success Rate for csv: 200s that also met their deadline
		# (%% emits a literal percent sign in awk's printf)
		awk -F, '
			$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
			END{printf "'"$payload"',%3.5f%%\n", (ok / (NR - 1) * 100)}
		' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"

		# Filter on 200s, convert from s to ms, and sort
		awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
			sort -g >"$results_directory/$payload-response.csv"

		# Get Number of 200s
		oks=$(wc -l <"$results_directory/$payload-response.csv")
		((oks == 0)) && continue # If all errors, skip line

		# Get Latest Timestamp
		duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
		throughput=$(echo "$oks/$duration" | bc)
		printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"

		# Generate Latency Data for csv, expressed as a percentage of the deadline
		# (%% emits a literal percent sign in awk's printf)
		awk '
			BEGIN {
				sum = 0
				p50 = int('"$oks"' * 0.5)
				p90 = int('"$oks"' * 0.9)
				p99 = int('"$oks"' * 0.99)
				p100 = '"$oks"'
				printf "'"$payload"',"
			}
			NR==p50 {printf "%1.4f%%,", $0 / '"$deadline"' * 100}
			NR==p90 {printf "%1.4f%%,", $0 / '"$deadline"' * 100}
			NR==p99 {printf "%1.4f%%,", $0 / '"$deadline"' * 100}
			NR==p100 {printf "%1.4f%%\n", $0 / '"$deadline"' * 100}
		' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"

		# Delete scratch file used for sorting/counting
		# rm -rf "$results_directory/$payload-response.csv"
	done

	# Transform csvs to dat files for gnuplot
	for file in success latency throughput; do
		echo -n "#" >"$results_directory/$file.dat"
		tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
	done

	# Generate gnuplots. Commented out because we don't have *.gnuplot files defined
	# generate_gnuplots

	# Cleanup, if required
	echo "[DONE]"
done

@ -0,0 +1,5 @@
# Ad-hoc load: a burst of 200 concurrent fib(40) requests followed by
# a burst of 500 concurrent fib(10) requests
# NOTE(review): no shebang — presumably meant to be pasted or sourced manually; confirm
hey -n 200 -c 200 -t 0 -m GET -d "40\n" http://localhost:10040
hey -n 500 -c 500 -t 0 -m GET -d "10\n" http://localhost:10010

@ -3,7 +3,8 @@
"name": "fibonacci_10",
"path": "fibonacci_wasm.so",
"port": 10010,
"relative-deadline-us": 4000,
"expected-execution-us": 600,
"relative-deadline-us": 2000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
@ -17,6 +18,7 @@
"name": "fibonacci_20",
"path": "fibonacci_wasm.so",
"port": 10020,
"expected-execution-us": 900,
"relative-deadline-us": 5000,
"argsize": 1,
"http-req-headers": [],
@ -31,7 +33,8 @@
"name": "fibonacci_25",
"path": "fibonacci_wasm.so",
"port": 10025,
"relative-deadline-us": 6000,
"expected-execution-us": 90000,
"relative-deadline-us": 200000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
@ -45,7 +48,8 @@
"name": "fibonacci_30",
"path": "fibonacci_wasm.so",
"port": 10030,
"relative-deadline-us": 8000,
"expected-execution-us": 9000,
"relative-deadline-us": 80000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
@ -59,6 +63,7 @@
"name": "fibonacci_35",
"path": "fibonacci_wasm.so",
"port": 10035,
"expected-execution-us": 9000,
"relative-deadline-us": 53000,
"argsize": 1,
"http-req-headers": [],
@ -67,4 +72,19 @@
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain"
},
{
"active": "yes",
"name": "fibonacci_40",
"path": "fibonacci_wasm.so",
"port": 10040,
"expected-execution-us": 550000,
"relative-deadline-us": 300000000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1024,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain"
}

@ -0,0 +1,12 @@
#!/bin/bash
# Generates payloads of 1KB, 10KB, 100KB, 1MB, each consisting of repeated 'a' characters
for size in 1024 $((1024 * 10)) $((1024 * 100)) $((1024 * 1024)); do
	rm -rf $size.txt
	echo -n "Generating $size:"
	# head -c + tr produces the file in one pipeline instead of a per-byte
	# shell loop (the original looped printf a million times for the 1MB file)
	head -c "$size" /dev/zero | tr '\0' 'a' >>$size.txt
	echo "[DONE]"
done

@ -0,0 +1,19 @@
#!/bin/bash
# Launches sledgert under gdb against this experiment's spec.json.
# Remaps the container build path (/sledge/runtime) onto this checkout so that
# source listings work outside of the Docker container, disables pagination,
# and keeps SIGUSR1/SIGPIPE from pausing execution.

here=$(pwd)
project_root=$(cd ../.. && pwd)
bin_dir=$(cd "$project_root"/bin && pwd)

export LD_LIBRARY_PATH="$bin_dir:$LD_LIBRARY_PATH"
export PATH="$bin_dir:$PATH"

# -ex is gdb's shorthand for --eval-command
gdb -ex "handle SIGUSR1 nostop" \
	-ex "handle SIGPIPE nostop" \
	-ex "set pagination off" \
	-ex "set substitute-path /sledge/runtime $project_root" \
	-ex "run $here/spec.json" \
	sledgert

@ -0,0 +1,20 @@
reset

set term jpeg
set output "latency.jpg"

set xlabel "Payload (bytes)"
set xrange [-5:1050000]

set ylabel "Latency (ms)"
set yrange [0:]

set key left top

set style histogram columnstacked

# The trailing ", \" after the final plot element was removed: a dangling
# continuation with no following element is a syntax error in gnuplot
plot 'latency.dat' using 1:2 title 'p50', \
	'latency.dat' using 1:3 title 'p90', \
	'latency.dat' using 1:4 title 'p99', \
	'latency.dat' using 1:5 title 'p100'

@ -0,0 +1,123 @@
#!/bin/bash
source ../common.sh

# This experiment is intended to document how the payload size (1KB-1MB bodies)
# influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb

timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)

results_directory="$experiment_directory/res/$timestamp"
log=log.txt

mkdir -p "$results_directory"
log_environment >>"$results_directory/$log"

# Start the runtime unless an external debugger session is driving it
if [ "$1" != "-d" ]; then
	PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
	sleep 1
else
	echo "Running under gdb"
	echo "Running under gdb" >>"$results_directory/$log"
fi

payloads=(1024 10240 102400 1048576)
ports=(10000 10001 10002 10003)
iterations=10000

# If one of the expected body files doesn't exist, trigger the generation script.
for payload in ${payloads[*]}; do
	if test -f "$experiment_directory/body/$payload.txt"; then
		continue
	else
		echo "Generating Payloads: "
		# Run in a subshell so the cd does not leak into the rest of the script
		# (the original used a { } group, which changed the script's cwd)
		(
			cd "$experiment_directory/body" && ./generate.sh
		)
		break
	fi
done

# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
hey -n "$iterations" -c 3 -q 200 -o csv -m GET -D "$experiment_directory/body/1024.txt" http://localhost:10000
hey -n "$iterations" -c 3 -q 200 -o csv -m GET -D "$experiment_directory/body/10240.txt" http://localhost:10001
hey -n "$iterations" -c 3 -q 200 -o csv -m GET -D "$experiment_directory/body/102400.txt" http://localhost:10002
hey -n "$iterations" -c 3 -q 200 -o csv -m GET -D "$experiment_directory/body/1048576.txt" http://localhost:10003
sleep 5
echo "[DONE]"

# Execute the experiments
echo "Running Experiments"
for i in {0..3}; do
	printf "\t%d Payload: " "${payloads[$i]}"
	hey -n "$iterations" -c 1 -cpus 2 -o csv -m GET -D "$experiment_directory/body/${payloads[$i]}.txt" http://localhost:"${ports[$i]}" >"$results_directory/${payloads[$i]}.csv"
	echo "[DONE]"
done

# Stop the runtime
if [ "$1" != "-d" ]; then
	sleep 5
	kill_runtime
fi

# Generate *.csv and *.dat results
echo -n "Parsing Results: "

printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"

for payload in ${payloads[*]}; do
	# Calculate Success Rate for csv
	awk -F, '
		$7 == 200 {ok++}
		END{printf "'"$payload"',%3.5f\n", (ok / '"$iterations"' * 100)}
	' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"

	# Filter on 200s, convert from s to ms, and sort
	awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
		sort -g >"$results_directory/$payload-response.csv"

	# Get Number of 200s
	oks=$(wc -l <"$results_directory/$payload-response.csv")
	((oks == 0)) && continue # If all errors, skip line

	# Get Latest Timestamp
	duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
	throughput=$(echo "$oks/$duration" | bc)
	printf "%d,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"

	# Generate Latency Data for csv (percentile indexes into the sorted responses)
	awk '
		BEGIN {
			sum = 0
			p50 = int('"$oks"' * 0.5)
			p90 = int('"$oks"' * 0.9)
			p99 = int('"$oks"' * 0.99)
			p100 = '"$oks"'
			printf "'"$payload"',"
		}
		NR==p50 {printf "%1.4f,", $0}
		NR==p90 {printf "%1.4f,", $0}
		NR==p99 {printf "%1.4f,", $0}
		NR==p100 {printf "%1.4f\n", $0}
	' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"

	# Delete scratch file used for sorting/counting
	rm -rf "$results_directory/$payload-response.csv"
done

# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
	echo -n "#" >"$results_directory/$file.dat"
	tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
done

# Generate gnuplots
generate_gnuplots

# Cleanup, if required
echo "[DONE]"

@ -0,0 +1,60 @@
{
"active": "yes",
"name": "work1k",
"path": "work1k_wasm.so",
"port": 10000,
"expected-execution-us": 400,
"relative-deadline-us": 2000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1548,
"http-resp-headers": [],
"http-resp-size": 1548,
"http-resp-content-type": "text/plain"
},
{
"active": "yes",
"name": "work10k",
"path": "work10k_wasm.so",
"port": 10001,
"expected-execution-us": 600,
"relative-deadline-us": 2000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 10480,
"http-resp-headers": [],
"http-resp-size": 10480,
"http-resp-content-type": "text/plain"
},
{
"active": "yes",
"name": "work100k",
"path": "work100k_wasm.so",
"port": 10002,
"expected-execution-us": 700,
"relative-deadline-us": 2000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 104800,
"http-resp-headers": [],
"http-resp-size": 104800,
"http-resp-content-type": "text/plain"
},
{
"active": "yes",
"name": "work1m",
"path": "work1m_wasm.so",
"port": 10003,
"expected-execution-us": 2000,
"relative-deadline-us": 6000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1048776,
"http-resp-headers": [],
"http-resp-size": 1048776,
"http-resp-content-type": "text/plain"
}

@ -0,0 +1,12 @@
reset
set term jpeg
set output "success.jpg"
# X axis spans the generated payload sizes (1KB up to ~1MB)
set xlabel "Payload (bytes)"
set xrange [-5:1050000]
# Percentage of requests returning HTTP 2XX, so cap slightly above 100
set ylabel "% 2XX"
set yrange [0:110]
plot 'success.dat' using 1:2 title '2XX'

@ -0,0 +1,3 @@
#!/bin/bash
# Quick smoke test: 100 GET requests with the 1KB payload body against the work1k port
hey -n 100 -c 3 -q 100 -m GET -D "./body/1024.txt" http://localhost:10000

@ -0,0 +1,13 @@
reset
set term jpeg
set output "throughput.jpg"
# TODO: Axis shouldn't be linear
# X axis spans the generated payload sizes (1KB up to ~1MB)
set xlabel "Payload (bytes)"
set xrange [-5:1050000]
set ylabel "Requests/sec"
set yrange [0:]
plot 'throughput.dat' using 1:2 title 'Reqs/sec'

@ -0,0 +1,28 @@
# Preemption
## Question
- How do mixed criticality workloads perform under the SLEdge scheduler policies?
- How does the latency of a high criticality workload that triggers preemption on a system under load compare to being the only workload on the system?
- What is the slowdown on the low priority workload?
- How does this affect aggregate throughput?
## Setup
The system is configured with admission control disabled.
The driver script drives a bimodal distribution of long-running low-priority and short-running high-priority workloads.
Relative deadlines are tuned such that the scheduler should always preempt the low-priority workload for the high-priority workload.
The driver script first runs the two workloads separately as a baseline.
It then runs them concurrently, starting the low-priority long-running workload first so that the system begins execution and accumulates requests in its data structures. The high-priority short-running workload then begins.
## Independent Variable
The Scheduling Policy: EDF versus FIFO
## Dependent Variables
Latency of high priority workload

@ -0,0 +1,104 @@
#!/bin/bash
source ../common.sh

# This experiment is intended to document how the level of concurrent requests
# influences the latency, throughput, and success/failure rate
# Modified to target a remote host; the runtime is assumed to already be
# running there, so no sledgert is started or killed locally

timestamp=$(date +%s)
experiment_directory=$(pwd)
host=192.168.1.13

results_directory="$experiment_directory/res/$timestamp"
log=log.txt

mkdir -p "$results_directory"
log_environment >>"$results_directory/$log"

inputs=(40 10)
duration_sec=15
offset=5

# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
for input in ${inputs[*]}; do
	hey -z ${duration_sec}s -cpus 6 -t 0 -o csv -m GET -d "$input\n" http://"$host":$((10000 + input))
done
echo "[DONE]"
sleep 5

echo "Running Experiments"
# Run each separately to get an uncontended baseline
hey -z ${duration_sec}s -cpus 6 -c 100 -t 0 -o csv -m GET -d "40\n" "http://$host:10040" >"$results_directory/fib40.csv"
hey -z ${duration_sec}s -cpus 6 -c 100 -t 0 -o csv -m GET -d "10\n" "http://$host:10010" >"$results_directory/fib10.csv"

# Run lower priority first, then higher priority. The lower priority has offsets to
# ensure it runs the entire time the high priority is trying to run
hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 100 -t 0 -o csv -m GET -d "40\n" "http://$host:10040" >"$results_directory/fib40-con.csv" &
sleep $offset
hey -z ${duration_sec}s -cpus 3 -c 100 -t 0 -o csv -m GET -d "10\n" "http://$host:10010" >"$results_directory/fib10-con.csv" &
sleep $((duration_sec + offset + 15))

# Generate *.csv and *.dat results
echo -n "Parsing Results: "

printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"

payloads=(fib10 fib10-con fib40 fib40-con)
for payload in ${payloads[*]}; do
	# Get Number of Requests (minus the csv header row)
	requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
	((requests == 0)) && continue

	# Calculate Success Rate for csv
	# (%% emits a literal percent sign in awk's printf)
	awk -F, '
		$7 == 200 {ok++}
		END{printf "'"$payload"',%3.5f%%\n", (ok / (NR - 1) * 100)}
	' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"

	# Filter on 200s, convert from s to ms, and sort
	awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
		sort -g >"$results_directory/$payload-response.csv"

	# Get Number of 200s
	oks=$(wc -l <"$results_directory/$payload-response.csv")
	((oks == 0)) && continue # If all errors, skip line

	# Get Latest Timestamp
	duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
	throughput=$(echo "$oks/$duration" | bc)
	printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"

	# Generate Latency Data for csv (percentile indexes into the sorted responses)
	awk '
		BEGIN {
			sum = 0
			p50 = int('"$oks"' * 0.5)
			p90 = int('"$oks"' * 0.9)
			p99 = int('"$oks"' * 0.99)
			p100 = '"$oks"'
			printf "'"$payload"',"
		}
		NR==p50 {printf "%1.4f,", $0}
		NR==p90 {printf "%1.4f,", $0}
		NR==p99 {printf "%1.4f,", $0}
		NR==p100 {printf "%1.4f\n", $0}
	' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"

	# Delete scratch file used for sorting/counting
	# rm -rf "$results_directory/$payload-response.csv"
done

# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
	echo -n "#" >"$results_directory/$file.dat"
	tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
done

# Generate gnuplots. Commented out because we don't have *.gnuplot files defined
# generate_gnuplots

# Cleanup, if required
echo "[DONE]"

@ -0,0 +1,19 @@
#!/bin/bash
# Launches sledgert under gdb against this experiment's spec.json.
# Remaps the container build path (/sledge/runtime) onto this checkout so that
# source listings work outside of the Docker container, disables pagination,
# and keeps SIGUSR1/SIGPIPE from pausing execution.

here=$(pwd)
project_root=$(cd ../.. && pwd)
bin_dir=$(cd "$project_root"/bin && pwd)

export LD_LIBRARY_PATH="$bin_dir:$LD_LIBRARY_PATH"
export PATH="$bin_dir:$PATH"

# -ex is gdb's shorthand for --eval-command
gdb -ex "handle SIGUSR1 nostop" \
	-ex "handle SIGPIPE nostop" \
	-ex "set pagination off" \
	-ex "set substitute-path /sledge/runtime $project_root" \
	-ex "run $here/spec.json" \
	sledgert

@ -0,0 +1,34 @@
#!/bin/bash
# Drives the mixed-preemption experiment with wrk: samples each workload so the
# runtime can learn execution times, runs the workloads in parallel, then
# extracts latency percentile CSVs from the wrk logs.

cd ../../bin
LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/mixed_preemption/test_mixed_preemption.json &
cd ../tests/mixed_preemption/

# Ensure the results directory exists before redirecting output into it
# (without this, every >./res/... redirect below fails on a fresh checkout)
mkdir -p ./res

# Run small samples on each port to let the runtime figure out the execution time
sleep 10
echo "Running Samples"
wrk -d 20s -t1 -s post.lua http://localhost:10010 -- --delay 500 10\n
wrk -d 20s -t1 -s post.lua http://localhost:10020 -- --delay 500 20\n
# NOTE(review): this samples port 10030 but the experiment below targets 10025 — confirm which is intended
wrk -d 20s -t1 -s post.lua http://localhost:10030 -- --delay 500 25\n

# Run in Parallel
sleep 10
echo "Running Experiments"
wrk -d 1m -t1 -s post.lua http://localhost:10010 -- --delay 125 10\n >./res/fib10.txt &
wrk -d 2m -t1 -s post.lua http://localhost:10020 -- --delay 250 20\n >./res/fib20.txt &
wrk -d 3m -t1 -s post.lua http://localhost:10025 -- --delay 500 25\n >./res/fib25.txt

# Kill the Background Sledge processes
sleep 10
echo "Running Cleanup"
pkill sledgert
pkill wrk

# Extract the Latency CSV Data from the Log
echo 'Fib10, Fib10' >./res/fib10.csv
grep -A200 -m1 -e 'Percentile, Latency' ./res/fib10.txt >>./res/fib10.csv
echo 'Fib20, Fib20' >./res/fib20.csv
grep -A200 -m1 -e 'Percentile, Latency' ./res/fib20.txt >>./res/fib20.csv
echo 'Fib25, Fib25' >./res/fib25.csv
grep -A200 -m1 -e 'Percentile, Latency' ./res/fib25.txt >>./res/fib25.csv
paste -d, ./res/fib10.csv ./res/fib20.csv ./res/fib25.csv >./res/merged.csv

@ -0,0 +1,14 @@
#!/bin/bash
# Profiles the runtime with `perf record` (call graphs enabled) using 5 workers
# and the EDF scheduler against this experiment's spec.json
# Derives the binary path relative to the location of this script so it can be
# run outside of the Docker container
experiment_directory=$(pwd)
project_directory=$(cd ../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=EDF perf record -g -s sledgert "$experiment_directory/spec.json"

@ -0,0 +1,124 @@
#!/bin/bash
source ../common.sh

# This experiment is intended to document how the level of concurrent requests
# influences the latency, throughput, and success/failure rate under both the
# EDF and FIFO schedulers
# Use -d flag if running under gdb

timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)

schedulers=(EDF FIFO)
for scheduler in ${schedulers[*]}; do
	results_directory="$experiment_directory/res/$timestamp/$scheduler"
	log=log.txt

	mkdir -p "$results_directory"
	log_environment >>"$results_directory/$log"

	# Start the runtime unless an external debugger session is driving it
	if [ "$1" != "-d" ]; then
		SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
		sleep 1
	else
		echo "Running under gdb"
		echo "Running under gdb" >>"$results_directory/$log"
	fi

	inputs=(40 10)
	duration_sec=15
	offset=5

	# Execute workloads long enough for runtime to learn expected execution time
	echo -n "Running Samples: "
	for input in ${inputs[*]}; do
		hey -z ${duration_sec}s -cpus 3 -t 0 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
	done
	echo "[DONE]"
	sleep 5

	echo "Running Experiments"
	# Run each separately to get an uncontended baseline
	hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40.csv"
	hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10.csv"

	# Run lower priority first, then higher priority. The lower priority has offsets to
	# ensure it runs the entire time the high priority is trying to run
	hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40-con.csv" &
	sleep $offset
	hey -z ${duration_sec}s -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10-con.csv" &
	sleep $((duration_sec + offset + 15))

	# Stop the runtime if not in debug mode
	[ "$1" != "-d" ] && kill_runtime

	# Generate *.csv and *.dat results
	echo -n "Parsing Results: "

	printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
	printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
	printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"

	deadlines_ms=(2 2 3000 3000)
	payloads=(fib10 fib10-con fib40 fib40-con)

	for ((i = 0; i < 4; i++)); do
		payload=${payloads[$i]}
		deadline=${deadlines_ms[$i]}

		# Get Number of Requests (minus the csv header row)
		requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
		((requests == 0)) && continue

		# Calculate Success Rate for csv: 200s that also met their deadline
		# (%% emits a literal percent sign in awk's printf)
		awk -F, '
			$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
			END{printf "'"$payload"',%3.5f%%\n", (ok / (NR - 1) * 100)}
		' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"

		# Filter on 200s, convert from s to ms, and sort
		awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
			sort -g >"$results_directory/$payload-response.csv"

		# Get Number of 200s
		oks=$(wc -l <"$results_directory/$payload-response.csv")
		((oks == 0)) && continue # If all errors, skip line

		# Get Latest Timestamp
		duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
		throughput=$(echo "$oks/$duration" | bc)
		printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"

		# Generate Latency Data for csv (percentile indexes into the sorted responses)
		awk '
			BEGIN {
				sum = 0
				p50 = int('"$oks"' * 0.5)
				p90 = int('"$oks"' * 0.9)
				p99 = int('"$oks"' * 0.99)
				p100 = '"$oks"'
				printf "'"$payload"',"
			}
			NR==p50 {printf "%1.4f,", $0}
			NR==p90 {printf "%1.4f,", $0}
			NR==p99 {printf "%1.4f,", $0}
			NR==p100 {printf "%1.4f\n", $0}
		' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"

		# Delete scratch file used for sorting/counting
		# rm -rf "$results_directory/$payload-response.csv"
	done

	# Transform csvs to dat files for gnuplot
	for file in success latency throughput; do
		echo -n "#" >"$results_directory/$file.dat"
		tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
	done

	# Generate gnuplots. Commented out because we don't have *.gnuplot files defined
	# generate_gnuplots

	# Cleanup, if required
	echo "[DONE]"
done

@ -0,0 +1,124 @@
#!/bin/bash
source ../common.sh

# This experiment is intended to document how the level of concurrent requests
# influences the latency, throughput, and success/failure rate under both the
# EDF and FIFO schedulers. Latency percentiles are reported relative to the deadline.
# Use -d flag if running under gdb

timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)

schedulers=(EDF FIFO)
for scheduler in ${schedulers[*]}; do
	results_directory="$experiment_directory/res/$timestamp/$scheduler"
	log=log.txt

	mkdir -p "$results_directory"
	log_environment >>"$results_directory/$log"

	# Start the runtime unless an external debugger session is driving it
	if [ "$1" != "-d" ]; then
		SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >>"$results_directory/$log" 2>>"$results_directory/$log" &
		sleep 1
	else
		echo "Running under gdb"
		echo "Running under gdb" >>"$results_directory/$log"
	fi

	inputs=(40 10)
	duration_sec=15
	offset=5

	# Execute workloads long enough for runtime to learn expected execution time
	echo -n "Running Samples: "
	for input in ${inputs[*]}; do
		hey -z ${duration_sec}s -cpus 3 -t 0 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
	done
	echo "[DONE]"
	sleep 5

	echo "Running Experiments"
	# Run each separately to get an uncontended baseline
	hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40.csv"
	hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10.csv"

	# Run lower priority first, then higher priority. The lower priority has offsets to
	# ensure it runs the entire time the high priority is trying to run
	hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 >"$results_directory/fib40-con.csv" &
	sleep $offset
	hey -z ${duration_sec}s -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 >"$results_directory/fib10-con.csv" &
	sleep $((duration_sec + offset + 15))

	# Stop the runtime if not in debug mode
	[ "$1" != "-d" ] && kill_runtime

	# Generate *.csv and *.dat results
	echo -n "Parsing Results: "

	printf "Payload,Success_Rate\n" >>"$results_directory/success.csv"
	printf "Payload,Throughput\n" >>"$results_directory/throughput.csv"
	printf "Payload,p50,p90,p99,p100\n" >>"$results_directory/latency.csv"

	deadlines_ms=(2 2 3000 3000)
	payloads=(fib10 fib10-con fib40 fib40-con)

	for ((i = 0; i < 4; i++)); do
		payload=${payloads[$i]}
		deadline=${deadlines_ms[$i]}

		# Get Number of Requests (minus the csv header row)
		requests=$(($(wc -l <"$results_directory/$payload.csv") - 1))
		((requests == 0)) && continue

		# Calculate Success Rate for csv: 200s that also met their deadline
		# (%% emits a literal percent sign in awk's printf)
		awk -F, '
			$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
			END{printf "'"$payload"',%3.5f%%\n", (ok / (NR - 1) * 100)}
		' <"$results_directory/$payload.csv" >>"$results_directory/success.csv"

		# Filter on 200s, convert from s to ms, and sort
		awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/$payload.csv" |
			sort -g >"$results_directory/$payload-response.csv"

		# Get Number of 200s
		oks=$(wc -l <"$results_directory/$payload-response.csv")
		((oks == 0)) && continue # If all errors, skip line

		# Get Latest Timestamp
		duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
		throughput=$(echo "$oks/$duration" | bc)
		printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"

		# Generate Latency Data for csv, expressed as a percentage of the deadline
		# (%% emits a literal percent sign in awk's printf)
		awk '
			BEGIN {
				sum = 0
				p50 = int('"$oks"' * 0.5)
				p90 = int('"$oks"' * 0.9)
				p99 = int('"$oks"' * 0.99)
				p100 = '"$oks"'
				printf "'"$payload"',"
			}
			NR==p50 {printf "%1.4f%%,", $0 / '"$deadline"' * 100}
			NR==p90 {printf "%1.4f%%,", $0 / '"$deadline"' * 100}
			NR==p99 {printf "%1.4f%%,", $0 / '"$deadline"' * 100}
			NR==p100 {printf "%1.4f%%\n", $0 / '"$deadline"' * 100}
		' <"$results_directory/$payload-response.csv" >>"$results_directory/latency.csv"

		# Delete scratch file used for sorting/counting
		# rm -rf "$results_directory/$payload-response.csv"
	done

	# Transform csvs to dat files for gnuplot
	for file in success latency throughput; do
		echo -n "#" >"$results_directory/$file.dat"
		tr ',' ' ' <"$results_directory/$file.csv" | column -t >>"$results_directory/$file.dat"
	done

	# Generate gnuplots. Commented out because we don't have *.gnuplot files defined
	# generate_gnuplots

	# Cleanup, if required
	echo "[DONE]"
done

@ -0,0 +1,5 @@
# Drive both fibonacci workloads concurrently with the hey load generator
# (-n total requests, -c concurrency, -t 0 disables the timeout, -d request body)
# 200 long-running fib(40) requests against the module listening on port 10040
hey -n 200 -c 200 -t 0 -m GET -d "40\n" http://localhost:10040
# 500 short fib(10) requests against the module listening on port 10010
hey -n 500 -c 500 -t 0 -m GET -d "10\n" http://localhost:10010

@ -0,0 +1,30 @@
{
"active": "yes",
"name": "fibonacci_10",
"path": "fibonacci_wasm.so",
"port": 10010,
"expected-execution-us": 600,
"relative-deadline-us": 2000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1024,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain"
},
{
"active": "yes",
"name": "fibonacci_40",
"path": "fibonacci_wasm.so",
"port": 10040,
"expected-execution-us": 550000,
"relative-deadline-us": 300000000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1024,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain"
}

@ -0,0 +1,123 @@
#pragma once
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include "debuglog.h"
#include "client_socket.h"
#define ADMISSIONS_CONTROL_GRANULARITY 1000000
/*
* Unitless estimate of the instantaneous fraction of system capacity required to complete all previously
* admitted work. This is used to calculate free capacity as part of admissions control
*
* The estimated requirements of a single admitted request is calculated as
* estimated execution time (cycles) / relative deadline (cycles)
*
* These estimates are incremented on request acceptance and decremented on request completion (either
* success or failure)
*/
extern _Atomic uint64_t admissions_control_admitted;
extern uint64_t admissions_control_capacity;
/**
 * Initializes the admitted-work counter to zero and derives total system capacity
 * from the number of worker threads. No-op unless compiled with ADMISSIONS_CONTROL.
 */
static inline void
admissions_control_initialize()
{
#ifdef ADMISSIONS_CONTROL
	atomic_init(&admissions_control_admitted, 0);
	/* Each worker thread contributes ADMISSIONS_CONTROL_GRANULARITY units of capacity */
	admissions_control_capacity = runtime_worker_threads_count * ADMISSIONS_CONTROL_GRANULARITY;
#endif
}
/**
 * Adds an admissions estimate to the global count of admitted work.
 * Per the comment on admissions_control_admitted, this is called on request acceptance.
 * @param admissions_estimate - unitless estimate for the accepted request, must be > 0
 */
static inline void
admissions_control_add(uint64_t admissions_estimate)
{
#ifdef ADMISSIONS_CONTROL
	assert(admissions_estimate > 0);
	atomic_fetch_add(&admissions_control_admitted, admissions_estimate);

#ifdef LOG_ADMISSIONS_CONTROL
	debuglog("Runtime Admitted: %lu / %lu\n", admissions_control_admitted, admissions_control_capacity);
#endif

#endif /* ADMISSIONS_CONTROL */
}
/**
 * Subtracts an admissions estimate from the global count of admitted work.
 * Per the comment on admissions_control_admitted, this is called on request completion
 * (success or failure).
 * NOTE(review): "substract" is a typo for "subtract", but renaming would break callers.
 * @param admissions_estimate - the same estimate previously passed to admissions_control_add
 */
static inline void
admissions_control_substract(uint64_t admissions_estimate)
{
#ifdef ADMISSIONS_CONTROL
	/* Assumption: Should never underflow */
	if (unlikely(admissions_estimate > admissions_control_admitted)) panic("Admissions Estimate underflow\n");

	atomic_fetch_sub(&admissions_control_admitted, admissions_estimate);

#ifdef LOG_ADMISSIONS_CONTROL
	debuglog("Runtime Admitted: %lu / %lu\n", admissions_control_admitted, admissions_control_capacity);
#endif

#endif /* ADMISSIONS_CONTROL */
}
/**
 * Calculates the unitless admissions estimate of a request:
 * estimated execution scaled by ADMISSIONS_CONTROL_GRANULARITY, divided by the relative deadline
 * @param estimated_execution - expected execution time in cycles
 * @param relative_deadline - relative deadline in cycles, must be non-zero
 * @returns positive estimate, or 0 when compiled without ADMISSIONS_CONTROL
 */
static inline uint64_t
admissions_control_calculate_estimate(uint64_t estimated_execution, uint64_t relative_deadline)
{
#ifdef ADMISSIONS_CONTROL
	assert(relative_deadline != 0);
	uint64_t admissions_estimate = (estimated_execution * (uint64_t)ADMISSIONS_CONTROL_GRANULARITY)
	                               / relative_deadline;
	/* A zero estimate means the deadline exceeds GRANULARITY times the execution time */
	if (admissions_estimate == 0)
		panic("Ratio of Deadline to Execution time cannot exceed %d\n", ADMISSIONS_CONTROL_GRANULARITY);

	return admissions_estimate;
#else
	return 0;
#endif
}
/**
 * Calculates the unitless admissions estimate from microsecond inputs
 * @param estimated_execution_us - expected execution time in microseconds
 * @param relative_deadline_us - relative deadline in microseconds, must be non-zero
 * @returns estimate, or 0 when compiled without ADMISSIONS_CONTROL
 */
static inline uint64_t
admissions_control_calculate_estimate_us(uint32_t estimated_execution_us, uint32_t relative_deadline_us)
{
#ifdef ADMISSIONS_CONTROL
	assert(relative_deadline_us != 0);
	/* Widen BEFORE multiplying: the original 32-bit product of execution * 1,000,000
	 * overflowed for executions above ~4294us, and the outer cast did not help */
	return ((uint64_t)estimated_execution_us * ADMISSIONS_CONTROL_GRANULARITY) / relative_deadline_us;
#else
	return 0;
#endif
}
/**
 * Logs the outcome of an admissions decision. No-op unless LOG_ADMISSIONS_CONTROL is set.
 * @param admissions_estimate - estimate of the request being decided
 * @param admitted - whether the request was admitted
 */
static inline void
admissions_control_log_decision(uint64_t admissions_estimate, bool admitted)
{
#ifdef LOG_ADMISSIONS_CONTROL
	debuglog("Admitted: %lu, Capacity: %lu, Estimate: %lu, Admitted? %s\n", admissions_control_admitted,
	         admissions_control_capacity, admissions_estimate, admitted ? "yes" : "no");
#endif /* LOG_ADMISSIONS_CONTROL */
}
/**
 * Decides whether a request with the given estimate fits within remaining system capacity
 * @param admissions_estimate - unitless estimate of the request, must be non-zero
 * @returns the admitted estimate on acceptance, 0 on rejection, or 1 when
 *          admissions control is compiled out (nominal "always admit")
 */
static inline uint64_t
admissions_control_decide(uint64_t admissions_estimate)
{
	/* Nominal non-zero value so callers see "admitted" when admissions control is disabled */
	uint64_t admitted_work = 1;

#ifdef ADMISSIONS_CONTROL
	if (unlikely(admissions_estimate == 0)) panic("Admissions estimate should never be zero");

	uint64_t currently_admitted = atomic_load(&admissions_control_admitted);
	bool     has_capacity       = currently_admitted + admissions_estimate < admissions_control_capacity;

	admissions_control_log_decision(admissions_estimate, has_capacity);

	if (has_capacity) {
		admissions_control_add(admissions_estimate);
		admitted_work = admissions_estimate;
	} else {
		admitted_work = 0;
	}
#endif /* ADMISSIONS_CONTROL */

	return admitted_work;
}

@ -0,0 +1,56 @@
#pragma once
#include "debuglog.h"
#include "perf_window.h"
/* Per-module admissions state: a sliding window of observed execution times plus
 * the cached admissions estimate derived from a configured percentile of that window */
struct admissions_info {
	struct perf_window perf_window;
	int                percentile;        /* 50 - 99 */
	int                control_index;     /* Precomputed Lookup index when perf_window is full */
	uint64_t           estimate;          /* cycles -- NOTE(review): appears to hold the unitless
	                                       * output of admissions_control_calculate_estimate; confirm */
	uint64_t           relative_deadline; /* Relative deadline in cycles. This is duplicated state */
};
/**
 * Initializes per-module admissions state, seeding the perf window and the initial estimate
 * No-op unless compiled with ADMISSIONS_CONTROL
 * @param self - must be non-NULL
 * @param percentile - percentile of observed executions used as the estimate, must be in [50, 99]
 * @param expected_execution - initial expected execution time in cycles
 * @param relative_deadline - relative deadline in cycles
 */
static inline void
admissions_info_initialize(struct admissions_info *self, int percentile, uint64_t expected_execution,
                           uint64_t relative_deadline)
{
#ifdef ADMISSIONS_CONTROL
	/* Validate arguments BEFORE touching state; the original dereferenced self
	 * prior to asserting it was non-NULL */
	assert(self != NULL);
	if (unlikely(percentile < 50 || percentile > 99)) panic("Invalid admissions percentile");

	self->relative_deadline = relative_deadline;
	self->estimate          = admissions_control_calculate_estimate(expected_execution, relative_deadline);
	debuglog("Initial Estimate: %lu\n", self->estimate);

	perf_window_initialize(&self->perf_window);

	self->percentile    = percentile;
	self->control_index = PERF_WINDOW_BUFFER_SIZE * percentile / 100;
#endif
}
/*
 * Adds an execution value to the perf window and calculates and caches an updated estimate
 * Takes the perf window lock: perf_window_add panics if the lock is not already held,
 * so the lock/unlock pair here must bracket both the add and the percentile read
 * Asserts that software interrupts are disabled on entry
 * @param self
 * @param execution_duration - observed execution time in cycles
 */
static inline void
admissions_info_update(struct admissions_info *self, uint64_t execution_duration)
{
#ifdef ADMISSIONS_CONTROL
	assert(!software_interrupt_is_enabled());

	struct perf_window *perf_window = &self->perf_window;

	LOCK_LOCK(&self->perf_window.lock);
	perf_window_add(perf_window, execution_duration);
	uint64_t estimated_execution = perf_window_get_percentile(perf_window, self->percentile, self->control_index);
	/* Recompute the cached estimate from the fresh percentile */
	self->estimate = admissions_control_calculate_estimate(estimated_execution, self->relative_deadline);
	LOCK_UNLOCK(&self->perf_window.lock);
#endif
}

@ -0,0 +1,65 @@
#pragma once
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include "panic.h"
#include "debuglog.h"
#include "http_response.h"
#include "http_total.h"
#include "runtime.h"
#include "worker_thread.h"
/**
 * Closes a client socket descriptor, logging (but not propagating) any close error
 * @param client_socket - file descriptor of the client connection to close
 */
static inline void
client_socket_close(int client_socket)
{
	int rc = close(client_socket);
	if (rc < 0) debuglog("Error closing client socket - %s", strerror(errno));
}
/**
 * Writes the canned HTTP response for a status code to a client socket and
 * increments the matching http_total response counter. Used, e.g., to reject
 * a request due to admissions control (503) or a malformed request (400).
 * @param client_socket - the client we are sending to
 * @param status_code - either 503 or 400; any other value panics
 * @returns 0 on success, -1 on write error
 */
static inline int
client_socket_send(int client_socket, int status_code)
{
	const char *response;
	int         rc;
	/* Select the static response string and account for the outcome */
	switch (status_code) {
	case 503:
		response = HTTP_RESPONSE_503_SERVICE_UNAVAILABLE;
		http_total_increment_5XX();
		break;
	case 400:
		response = HTTP_RESPONSE_400_BAD_REQUEST;
		http_total_increment_4XX();
		break;
	default:
		panic("%d is not a valid status code\n", status_code);
	}

	int sent    = 0;
	int to_send = strlen(response);
	/* Loop until the full response is written; partial writes advance sent */
	while (sent < to_send) {
		rc = write(client_socket, &response[sent], to_send - sent);
		if (rc < 0) {
			/* NOTE(review): EAGAIN is treated as an error here; presumably the socket
			 * is expected to be blocking — confirm against the accept path */
			if (errno == EAGAIN) { debuglog("Unexpectedly blocking on write of %s\n", response); }

			goto send_err;
		}
		sent += rc;
	};

	rc = 0;
done:
	return rc;
send_err:
	debuglog("Error sending to client: %s", strerror(errno));
	rc = -1;
	goto done;
}

@ -5,7 +5,6 @@
void current_sandbox_close_file_descriptor(int io_handle_index);
struct sandbox * current_sandbox_get(void);
int current_sandbox_get_file_descriptor(int io_handle_index);
union uv_any_handle *current_sandbox_get_libuv_handle(int io_handle_index);
int current_sandbox_initialize_io_handle(void);
void current_sandbox_set(struct sandbox *sandbox);
int current_sandbox_set_file_descriptor(int io_handle_index, int file_descriptor);

@ -1,22 +1,26 @@
#pragma once
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
extern int32_t debuglog_file_descriptor;
#ifdef LOG_TO_FILE
#ifndef DEBUG
#error LOG_TO_FILE is only valid if in DEBUG mode
#endif /* DEBUG */
#ifdef NDEBUG
#error LOG_TO_FILE is invalid if NDEBUG is set
#endif /* NDEBUG */
#endif /* LOG_TO_FILE */
/**
* debuglog is a macro that behaves based on the macros DEBUG and LOG_TO_FILE
* If DEBUG is not set, debuglog does nothing
* If DEBUG is set and LOG_TO_FILE is set, debuglog prints to the logfile defined in debuglog_file_descriptor
* If DEBUG is set and LOG_TO_FILE is not set, debuglog prints to STDOUT
* debuglog is a macro that behaves based on the macros NDEBUG and LOG_TO_FILE
* If NDEBUG is set, debuglog does nothing
* If NDEBUG is not set and LOG_TO_FILE is set, debuglog prints to the logfile defined in debuglog_file_descriptor
* If NDEBUG is not set and LOG_TO_FILE is not set, debuglog prints to STDERR
*/
#ifdef DEBUG
#ifdef NDEBUG
#define debuglog(fmt, ...)
#else /* NDEBUG */
#ifdef LOG_TO_FILE
#define debuglog(fmt, ...) \
dprintf(debuglog_file_descriptor, "C: %02d, T: 0x%lx, F: %s> \n\t" fmt "\n", sched_getcpu(), pthread_self(), \
@ -26,6 +30,4 @@ extern int32_t debuglog_file_descriptor;
fprintf(stderr, "C: %02d, T: 0x%lx, F: %s> \n\t" fmt "\n", sched_getcpu(), pthread_self(), __func__, \
##__VA_ARGS__);
#endif /* LOG_TO_FILE */
#else /* !DEBUG */
#define debuglog(fmt, ...)
#endif /* DEBUG */
#endif /* !NDEBUG */

@ -1,12 +1,16 @@
#pragma once
#include <stdbool.h>
#include <stdio.h>
#include "http.h"
/* all in-memory ptrs.. don't mess around with that! */
struct http_header {
char *key;
int key_length;
char *value;
int value_length;
};
struct http_request {
@ -24,6 +28,21 @@ struct http_request {
bool message_end; /* boolean flag set when body processing is complete */
};
/**
 * Dumps a parsed http_request's header table and body lengths to stdout for debugging
 * @param self - the request to print
 */
static inline void
http_request_print(struct http_request *self)
{
	printf("Header Count %d\n", self->header_count);
	printf("Header Content:\n");
	for (int i = 0; i < self->header_count; i++) {
		/* Keys and values are length-delimited, not null-terminated, so bound the width */
		printf("%.*s:%.*s\n", self->headers[i].key_length, self->headers[i].key,
		       self->headers[i].value_length, self->headers[i].value);
	}
	printf("Body Length %d\n", self->body_length);
	printf("Body Read Length %d\n", self->body_read_length);
}
/***************************************************
* General HTTP Request Functions *
**************************************************/

@ -2,15 +2,11 @@
#include <http_parser.h>
#include <sys/uio.h>
/* Conditionally load libuv */
#ifdef USE_HTTP_UVIO
#include <uv.h>
#endif
#include "http.h"
#define HTTP_RESPONSE_200_OK "HTTP/1.1 200 OK\r\n"
#define HTTP_RESPONSE_504_SERVICE_UNAVAILABLE "HTTP/1.1 504 Service Unavailable\r\n\r\n"
#define HTTP_RESPONSE_503_SERVICE_UNAVAILABLE "HTTP/1.1 503 Service Unavailable\r\n\r\n"
#define HTTP_RESPONSE_400_BAD_REQUEST "HTTP/1.1 400 Bad Request\r\n\r\n"
#define HTTP_RESPONSE_CONTENT_LENGTH "Content-Length: "
#define HTTP_RESPONSE_CONTENT_LENGTH_TERMINATOR "\r\n\r\n" /* content body follows this */
@ -30,11 +26,7 @@ struct http_response {
int body_length;
char * status;
int status_length;
#ifdef USE_HTTP_UVIO
uv_buf_t bufs[HTTP_MAX_HEADER_COUNT * 2 + 3]; /* max headers, one line for status code, remaining for body! */
#else
struct iovec bufs[HTTP_MAX_HEADER_COUNT * 2 + 3];
#endif
struct iovec bufs[HTTP_MAX_HEADER_COUNT * 2 + 3];
};
/***************************************************

@ -0,0 +1,57 @@
#pragma once
#include <stdatomic.h>
#include <stdint.h>
/*
* Counts to track requests and responses
* requests and 5XX (admissions control rejections) are only tracked by the listener core, so they are not
* behind a compiler flag. 2XX and 4XX can be incremented by worker cores, so they are behind a flag because
* of concerns about contention
*/
extern _Atomic uint32_t http_total_requests;
extern _Atomic uint32_t http_total_5XX;
#ifdef LOG_TOTAL_REQS_RESPS
extern _Atomic uint32_t http_total_2XX;
extern _Atomic uint32_t http_total_4XX;
#endif
/**
 * Initializes all HTTP request/response counters to zero.
 * The 2XX/4XX counters only exist when LOG_TOTAL_REQS_RESPS is set (see externs above).
 */
static inline void
http_total_init()
{
	atomic_init(&http_total_requests, 0);
	atomic_init(&http_total_5XX, 0);
#ifdef LOG_TOTAL_REQS_RESPS
	atomic_init(&http_total_2XX, 0);
	atomic_init(&http_total_4XX, 0);
#endif
}
/* Counts an incoming request. Per the counter notes in this header, only the
 * listener core increments this, so it is unconditional (no contention concern) */
static inline void
http_total_increment_request()
{
	atomic_fetch_add(&http_total_requests, 1);
}
/* Counts a 2XX response. Compiled out unless LOG_TOTAL_REQS_RESPS because worker
 * cores increment it and contention is a concern (see header notes).
 * NOTE(review): lowercase "2xx" is inconsistent with http_total_increment_4XX/5XX;
 * renaming would break callers, so flagged only */
static inline void
http_total_increment_2xx()
{
#ifdef LOG_TOTAL_REQS_RESPS
	atomic_fetch_add(&http_total_2XX, 1);
#endif
}
/* Counts a 4XX response. Compiled out unless LOG_TOTAL_REQS_RESPS because worker
 * cores increment it and contention is a concern (see header notes) */
static inline void
http_total_increment_4XX()
{
#ifdef LOG_TOTAL_REQS_RESPS
	atomic_fetch_add(&http_total_4XX, 1);
#endif
}
/* Counts a 5XX response (admissions control rejections). Incremented by the
 * listener core per the header notes, so unconditional like the request counter */
static inline void
http_total_increment_5XX()
{
	atomic_fetch_add(&http_total_5XX, 1);
}

@ -1,89 +0,0 @@
#pragma once
#include <assert.h>
#include <sys/mman.h>
#include <signal.h>
#include <uv.h>
#include "http_request.h"
#include "runtime.h"
#include "sandbox.h"
/**
* Parses data read by the libuv stream chunk-by-chunk until the message is complete
* Then stops the stream and wakes up the sandbox
* @param stream
* @param number_read bytes read
* @param buffer unused
*
* FIXME: is there some weird edge case where a UNICODE character might be split between reads? Do we care?
* Called after libuv has read a chunk of data. Issue #100
*/
static inline void
libuv_callbacks_on_read_parse_http_request(uv_stream_t *stream, ssize_t number_read, const uv_buf_t *buffer)
{
struct sandbox *sandbox = stream->data;
/* Parse the chunks libuv has read on our behalf until we've parse to message end */
if (number_read > 0) {
// FIXME: Broken by refactor to sandbox_parse_http_request changes to return code
if (sandbox_parse_http_request(sandbox, number_read) != 0) return;
sandbox->request_response_data_length += number_read;
struct http_request *rh = &sandbox->http_request;
if (!rh->message_end) return;
}
/* When the entire message has been read, stop the stream and wakeup the sandbox */
uv_read_stop(stream);
worker_thread_wakeup_sandbox(sandbox);
}
/**
* On libuv close, executes this callback to wake the blocked sandbox back up
* @param stream
*/
static inline void
libuv_callbacks_on_close_wakeup_sakebox(uv_handle_t *stream)
{
struct sandbox *sandbox = stream->data;
worker_thread_wakeup_sandbox(sandbox);
}
/**
* On libuv shutdown, executes this callback to wake the blocked sandbox back up
* @param req shutdown request
* @param status unused in callback
*/
static inline void
libuv_callbacks_on_shutdown_wakeup_sakebox(uv_shutdown_t *req, int status)
{
struct sandbox *sandbox = req->data;
worker_thread_wakeup_sandbox(sandbox);
}
/**
* On libuv write, executes this callback to wake the blocked sandbox back up
* In case of error, shutdown the sandbox
* @param write shutdown request
* @param status status code
*/
static inline void
libuv_callbacks_on_write_wakeup_sandbox(uv_write_t *write, int status)
{
struct sandbox *sandbox = write->data;
if (status < 0) {
sandbox->client_libuv_shutdown_request.data = sandbox;
uv_shutdown(&sandbox->client_libuv_shutdown_request, (uv_stream_t *)&sandbox->client_libuv_stream,
libuv_callbacks_on_shutdown_wakeup_sakebox);
return;
}
worker_thread_wakeup_sandbox(sandbox);
}
static inline void
libuv_callbacks_on_allocate_setup_request_response_data(uv_handle_t *h, size_t suggested, uv_buf_t *buf)
{
struct sandbox *sandbox = h->data;
size_t l = (sandbox->module->max_request_or_response_size - sandbox->request_response_data_length);
buf->base = (sandbox->request_response_data + sandbox->request_response_data_length);
buf->len = l > suggested ? suggested : l;
}

@ -0,0 +1,4 @@
#pragma once

/* Branch-prediction hints wrapping GCC/Clang __builtin_expect.
 * likely(x): condition expected true; unlikely(x): condition expected false.
 * The !! normalizes any scalar expression to 0 or 1 */
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

@ -26,12 +26,12 @@ typedef ck_spinlock_mcs_t lock_t;
* @param lock - the address of the lock
* @param unique_variable_name - a unique prefix to hygienically namespace an associated lock/unlock pair
*/
#define LOCK_LOCK_WITH_BOOKKEEPING(lock, unique_variable_name) \
assert(!runtime_is_worker() || !software_interrupt_is_enabled()); \
struct ck_spinlock_mcs unique_variable_name##_node; \
uint64_t unique_variable_name##_pre = __getcycles(); \
ck_spinlock_mcs_lock((lock), &(unique_variable_name##_node)); \
worker_thread_lock_duration += (__getcycles() - unique_variable_name##_pre);
#define LOCK_LOCK_WITH_BOOKKEEPING(lock, unique_variable_name) \
assert(!runtime_is_worker() || !software_interrupt_is_enabled()); \
struct ck_spinlock_mcs _hygiene_##unique_variable_name##_node; \
uint64_t _hygiene_##unique_variable_name##_pre = __getcycles(); \
ck_spinlock_mcs_lock((lock), &(_hygiene_##unique_variable_name##_node)); \
worker_thread_lock_duration += (__getcycles() - _hygiene_##unique_variable_name##_pre);
/**
* Unlocks a lock
@ -40,7 +40,7 @@ typedef ck_spinlock_mcs_t lock_t;
*/
#define LOCK_UNLOCK_WITH_BOOKKEEPING(lock, unique_variable_name) \
assert(!runtime_is_worker() || !software_interrupt_is_enabled()); \
ck_spinlock_mcs_unlock(lock, &(unique_variable_name##_node));
ck_spinlock_mcs_unlock(lock, &(_hygiene_##unique_variable_name##_node));
/**
* Locks a lock, keeping track of overhead

@ -1,11 +1,14 @@
#pragma once
#include <string.h>
#include <uv.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <netdb.h>
#include "admissions_control.h"
#include "admissions_info.h"
#include "http.h"
#include "panic.h"
#include "perf_window.h"
#include "software_interrupt.h"
#include "types.h"
@ -52,7 +55,7 @@ struct module {
struct indirect_table_entry indirect_table[INDIRECT_TABLE_SIZE];
struct sockaddr_in socket_address;
int socket_descriptor;
struct perf_window perf_window;
struct admissions_info admissions_info;
int port;
/*
@ -229,5 +232,6 @@ module_set_http_info(struct module *module, int request_count, char *request_hea
void module_free(struct module *module);
struct module *module_new(char *mod_name, char *mod_path, int32_t argument_count, uint32_t stack_sz, uint32_t max_heap,
uint32_t relative_deadline_us, int port, int req_sz, int resp_sz);
uint32_t relative_deadline_us, int port, int req_sz, int resp_sz, int admissions_percentile,
uint32_t expected_execution_us);
int module_new_from_json(char *filename);

@ -2,7 +2,7 @@
#include <errno.h>
#include "debuglog.h"
#include "panic.h"
#include "module.h"
struct module *module_database_find_by_name(char *name);
@ -30,7 +30,7 @@ module_database_add(struct module *module)
done:
return rc;
err_no_space:
debuglog("Cannot add module. Database is full.\n");
panic("Cannot add module. Database is full.\n");
rc = -ENOSPC;
goto done;
}

@ -4,6 +4,7 @@
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#define panic(fmt, ...) \
{ \

@ -21,7 +21,7 @@
* overwritten, providing a sorted circular buffer
*/
struct execution_node {
uint32_t execution_time;
uint64_t execution_time;
uint16_t by_termination_idx; /* Reverse idx of the associated by_termination bin. Used for swaps! */
};
@ -70,8 +70,8 @@ perf_window_swap(struct perf_window *self, uint16_t first_by_duration_idx, uint1
assert(self->by_termination[first_by_termination_idx] == first_by_duration_idx);
assert(self->by_termination[second_by_termination_idx] == second_by_duration_idx);
uint32_t first_execution_time = self->by_duration[first_by_duration_idx].execution_time;
uint32_t second_execution_time = self->by_duration[second_by_duration_idx].execution_time;
uint64_t first_execution_time = self->by_duration[first_by_duration_idx].execution_time;
uint64_t second_execution_time = self->by_duration[second_by_duration_idx].execution_time;
/* Swap Indices in Buffer*/
self->by_termination[first_by_termination_idx] = second_by_duration_idx;
@ -96,15 +96,15 @@ perf_window_swap(struct perf_window *self, uint16_t first_by_duration_idx, uint1
* @param value
*/
static inline void
perf_window_add(struct perf_window *self, uint32_t value)
perf_window_add(struct perf_window *self, uint64_t value)
{
assert(self != NULL);
if (unlikely(!LOCK_IS_LOCKED(&self->lock))) panic("lock not held when calling perf_window_add\n");
/* A successful invocation should run for a non-zero amount of time */
assert(value > 0);
LOCK_LOCK(&self->lock);
/* If count is 0, then fill entire array with initial execution times */
if (self->count == 0) {
for (int i = 0; i < PERF_WINDOW_BUFFER_SIZE; i++) {
@ -149,24 +149,27 @@ perf_window_add(struct perf_window *self, uint32_t value)
self->count++;
done:
LOCK_UNLOCK(&self->lock);
return;
}
/**
* Returns pXX execution time
* @param self
* @param percentile represented by double between 0 and 1
* @returns execution time or -1 if by_termination is empty
* @param percentile represented by int between 50 and 99
* @param precomputed_index memoized index for quick lookup when by_duration is full
* @returns execution time
*/
static inline uint32_t
perf_window_get_percentile(struct perf_window *self, double percentile)
static inline uint64_t
perf_window_get_percentile(struct perf_window *self, int percentile, int precomputed_index)
{
assert(self != NULL);
assert(percentile > 0 && percentile < 1);
assert(percentile >= 50 && percentile <= 99);
int size = self->count;
assert(size > 0);
if (self->count == 0) return -1;
if (likely(size >= PERF_WINDOW_BUFFER_SIZE)) return self->by_duration[precomputed_index].execution_time;
return self->by_duration[(int)(PERF_WINDOW_BUFFER_SIZE * percentile)].execution_time;
return self->by_duration[size * percentile / 100].execution_time;
}
/**

@ -5,8 +5,6 @@
#include "runtime.h"
#include "worker_thread.h"
#define MAX 4096
/**
* How to get the priority out of the generic element
* We assume priority is expressed as an unsigned 64-bit integer (i.e. cycles or
@ -19,31 +17,51 @@ typedef uint64_t (*priority_queue_get_priority_fn_t)(void *element);
/* We assume that priority is expressed in terms of a 64 bit unsigned integral */
struct priority_queue {
priority_queue_get_priority_fn_t get_priority_fn;
bool use_lock;
lock_t lock;
uint64_t highest_priority;
void * items[MAX];
int first_free;
priority_queue_get_priority_fn_t get_priority_fn;
size_t size;
size_t capacity;
void * items[];
};
/**
* Checks if a priority queue is empty
* @param self the priority queue to check
* @returns true if empty, else otherwise
* Peek at the priority of the highest priority task without having to take the lock
* Because this is a min-heap PQ, the highest priority is the lowest 64-bit integer
* This is used to store an absolute deadline
* @returns value of highest priority value in queue or ULONG_MAX if empty
*/
static inline bool
priority_queue_is_empty(struct priority_queue *self)
static inline uint64_t
priority_queue_peek(struct priority_queue *self)
{
return self->highest_priority == ULONG_MAX;
return self->highest_priority;
}
void priority_queue_initialize(struct priority_queue *self, priority_queue_get_priority_fn_t get_priority_fn);
int priority_queue_enqueue(struct priority_queue *self, void *value);
int priority_queue_dequeue(struct priority_queue *self, void **dequeued_element);
int priority_queue_dequeue_if_earlier(struct priority_queue *self, void **dequeued_element, uint64_t target_deadline);
int priority_queue_length(struct priority_queue *self);
struct priority_queue *
priority_queue_initialize(size_t capacity, bool use_lock, priority_queue_get_priority_fn_t get_priority_fn);
void priority_queue_free(struct priority_queue *self);
int priority_queue_length(struct priority_queue *self);
int priority_queue_length_nolock(struct priority_queue *self);
int priority_queue_enqueue(struct priority_queue *self, void *value);
int priority_queue_enqueue_nolock(struct priority_queue *self, void *value);
int priority_queue_delete(struct priority_queue *self, void *value);
int priority_queue_delete_nolock(struct priority_queue *self, void *value);
int priority_queue_dequeue(struct priority_queue *self, void **dequeued_element);
int priority_queue_dequeue_nolock(struct priority_queue *self, void **dequeued_element);
int priority_queue_dequeue_if_earlier(struct priority_queue *self, void **dequeued_element, uint64_t target_deadline);
int priority_queue_dequeue_if_earlier_nolock(struct priority_queue *self, void **dequeued_element,
uint64_t target_deadline);
uint64_t priority_queue_peek(struct priority_queue *self);
int priority_queue_delete(struct priority_queue *self, void *value);
int priority_queue_top(struct priority_queue *self, void **dequeued_element);
int priority_queue_top(struct priority_queue *self, void **dequeued_element);
int priority_queue_top_nolock(struct priority_queue *self, void **dequeued_element);
#endif /* PRIORITY_QUEUE_H */

@ -5,18 +5,15 @@
#include <stdatomic.h>
#include <stdbool.h>
#include "likely.h"
#include "types.h"
#define LISTENER_THREAD_CORE_ID 0 /* Dedicated Listener Core */
#define LISTENER_THREAD_MAX_EPOLL_EVENTS 128
#define RUNTIME_LOG_FILE "awesome.log"
#define RUNTIME_LOG_FILE "sledge.log"
#define RUNTIME_MAX_SANDBOX_REQUEST_COUNT (1 << 19) /* random! */
#define RUNTIME_READ_WRITE_VECTOR_LENGTH 16
#define RUNTIME_GRANULARITY 100000
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
/*
* Descriptor of the epoll instance used to monitor the socket descriptors of registered
@ -24,49 +21,19 @@
*/
extern int runtime_epoll_file_descriptor;
/* Optional path to a file to log sandbox perf metrics */
extern FILE *runtime_sandbox_perf_log;
/*
* Assumption: All cores are the same speed
* See runtime_get_processor_speed_MHz for further details
*/
extern float runtime_processor_speed_MHz;
extern uint32_t runtime_processor_speed_MHz;
extern uint64_t runtime_relative_deadline_us_max;
/* Count of worker threads and array of their pthread identifiers */
extern pthread_t runtime_worker_threads[];
extern uint32_t runtime_worker_threads_count;
extern uint64_t runtime_admissions_capacity;
#ifdef LOG_TOTAL_REQS_RESPS
/* Counts to track requests and responses */
extern _Atomic uint32_t runtime_total_requests;
extern _Atomic uint32_t runtime_total_2XX_responses;
extern _Atomic uint32_t runtime_total_4XX_responses;
extern _Atomic uint32_t runtime_total_5XX_responses;
#endif
#ifdef LOG_SANDBOX_TOTALS
/* Counts to track sanboxes running through state transitions */
extern _Atomic uint32_t runtime_total_freed_requests;
extern _Atomic uint32_t runtime_total_initialized_sandboxes;
extern _Atomic uint32_t runtime_total_runnable_sandboxes;
extern _Atomic uint32_t runtime_total_blocked_sandboxes;
extern _Atomic uint32_t runtime_total_running_sandboxes;
extern _Atomic uint32_t runtime_total_preempted_sandboxes;
extern _Atomic uint32_t runtime_total_returned_sandboxes;
extern _Atomic uint32_t runtime_total_error_sandboxes;
extern _Atomic uint32_t runtime_total_complete_sandboxes;
#endif
/*
* Unitless estimate of the instantaneous fraction of system capacity required to complete all previously
* admitted work. This is used to calculate free capacity as part of admissions control
*
* The estimated requirements of a single admitted request is calculated as
* estimated execution time (cycles) / relative deadline (cycles)
*
* These estimates are incremented on request acceptance and decremented on request completion (either
* success or failure)
*/
extern _Atomic uint64_t runtime_admitted;
void alloc_linear_memory(void);
void expand_memory(void);
@ -92,3 +59,22 @@ runtime_is_worker()
return false;
}
enum RUNTIME_SCHEDULER
{
RUNTIME_SCHEDULER_FIFO = 0,
RUNTIME_SCHEDULER_EDF = 1
};
/**
 * Returns the symbolic name of a scheduler variant for logging
 * @param variant - a member of enum RUNTIME_SCHEDULER
 * @returns static string naming the variant, or "UNKNOWN" for invalid values
 */
static inline char *
print_runtime_scheduler(enum RUNTIME_SCHEDULER variant)
{
	switch (variant) {
	case RUNTIME_SCHEDULER_FIFO:
		return "RUNTIME_SCHEDULER_FIFO";
	case RUNTIME_SCHEDULER_EDF:
		return "RUNTIME_SCHEDULER_EDF";
	default:
		/* The original fell off the end of a non-void function for values outside
		 * the enum, which is undefined behavior; also drops the stray trailing ';' */
		return "UNKNOWN";
	}
}
extern enum RUNTIME_SCHEDULER runtime_scheduler;

@ -1,17 +1,17 @@
#pragma once
#include <ucontext.h>
#include <uv.h>
#include <stdbool.h>
#include "arch/context.h"
#include "debuglog.h"
#include "client_socket.h"
#include "deque.h"
#include "http_request.h"
#include "http_response.h"
#include "module.h"
#include "ps_list.h"
#include "sandbox_request.h"
#include "sandbox_state.h"
#include "software_interrupt.h"
#define SANDBOX_FILE_DESCRIPTOR_PREOPEN_MAGIC (707707707) /* upside down LOLLOLLOL 🤣😂🤣*/
@ -23,34 +23,11 @@
********************/
struct sandbox_io_handle {
int file_descriptor;
union uv_any_handle libuv_handle;
int file_descriptor;
};
typedef enum
{
SANDBOX_UNINITIALIZED = 0, /* Assumption: mmap zeros out structure */
SANDBOX_ALLOCATED,
SANDBOX_SET_AS_INITIALIZED,
SANDBOX_INITIALIZED,
SANDBOX_SET_AS_RUNNABLE,
SANDBOX_RUNNABLE,
SANDBOX_SET_AS_RUNNING,
SANDBOX_RUNNING,
SANDBOX_SET_AS_PREEMPTED,
SANDBOX_PREEMPTED,
SANDBOX_SET_AS_BLOCKED,
SANDBOX_BLOCKED,
SANDBOX_SET_AS_RETURNED,
SANDBOX_RETURNED,
SANDBOX_SET_AS_COMPLETE,
SANDBOX_COMPLETE,
SANDBOX_SET_AS_ERROR,
SANDBOX_ERROR,
SANDBOX_STATE_COUNT
} sandbox_state_t;
struct sandbox {
uint64_t id;
sandbox_state_t state;
uint32_t sandbox_size; /* The struct plus enough buffer to hold the request or response (sized off largest) */
@ -71,12 +48,12 @@ struct sandbox {
uint64_t last_state_change_timestamp; /* Used for bookkeeping of actual execution time */
/* Duration of time (in cycles) that the sandbox is in each state */
uint32_t initializing_duration;
uint32_t runnable_duration;
uint32_t preempted_duration;
uint32_t running_duration;
uint32_t blocked_duration;
uint32_t returned_duration;
uint64_t initializing_duration;
uint64_t runnable_duration;
uint64_t preempted_duration;
uint64_t running_duration;
uint64_t blocked_duration;
uint64_t returned_duration;
uint64_t absolute_deadline;
uint64_t total_time; /* From Request to Response */
@ -96,8 +73,6 @@ struct sandbox {
struct sandbox_io_handle io_handles[SANDBOX_MAX_IO_HANDLE_COUNT];
struct sockaddr client_address; /* client requesting connection! */
int client_socket_descriptor;
uv_tcp_t client_libuv_stream;
uv_shutdown_t client_libuv_shutdown_request;
bool is_repeat_header;
http_parser http_parser;
@ -140,52 +115,6 @@ void sandbox_free_linear_memory(struct sandbox *sandbox);
void sandbox_main(struct sandbox *sandbox);
size_t sandbox_parse_http_request(struct sandbox *sandbox, size_t length);
static inline char *
sandbox_state_stringify(sandbox_state_t state)
{
switch (state) {
case SANDBOX_UNINITIALIZED:
return "Uninitialized";
case SANDBOX_ALLOCATED:
return "Allocated";
case SANDBOX_SET_AS_INITIALIZED:
return "Set As Initialized";
case SANDBOX_INITIALIZED:
return "Initialized";
case SANDBOX_SET_AS_RUNNABLE:
return "Set As Runnable";
case SANDBOX_RUNNABLE:
return "Runnable";
case SANDBOX_SET_AS_RUNNING:
return "Set As Running";
case SANDBOX_RUNNING:
return "Running";
case SANDBOX_SET_AS_PREEMPTED:
return "Set As Preempted";
case SANDBOX_PREEMPTED:
return "Preempted";
case SANDBOX_SET_AS_BLOCKED:
return "Set As Blocked";
case SANDBOX_BLOCKED:
return "Blocked";
case SANDBOX_SET_AS_RETURNED:
return "Set As Returned";
case SANDBOX_RETURNED:
return "Returned";
case SANDBOX_SET_AS_COMPLETE:
return "Set As Complete";
case SANDBOX_COMPLETE:
return "Complete";
case SANDBOX_SET_AS_ERROR:
return "Set As Error";
case SANDBOX_ERROR:
return "Error";
default:
/* Crash, as this should be exclusive */
panic("%d is an unrecognized sandbox state\n", state);
}
}
/**
* Given a sandbox, returns the module that sandbox is executing
@ -226,7 +155,6 @@ sandbox_initialize_io_handle(struct sandbox *sandbox)
}
if (io_handle_index == SANDBOX_MAX_IO_HANDLE_COUNT) return -1;
sandbox->io_handles[io_handle_index].file_descriptor = SANDBOX_FILE_DESCRIPTOR_PREOPEN_MAGIC;
memset(&sandbox->io_handles[io_handle_index].libuv_handle, 0, sizeof(union uv_any_handle));
return io_handle_index;
}
@ -298,26 +226,16 @@ sandbox_close_file_descriptor(struct sandbox *sandbox, int io_handle_index)
}
/**
* Get the Libuv handle located at idx of the sandbox ith io_handle
* @param sandbox
* @param io_handle_index index of the handle containing libuv_handle???
* @returns any libuv handle or a NULL pointer in case of error
*/
static inline union uv_any_handle *
sandbox_get_libuv_handle(struct sandbox *sandbox, int io_handle_index)
{
if (!sandbox) return NULL;
if (io_handle_index >= SANDBOX_MAX_IO_HANDLE_COUNT || io_handle_index < 0) return NULL;
return &sandbox->io_handles[io_handle_index].libuv_handle;
}
/**
* Prints key performance metrics for a sandbox to STDOUT
* Prints key performance metrics for a sandbox to runtime_sandbox_perf_log
* This is defined by an environment variable
* @param sandbox
*/
static inline void
sandbox_print_perf(struct sandbox *sandbox)
{
/* If the log was not defined by an environment variable, early out */
if (runtime_sandbox_perf_log == NULL) return;
uint32_t total_time_us = sandbox->total_time / runtime_processor_speed_MHz;
uint32_t queued_us = (sandbox->allocation_timestamp - sandbox->request_arrival_timestamp)
/ runtime_processor_speed_MHz;
@ -326,14 +244,11 @@ sandbox_print_perf(struct sandbox *sandbox)
uint32_t running_us = sandbox->running_duration / runtime_processor_speed_MHz;
uint32_t blocked_us = sandbox->blocked_duration / runtime_processor_speed_MHz;
uint32_t returned_us = sandbox->returned_duration / runtime_processor_speed_MHz;
debuglog("%lu, %s():%d, state: %s, deadline: %u, actual: %u, queued: %u, initializing: %u, "
"runnable: %u, "
"running: %u, "
"blocked: %u, "
"returned: %u\n",
sandbox->request_arrival_timestamp, sandbox->module->name, sandbox->module->port,
sandbox_state_stringify(sandbox->state), sandbox->module->relative_deadline_us, total_time_us,
queued_us, initializing_us, runnable_us, running_us, blocked_us, returned_us);
fprintf(runtime_sandbox_perf_log, "%lu,%s():%d,%s,%u,%u,%u,%u,%u,%u,%u,%u\n", sandbox->id,
sandbox->module->name, sandbox->module->port, sandbox_state_stringify(sandbox->state),
sandbox->module->relative_deadline_us, total_time_us, queued_us, initializing_us, runnable_us,
running_us, blocked_us, returned_us);
}
static inline void
@ -341,17 +256,10 @@ sandbox_close_http(struct sandbox *sandbox)
{
assert(sandbox != NULL);
#ifdef USE_HTTP_UVIO
uv_close((uv_handle_t *)&sandbox->client_libuv_stream, libuv_callbacks_on_close_wakeup_sakebox);
worker_thread_process_io();
#else
int rc = epoll_ctl(worker_thread_epoll_file_descriptor, EPOLL_CTL_DEL, sandbox->client_socket_descriptor, NULL);
if (unlikely(rc < 0)) panic_err();
if (close(sandbox->client_socket_descriptor) < 0) {
panic("Error closing client socket - %s", strerror(errno));
}
#endif
client_socket_close(sandbox->client_socket_descriptor);
}

@ -1,13 +1,19 @@
#pragma once
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/socket.h>
#include "debuglog.h"
#include "deque.h"
#include "http_total.h"
#include "module.h"
#include "runtime.h"
#include "sandbox_state.h"
struct sandbox_request {
uint64_t id;
struct module * module;
char * arguments;
int socket_descriptor;
@ -24,6 +30,30 @@ struct sandbox_request {
DEQUE_PROTOTYPE(sandbox, struct sandbox_request *);
/* Count of the total number of requests we've ever allocated. Never decrements as it is used to generate IDs */
extern _Atomic uint32_t sandbox_request_count;
static inline void
sandbox_request_count_initialize()
{
atomic_init(&sandbox_request_count, 0);
}
static inline uint32_t
sandbox_request_count_postfix_increment()
{
return atomic_fetch_add(&sandbox_request_count, 1);
}
static inline void
sandbox_request_log_allocation(struct sandbox_request *sandbox_request)
{
#ifdef LOG_REQUEST_ALLOCATION
debuglog("Sandbox Request %lu: of %s:%d\n", sandbox_request->id, sandbox_request->module->name,
sandbox_request->module->port);
#endif
}
/**
* Allocates a new Sandbox Request and places it on the Global Deque
* @param module the module we want to request
@ -40,17 +70,25 @@ sandbox_request_allocate(struct module *module, char *arguments, int socket_desc
{
struct sandbox_request *sandbox_request = (struct sandbox_request *)malloc(sizeof(struct sandbox_request));
assert(sandbox_request);
/* Sets the ID to the value before the increment */
sandbox_request->id = sandbox_request_count_postfix_increment();
sandbox_request->module = module;
sandbox_request->arguments = arguments;
sandbox_request->socket_descriptor = socket_descriptor;
sandbox_request->socket_address = (struct sockaddr *)socket_address;
sandbox_request->request_arrival_timestamp = request_arrival_timestamp;
sandbox_request->absolute_deadline = request_arrival_timestamp + module->relative_deadline;
sandbox_request->admissions_estimate = admissions_estimate;
#ifdef LOG_REQUEST_ALLOCATION
debuglog("Allocating %lu of %s:%d\n", sandbox_request->request_arrival_timestamp, sandbox_request->module->name,
sandbox_request->module->port);
#endif
/*
* Admissions Control State
* Assumption: an estimate of 0 should have been interpreted as a rejection
*/
assert(admissions_estimate != 0);
sandbox_request->admissions_estimate = admissions_estimate;
sandbox_request_log_allocation(sandbox_request);
return sandbox_request;
}

@ -0,0 +1,81 @@
#pragma once
#include <stdatomic.h>
#include "debuglog.h"
#include "likely.h"
#include "panic.h"
typedef enum
{
SANDBOX_UNINITIALIZED = 0, /* Assumption: mmap zeros out structure */
SANDBOX_ALLOCATED,
SANDBOX_SET_AS_INITIALIZED,
SANDBOX_INITIALIZED,
SANDBOX_SET_AS_RUNNABLE,
SANDBOX_RUNNABLE,
SANDBOX_SET_AS_RUNNING,
SANDBOX_RUNNING,
SANDBOX_SET_AS_PREEMPTED,
SANDBOX_PREEMPTED,
SANDBOX_SET_AS_BLOCKED,
SANDBOX_BLOCKED,
SANDBOX_SET_AS_RETURNED,
SANDBOX_RETURNED,
SANDBOX_SET_AS_COMPLETE,
SANDBOX_COMPLETE,
SANDBOX_SET_AS_ERROR,
SANDBOX_ERROR,
SANDBOX_STATE_COUNT
} sandbox_state_t;
extern const bool sandbox_state_is_terminal[SANDBOX_STATE_COUNT];
extern const char *sandbox_state_labels[SANDBOX_STATE_COUNT];
static inline const char *
sandbox_state_stringify(sandbox_state_t state)
{
if (unlikely(state >= SANDBOX_STATE_COUNT)) panic("%d is an unrecognized sandbox state\n", state);
return sandbox_state_labels[state];
}
static inline void
sandbox_state_log_transition(uint64_t sandbox_id, sandbox_state_t last_state, sandbox_state_t current_state)
{
#ifdef LOG_STATE_CHANGES
debuglog("Sandbox %lu | %s => %s\n", sandbox_id, sandbox_state_stringify(last_state),
sandbox_state_stringify(current_state));
#endif
}
#ifdef LOG_SANDBOX_COUNT
extern _Atomic uint32_t sandbox_state_count[SANDBOX_STATE_COUNT];
#endif
static inline void
sandbox_count_initialize()
{
#ifdef LOG_SANDBOX_COUNT
for (int i = 0; i < SANDBOX_STATE_COUNT; i++) atomic_init(&sandbox_state_count[i], 0);
#endif
}
static inline void
runtime_sandbox_total_increment(sandbox_state_t state)
{
#ifdef LOG_SANDBOX_COUNT
if (!sandbox_state_is_terminal[state]) panic("Unexpectedly logging intermediate transition state");
atomic_fetch_add(&sandbox_state_count[state], 1);
#endif
}
static inline void
runtime_sandbox_total_decrement(sandbox_state_t state)
{
#ifdef LOG_SANDBOX_COUNT
if (atomic_load(&sandbox_state_count[state]) == 0) panic("Underflow of %s\n", sandbox_state_stringify(state));
atomic_fetch_sub(&sandbox_state_count[state], 1);
#endif
}

@ -11,8 +11,8 @@
#include "debuglog.h"
#define SOFTWARE_INTERRUPT_TIME_TO_START_IN_USEC (10 * 1000) /* 10 ms */
#define SOFTWARE_INTERRUPT_INTERVAL_DURATION_IN_USEC (5 * 1000) /* 5 ms */
#define SOFTWARE_INTERRUPT_TIME_TO_START_IN_USEC (2 * 1000) /* 2 ms */
#define SOFTWARE_INTERRUPT_INTERVAL_DURATION_IN_USEC (1 * 1000) /* 1 ms */
/************
* Externs *

@ -1,17 +1,21 @@
#pragma once
#include <uv.h>
#include "runtime.h"
/* If multicore, use all but the dedicated listener core
If there are fewer cores than this, main dynamically overrides this and uses all available */
#ifndef NCORES
#warning "NCORES not defined in Makefile. Defaulting to 2"
#define NCORES 2
#endif
#if NCORES == 1
#error "RUNTIME MINIMUM REQUIREMENT IS 2 CORES"
#endif
#define WORKER_THREAD_CORE_COUNT (NCORES > 1 ? NCORES - 1 : NCORES)
extern __thread uint64_t worker_thread_lock_duration;
extern __thread uint64_t worker_thread_start_timestamp;
extern __thread uv_loop_t worker_thread_uvio_handle;
extern __thread int worker_thread_epoll_file_descriptor;
extern __thread uint64_t worker_thread_lock_duration;
extern __thread uint64_t worker_thread_start_timestamp;
extern __thread int worker_thread_epoll_file_descriptor;
void *worker_thread_main(void *return_code);
@ -54,12 +58,3 @@ worker_thread_get_memory_string(uint32_t offset, uint32_t max_length)
}
return NULL;
}
/**
* Get global libuv handle
*/
static inline uv_loop_t *
worker_thread_get_libuv_handle(void)
{
return &worker_thread_uvio_handle;
}

@ -0,0 +1,4 @@
#include "admissions_control.h"
_Atomic uint64_t admissions_control_admitted;
uint64_t admissions_control_capacity;

@ -91,15 +91,3 @@ current_sandbox_close_file_descriptor(int io_handle_index)
struct sandbox *sandbox = current_sandbox_get();
sandbox_close_file_descriptor(sandbox, io_handle_index);
}
/**
* Get the Libuv handle located at idx of the sandbox ith io_handle
* @param io_handle_index index of the handle containing libuv_handle???
* @returns any libuv handle
*/
union uv_any_handle *
current_sandbox_get_libuv_handle(int io_handle_index)
{
struct sandbox *sandbox = current_sandbox_get();
return sandbox_get_libuv_handle(sandbox, io_handle_index);
}

@ -5,7 +5,7 @@
#include "priority_queue.h"
#include "runtime.h"
static struct priority_queue global_request_scheduler_minheap;
static struct priority_queue *global_request_scheduler_minheap;
/**
* Pushes a sandbox request to the global deque
@ -15,12 +15,11 @@ static struct priority_queue global_request_scheduler_minheap;
static struct sandbox_request *
global_request_scheduler_minheap_add(void *sandbox_request)
{
/* This function is called by both the listener core and workers */
#ifndef NDEBUG
if (runtime_is_worker()) assert(!software_interrupt_is_enabled());
#endif
assert(sandbox_request);
assert(global_request_scheduler_minheap);
if (unlikely(runtime_is_worker())) panic("%s is only callable by the listener thread\n", __func__);
int return_code = priority_queue_enqueue(&global_request_scheduler_minheap, sandbox_request);
int return_code = priority_queue_enqueue(global_request_scheduler_minheap, sandbox_request);
/* TODO: Propagate -1 to caller. Issue #91 */
if (return_code == -ENOSPC) panic("Request Queue is full\n");
return sandbox_request;
@ -34,7 +33,7 @@ int
global_request_scheduler_minheap_remove(struct sandbox_request **removed_sandbox_request)
{
assert(!software_interrupt_is_enabled());
return priority_queue_dequeue(&global_request_scheduler_minheap, (void **)removed_sandbox_request);
return priority_queue_dequeue(global_request_scheduler_minheap, (void **)removed_sandbox_request);
}
/**
@ -47,7 +46,7 @@ global_request_scheduler_minheap_remove_if_earlier(struct sandbox_request **remo
uint64_t target_deadline)
{
assert(!software_interrupt_is_enabled());
return priority_queue_dequeue_if_earlier(&global_request_scheduler_minheap, (void **)removed_sandbox_request,
return priority_queue_dequeue_if_earlier(global_request_scheduler_minheap, (void **)removed_sandbox_request,
target_deadline);
}
@ -60,7 +59,7 @@ global_request_scheduler_minheap_remove_if_earlier(struct sandbox_request **remo
static uint64_t
global_request_scheduler_minheap_peek(void)
{
return priority_queue_peek(&global_request_scheduler_minheap);
return priority_queue_peek(global_request_scheduler_minheap);
}
uint64_t
@ -77,7 +76,7 @@ sandbox_request_get_priority_fn(void *element)
void
global_request_scheduler_minheap_initialize()
{
priority_queue_initialize(&global_request_scheduler_minheap, sandbox_request_get_priority_fn);
global_request_scheduler_minheap = priority_queue_initialize(1000, true, sandbox_request_get_priority_fn);
struct global_request_scheduler_config config = {
.add_fn = global_request_scheduler_minheap_add,
@ -88,3 +87,9 @@ global_request_scheduler_minheap_initialize()
global_request_scheduler_initialize(&config);
}
void
global_request_scheduler_minheap_free()
{
priority_queue_free(global_request_scheduler_minheap);
}

@ -1,5 +1,4 @@
#include <uv.h>
#include "debuglog.h"
#include "http.h"
#include "http_request.h"
#include "http_response.h"
@ -24,10 +23,12 @@ int
http_parser_settings_on_url(http_parser *parser, const char *at, size_t length)
{
struct sandbox *sandbox = (struct sandbox *)parser->data;
if (sandbox->http_request.message_end || sandbox->http_request.header_end) return 0;
assert(!sandbox->http_request.message_end);
assert(!sandbox->http_request.header_end);
#ifdef LOG_HTTP_PARSER
debuglog("sandbox: %lu\n", sandbox->request_arrival_timestamp);
debuglog("sandbox: %lu, length: %zu, Content \"%.*s\"\n", sandbox->id, length, (int)length, at);
assert(strncmp(sandbox->module->name, (at + 1), length - 1) == 0);
#endif
@ -45,15 +46,16 @@ http_parser_settings_on_message_begin(http_parser *parser)
struct sandbox * sandbox = (struct sandbox *)parser->data;
struct http_request *http_request = &sandbox->http_request;
if (sandbox->http_request.message_end || sandbox->http_request.header_end) return 0;
assert(!sandbox->http_request.message_end);
assert(!sandbox->http_request.header_end);
#ifdef LOG_HTTP_PARSER
debuglog("sandbox: %lu\n", sandbox->request_arrival_timestamp);
debuglog("sandbox: %lu\n", sandbox->id);
#endif
http_request->message_begin = true;
http_request->last_was_value = true; /* should always start with a header */
sandbox->is_repeat_header = false;
return 0;
}
@ -72,43 +74,30 @@ http_parser_settings_on_header_field(http_parser *parser, const char *at, size_t
struct sandbox * sandbox = (struct sandbox *)parser->data;
struct http_request *http_request = &sandbox->http_request;
if (sandbox->http_request.message_end || sandbox->http_request.header_end) return 0;
#ifdef LOG_HTTP_PARSER
debuglog("sandbox: %lu\n", sandbox->request_arrival_timestamp);
debuglog("sandbox: %lu, length: %zu, Content \"%.*s\"\n", sandbox->id, length, (int)length, at);
#endif
/* Previous name continues */
assert(!sandbox->http_request.message_end);
assert(!sandbox->http_request.header_end);
if (http_request->last_was_value == false) {
/* Previous key continues */
assert(http_request->header_count > 0);
strncat(http_request->headers[http_request->header_count].key, at, length);
return 0;
}
/*
* We receive repeat headers for an unknown reason, so we need to ignore repeat headers
* This probably means that the headers are getting reparsed, so for the sake of performance
* this should be fixed upstream
*/
#ifdef LOG_HTTP_PARSER
for (int i = 0; i < http_request->header_count; i++) {
if (strncmp(http_request->headers[i].key, at, length) == 0) {
debuglog("Repeat header!\n");
assert(0);
sandbox->is_repeat_header = true;
break;
if (unlikely(http_request->headers[http_request->header_count].key_length + length
> HTTP_MAX_HEADER_LENGTH)) {
return -1;
}
}
#endif
if (!sandbox->is_repeat_header) {
if (unlikely(http_request->header_count >= HTTP_MAX_HEADER_COUNT)) { return -1; }
if (unlikely(length > HTTP_MAX_HEADER_LENGTH)) { return -1; }
http_request->headers[http_request->header_count++].key = (char *)at;
http_request->last_was_value = false;
sandbox->is_repeat_header = false;
http_request->headers[http_request->header_count].key_length += length;
return 0;
} else {
/* Start of new key */
if (unlikely(http_request->header_count >= HTTP_MAX_HEADER_COUNT)) return -1;
if (unlikely(length > HTTP_MAX_HEADER_LENGTH)) return -1;
http_request->header_count++;
http_request->headers[http_request->header_count - 1].key = (char *)at;
http_request->headers[http_request->header_count - 1].key_length = length;
http_request->last_was_value = false;
}
return 0;
@ -128,21 +117,24 @@ http_parser_settings_on_header_value(http_parser *parser, const char *at, size_t
struct sandbox * sandbox = (struct sandbox *)parser->data;
struct http_request *http_request = &sandbox->http_request;
if (http_request->message_end || http_request->header_end) return 0;
#ifdef LOG_HTTP_PARSER
debuglog("sandbox: %lu\n", sandbox->request_arrival_timestamp);
debuglog("sandbox: %lu, length: %zu, Content \"%.*s\"\n", sandbox->id, length, (int)length, at);
#endif
// TODO: If last_was_value is already true, we might need to append to value
assert(!sandbox->http_request.message_end);
assert(!sandbox->http_request.header_end);
/* it is from the sandbox's request_response_data, should persist. */
if (!sandbox->is_repeat_header) {
if (!http_request->last_was_value) {
if (unlikely(length >= HTTP_MAX_HEADER_VALUE_LENGTH)) return -1;
http_request->headers[http_request->header_count - 1].value = (char *)at;
http_request->last_was_value = true;
http_request->headers[http_request->header_count - 1].value = (char *)at;
http_request->headers[http_request->header_count - 1].value_length = length;
} else {
assert(http_request->headers[http_request->header_count - 1].value_length > 0);
http_request->headers[http_request->header_count - 1].value_length += length;
}
http_request->last_was_value = true;
return 0;
}
@ -156,16 +148,21 @@ http_parser_settings_on_header_end(http_parser *parser)
{
struct sandbox * sandbox = (struct sandbox *)parser->data;
struct http_request *http_request = &sandbox->http_request;
if (http_request->message_end || http_request->header_end) return 0;
assert(!sandbox->http_request.message_end);
assert(!sandbox->http_request.header_end);
#ifdef LOG_HTTP_PARSER
debuglog("sandbox: %lu\n", sandbox->request_arrival_timestamp);
debuglog("sandbox: %lu\n", sandbox->id);
#endif
http_request->header_end = true;
return 0;
}
const size_t http_methods_len = 8;
const char * http_methods[http_methods_len] = { "OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT" };
/**
* http-parser callback called for HTTP Bodies
* Assigns the parsed data to the http_request body of the sandbox struct
@ -181,38 +178,34 @@ http_parser_settings_on_body(http_parser *parser, const char *at, size_t length)
struct sandbox * sandbox = (struct sandbox *)parser->data;
struct http_request *http_request = &sandbox->http_request;
if (http_request->message_end) return 0;
assert(sandbox->http_request.header_end);
assert(!sandbox->http_request.message_end);
#ifdef LOG_HTTP_PARSER
debuglog("sandbox: %lu\n", sandbox->request_arrival_timestamp);
#endif
/* Assumption: We should never exceed the buffer we're reusing */
assert(http_request->body_length + length <= sandbox->module->max_request_size);
if (!http_request->body) {
#ifdef LOG_HTTP_PARSER
debuglog("Setting start of body!\n");
#endif
/* If this is the first invocation of the callback, just set */
http_request->body = (char *)at;
http_request->body_length = length;
} else {
#ifdef LOG_HTTP_PARSER
debuglog("Body: %p, Existing Length: %d\n", http_request->body, http_request->body_length);
debuglog("Expected Offset %d, Actual Offset: %lu\n", http_request->body_length,
at - http_request->body);
/* Attempt to copy and print the entire body */
uint64_t possible_body_len = at - http_request->body;
char test_buffer[possible_body_len + length + 1];
strncpy(test_buffer, http_request->body, possible_body_len);
test_buffer[length] = '\0';
debuglog("http_parser_settings_on_body: len %lu, content: %s\n", possible_body_len, test_buffer);
debuglog("Appending to existing body!\n");
#endif
http_request->body_length += length;
}
#ifdef LOG_HTTP_PARSER
int capped_len = length > 1000 ? 1000 : length;
debuglog("sandbox: %lu, length: %zu, Content(up to 1000 chars) \"%.*s\"\n", sandbox->id, length,
(int)capped_len, at);
#endif
return 0;
}
@ -227,12 +220,15 @@ http_parser_settings_on_msg_end(http_parser *parser)
struct sandbox * sandbox = (struct sandbox *)parser->data;
struct http_request *http_request = &sandbox->http_request;
if (http_request->message_end) return 0;
assert(sandbox->http_request.header_end);
assert(!sandbox->http_request.message_end);
#ifdef LOG_HTTP_PARSER
debuglog("sandbox: %lu\n", sandbox->request_arrival_timestamp);
debuglog("sandbox: %lu\n", sandbox->id);
#endif
http_request->message_end = true;
return 0;
}

@ -1,7 +1,4 @@
#include <assert.h>
#ifdef USE_HTTP_UVIO
#include <uv.h>
#endif
#include "http_response.h"
@ -19,25 +16,6 @@ http_response_encode_as_vector(struct http_response *http_response)
{
int buffer_count = 0;
#ifdef USE_HTTP_UVIO
http_response->bufs[buffer_count] = uv_buf_init(http_response->status, http_response->status_length);
buffer_count++;
for (int i = 0; i < http_response->header_count; i++) {
http_response->bufs[buffer_count] = uv_buf_init(http_response->headers[i].header,
http_response->headers[i].length);
buffer_count++;
}
if (http_response->body) {
http_response->bufs[buffer_count] = uv_buf_init(http_response->body, http_response->body_length);
buffer_count++;
http_response->bufs[buffer_count] = uv_buf_init(http_response->status + http_response->status_length
- 2,
2); /* for crlf */
buffer_count++;
}
#else
http_response->bufs[buffer_count].iov_base = http_response->status;
http_response->bufs[buffer_count].iov_len = http_response->status_length;
buffer_count++;
@ -57,7 +35,6 @@ http_response_encode_as_vector(struct http_response *http_response)
http_response->bufs[buffer_count].iov_len = 2;
buffer_count++;
}
#endif
return buffer_count;
}

@ -0,0 +1,34 @@
#include <stdint.h>
#include "debuglog.h"
#include "http_total.h"
/* 2XX + 4XX should equal sandboxes */
/* Listener Core Bookkeeping */
_Atomic uint32_t http_total_requests = 0;
_Atomic uint32_t http_total_5XX = 0;
#ifdef LOG_TOTAL_REQS_RESPS
_Atomic uint32_t http_total_2XX = 0;
_Atomic uint32_t http_total_4XX = 0;
#endif
void
http_total_log()
{
uint32_t total_reqs = atomic_load(&http_total_requests);
uint32_t total_5XX = atomic_load(&http_total_5XX);
#ifdef LOG_TOTAL_REQS_RESPS
uint32_t total_2XX = atomic_load(&http_total_2XX);
uint32_t total_4XX = atomic_load(&http_total_4XX);
int64_t total_responses = total_2XX + total_4XX + total_5XX;
int64_t outstanding_requests = (int64_t)total_reqs - total_responses;
debuglog("Requests: %u (%ld outstanding)\n\tResponses: %ld\n\t\t2XX: %u\n\t\t4XX: %u\n\t\t5XX: %u\n",
total_reqs, outstanding_requests, total_responses, total_2XX, total_4XX, total_5XX);
#else
debuglog("Requests: %u\n\tResponses:\n\t\t\t5XX: %u\n", total_reqs, total_5XX);
#endif
};

@ -1,14 +1,14 @@
#ifndef USE_HTTP_UVIO
/*
* This code originally came from the aWsm compiler
* It has since been updated
* https://github.com/gwsystems/aWsm/blob/master/runtime/libc/libc_backing.c
*/
#include <current_sandbox.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include "current_sandbox.h"
// What should we tell the child program its UID and GID are?
#define UID 0xFF
@ -624,6 +624,7 @@ wasm_get_time(int32_t clock_id, int32_t timespec_off)
return res;
}
#define SYS_EXIT 60
#define SYS_EXIT_GROUP 231
int32_t
wasm_exit_group(int32_t status)
@ -711,30 +712,38 @@ inner_syscall_handler(int32_t n, int32_t a, int32_t b, int32_t c, int32_t d, int
return wasm_write(a, b, c);
case SYS_WRITEV:
return wasm_writev(a, b, c);
case SYS_CLOSE:
return wasm_close(a);
case SYS_LSEEK:
return wasm_lseek(a, b, c);
case SYS_EXIT:
case SYS_EXIT_GROUP:
return wasm_exit_group(a);
case SYS_MMAP:
return wasm_mmap(a, b, c, d, e, f);
case SYS_GET_TIME:
return wasm_get_time(a, b);
case SYS_READV:
return wasm_readv(a, b, c);
case SYS_MUNMAP:
case SYS_IOCTL:
case SYS_SET_THREAD_AREA:
case SYS_SET_TID_ADDRESS:
case SYS_BRK:
case SYS_MADVISE:
/* Note: These are called, but are unimplemented and fail silently */
return 0;
case SYS_MUNMAP:
case SYS_BRK:
case SYS_RT_SIGACTION:
case SYS_RT_SIGPROGMASK:
case SYS_MADVISE:
default:
/* This is a general catch all for the other functions below */
debuglog("Call to unknown or implemented syscall %d\n", n);
errno = ENOSYS;
return -1;
/* TODO: The calls below need to be validated / refactored to be non-blocking */
// case SYS_READV:
// return wasm_readv(a, b, c);
// case SYS_OPEN:
// return wasm_open(a, b, c);
// case SYS_CLOSE:
// return wasm_close(a);
// case SYS_STAT:
// return wasm_stat(a, b);
// case SYS_FSTAT:
@ -743,8 +752,6 @@ inner_syscall_handler(int32_t n, int32_t a, int32_t b, int32_t c, int32_t d, int
// return wasm_lstat(a, b);
// case SYS_LSEEK:
// return wasm_lseek(a, b, c);
// case SYS_MMAP:
// return wasm_mmap(a, b, c, d, e, f);
// case SYS_GETPID:
// return wasm_getpid();
// case SYS_FCNTL:
@ -757,10 +764,6 @@ inner_syscall_handler(int32_t n, int32_t a, int32_t b, int32_t c, int32_t d, int
// return wasm_getcwd(a, b);
// case SYS_GETEUID:
// return wasm_geteuid();
// case SYS_GET_TIME:
// return wasm_get_time(a, b);
// case SYS_EXIT_GROUP:
// return wasm_exit_group(a);
// case SYS_FCHOWN:
// return wasm_fchown(a, b, c);
// case SYS_SOCKET:
@ -783,5 +786,3 @@ inner_syscall_handler(int32_t n, int32_t a, int32_t b, int32_t c, int32_t d, int
return 0;
}
#endif

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save