Merge pull request #187 from gwsystems/further-script-cleanup

Further script cleanup
Sean McBride committed 720eb374a7

@ -1,32 +1,29 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
// https://github.com/microsoft/vscode-dev-containers/tree/v0.166.1/containers/docker-existing-dockerfile
{
"name": "Dockerfile",
// Sets the run context to one level up instead of the .devcontainer folder.
"context": "..",
// Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
"dockerFile": "../Dockerfile.x86_64",
// Set *default* container specific settings.json values on container create.
"settings": {
"terminal.integrated.shell.linux": null
},
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"editorconfig.editorconfig",
"foxundermoon.shell-format",
"timonwong.shellcheck",
"dtsvet.vscode-wasm",
"13xforever.language-x86-64-assembly",
"ms-vscode.cpptools",
"ms-vscode.cpptools-themes",
"jeff-hykin.better-cpp-syntax"
],
"workspaceMount": "source=${localWorkspaceFolder},target=/sledge,type=bind,consistency=cached",
"workspaceFolder": "/sledge",
"postCreateCommand": "make -C /sledge install && make -B -C /sledge/runtime/tests clean all",
"containerUser": "dev",
"name": "Dockerfile",
// Sets the run context to one level up instead of the .devcontainer folder.
"context": "..",
// Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
"dockerFile": "../Dockerfile.x86_64",
// Set *default* container specific settings.json values on container create.
"settings": {
"terminal.integrated.shell.linux": "bash"
},
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"editorconfig.editorconfig",
"foxundermoon.shell-format",
"timonwong.shellcheck",
"dtsvet.vscode-wasm",
"13xforever.language-x86-64-assembly",
"ms-vscode.cpptools",
"ms-vscode.cpptools-themes",
"jeff-hykin.better-cpp-syntax",
"mads-hartmann.bash-ide-vscode"
],
"workspaceMount": "source=${localWorkspaceFolder},target=/sledge,type=bind,consistency=cached",
"workspaceFolder": "/sledge",
"postCreateCommand": "make -C /sledge install && make -B -C /sledge/runtime/tests clean all",
"containerUser": "dev",
}

@ -23,3 +23,15 @@ ignore = true
[thirdparty/**]
ignore = true
[*.json]
indent_style = tab
indent_size = 4
[dockerfile]
indent_style = tab
indent_size = 4
[Dockerfile.*]
indent_style = tab
indent_size = 4

@ -1,19 +1,23 @@
{
"configurations": [
{
"name": "Linux",
"intelliSenseMode": "clang-x64",
"includePath": [
"/usr/include/",
"${workspaceFolder}/runtime/include/",
"${workspaceFolder}/runtime/thirdparty/ck/include/",
"${workspaceFolder}/runtime/thirdparty/http-parser/",
"${workspaceFolder}/runtime/thirdparty/jsmn/"
],
"defines": ["USE_MEM_VM", "x86_64", "_GNU_SOURCE"],
"cStandard": "c17",
"compilerPath": "/usr/bin/clang"
}
],
"version": 4
}
"configurations": [
{
"name": "Linux",
"intelliSenseMode": "clang-x64",
"includePath": [
"/usr/include/",
"${workspaceFolder}/runtime/include/",
"${workspaceFolder}/runtime/thirdparty/ck/include/",
"${workspaceFolder}/runtime/thirdparty/http-parser/",
"${workspaceFolder}/runtime/thirdparty/jsmn/"
],
"defines": [
"USE_MEM_VM",
"x86_64",
"_GNU_SOURCE"
],
"cStandard": "c17",
"compilerPath": "/usr/bin/clang"
}
],
"version": 4
}

@ -1,12 +1,12 @@
{
"recommendations": [
"editorconfig.editorconfig",
"foxundermoon.shell-format",
"timonwong.shellcheck",
"dtsvet.vscode-wasm",
"13xforever.language-x86-64-assembly",
"ms-vscode.cpptools",
"ms-vscode.cpptools-themes",
"jeff-hykin.better-cpp-syntax"
]
}
"recommendations": [
"editorconfig.editorconfig",
"foxundermoon.shell-format",
"timonwong.shellcheck",
"dtsvet.vscode-wasm",
"13xforever.language-x86-64-assembly",
"ms-vscode.cpptools",
"ms-vscode.cpptools-themes",
"jeff-hykin.better-cpp-syntax"
]
}

.vscode/launch.json

@ -1,52 +1,52 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Hyde",
"type": "cppdbg",
"request": "launch",
"program": "${workspaceFolder}/runtime/bin/sledgert",
"args": [
"${workspaceFolder}/runtime/experiments/applications/ocr/hyde/spec.json"
],
"stopAtEntry": false,
"cwd": "${workspaceFolder}",
"environment": [],
"externalConsole": false,
"MIMode": "gdb",
"envFile": "${workspaceFolder}/.env",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
}
]
},
{
"name": "Preemption",
"type": "cppdbg",
"request": "launch",
"program": "${workspaceFolder}/runtime/bin/sledgert",
"args": [
"${workspaceFolder}/runtime/experiments/preemption/spec.json"
],
"stopAtEntry": false,
"cwd": "${workspaceFolder}",
"environment": [],
"externalConsole": false,
"MIMode": "gdb",
"envFile": "${workspaceFolder}/.env",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
}
]
}
]
}
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Hyde",
"type": "cppdbg",
"request": "launch",
"program": "${workspaceFolder}/runtime/bin/sledgert",
"args": [
"${workspaceFolder}/runtime/experiments/applications/ocr/hyde/spec.json"
],
"stopAtEntry": false,
"cwd": "${workspaceFolder}",
"environment": [],
"externalConsole": false,
"MIMode": "gdb",
"envFile": "${workspaceFolder}/.env",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
}
]
},
{
"name": "Preemption",
"type": "cppdbg",
"request": "launch",
"program": "${workspaceFolder}/runtime/bin/sledgert",
"args": [
"${workspaceFolder}/runtime/experiments/preemption/spec.json"
],
"stopAtEntry": false,
"cwd": "${workspaceFolder}",
"environment": [],
"externalConsole": false,
"MIMode": "gdb",
"envFile": "${workspaceFolder}/.env",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
}
]
}
]
}

@ -1,81 +1,111 @@
{
"files.associations": {
"*.inc": "cpp",
"arm_nnexamples_cifar10_parameter.h": "c",
"arm_nnexamples_cifar10_weights.h": "c",
"__hash_table": "cpp",
"__split_buffer": "cpp",
"__tree": "cpp",
"array": "cpp",
"bitset": "cpp",
"deque": "cpp",
"dynarray": "cpp",
"simd": "cpp",
"hash_map": "cpp",
"hash_set": "cpp",
"initializer_list": "cpp",
"iterator": "cpp",
"list": "cpp",
"map": "cpp",
"queue": "cpp",
"random": "cpp",
"regex": "cpp",
"set": "cpp",
"stack": "cpp",
"string": "cpp",
"string_view": "cpp",
"unordered_map": "cpp",
"unordered_set": "cpp",
"utility": "cpp",
"valarray": "cpp",
"vector": "cpp",
"__locale": "cpp",
"__config": "c",
"*.def": "c",
"mman.h": "c",
"types.h": "c",
"assert.h": "c",
"fstream": "c",
"locale": "c",
"*.tcc": "c",
"sandbox.h": "c",
"runtime.h": "c",
"panic.h": "c",
"ucontext.h": "c",
"stdlib.h": "c",
"pthread.h": "c",
"signal.h": "c",
"current_sandbox.h": "c",
"admissions_control.h": "c",
"sigval_t.h": "c",
"__sigval_t.h": "c",
"sigaction.h": "c",
"string.h": "c",
"errno.h": "c",
"siginfo_t.h": "c",
"features.h": "c"
},
"files.exclude": {
"**/.git": true,
"**/.svn": true,
"**/.hg": true,
"**/CVS": true,
"**/.DS_Store": true,
"awsm/wasmception": true
},
"C_Cpp.default.cStandard": "c17",
"C_Cpp.exclusionPolicy": "checkFilesAndFolders",
"C_Cpp.experimentalFeatures": "Enabled",
"C_Cpp.files.exclude": {
"awsm/wasmception": true,
"**/.vscode": true
},
"shellformat.flag": "-ln=bash -i 0 -bn -ci -sr -kp",
"terminal.integrated.profiles.linux": {
"bash": {
"path": "bash"
}
},
"terminal.integrated.shell.linux": "bash"
"files.associations": {
"*.inc": "cpp",
"arm_nnexamples_cifar10_parameter.h": "c",
"arm_nnexamples_cifar10_weights.h": "c",
"__hash_table": "cpp",
"__split_buffer": "cpp",
"__tree": "cpp",
"array": "cpp",
"bitset": "cpp",
"deque": "cpp",
"dynarray": "cpp",
"simd": "cpp",
"hash_map": "cpp",
"hash_set": "cpp",
"initializer_list": "cpp",
"iterator": "cpp",
"list": "cpp",
"map": "cpp",
"queue": "cpp",
"random": "cpp",
"regex": "cpp",
"set": "cpp",
"stack": "cpp",
"string": "cpp",
"string_view": "cpp",
"unordered_map": "cpp",
"unordered_set": "cpp",
"utility": "cpp",
"valarray": "cpp",
"vector": "cpp",
"__locale": "cpp",
"__config": "c",
"*.def": "c",
"mman.h": "c",
"types.h": "c",
"assert.h": "c",
"fstream": "c",
"locale": "c",
"*.tcc": "c",
"sandbox.h": "c",
"runtime.h": "c",
"panic.h": "c",
"ucontext.h": "c",
"stdlib.h": "c",
"pthread.h": "c",
"signal.h": "c",
"current_sandbox.h": "c",
"admissions_control.h": "c",
"sigval_t.h": "c",
"__sigval_t.h": "c",
"sigaction.h": "c",
"string.h": "c",
"errno.h": "c",
"siginfo_t.h": "c",
"features.h": "c"
},
"files.exclude": {
"**/.git": true,
"**/.svn": true,
"**/.hg": true,
"**/CVS": true,
"**/.DS_Store": true,
"awsm/wasmception": true
},
"C_Cpp.default.cStandard": "c17",
"C_Cpp.exclusionPolicy": "checkFilesAndFolders",
"C_Cpp.experimentalFeatures": "Enabled",
"C_Cpp.files.exclude": {
"awsm/wasmception": true,
"**/.vscode": true
},
"shellformat.effectLanguages": [
"shellscript",
"dockerfile",
"ignore",
"gitignore",
],
"shellformat.path": "/usr/local/bin/shfmt",
"shellformat.flag": "-ln=bash -i 0 -bn -ci -sr -kp",
"terminal.integrated.shell.linux": "bash",
"[jsonc]": {
"editor.defaultFormatter": "vscode.json-language-features"
},
"[json]": {
"editor.defaultFormatter": "vscode.json-language-features"
},
"[shellscript]": {
"editor.defaultFormatter": "foxundermoon.shell-format"
},
"[dockerfile]": {
"editor.defaultFormatter": "foxundermoon.shell-format"
},
"[ignore]": {
"editor.defaultFormatter": "foxundermoon.shell-format"
},
"files.watcherExclude": {
"**/.git/objects/**": true,
"**/.git/subtree-cache/**": true,
"**/.git/**": true,
"**/awsm/target/**": true,
"**/runtime/thirdparty/**": true,
"**/runtime/thirdparty/ck/**": true,
"**/runtime/thirdparty/http-parser/**": true,
"**/runtime/thirdparty/jsmn/**": true,
"**/runtime/thirdparty/dist/**": true,
"*.o": true,
"*.bc": true,
"*.wasm": true,
}
}

@ -6,32 +6,32 @@ FROM ubuntu:bionic
# install some basic packages
RUN apt-get update
RUN apt-get install -y --no-install-recommends \
build-essential \
curl \
git \
cmake \
ca-certificates \
libssl-dev \
pkg-config \
gcc \
g++ \
clang-8 \
clang-tools-8 \
llvm-8 \
llvm-8-dev \
libc++-dev \
libc++abi-dev \
lld-8 \
lldb-8 \
libclang-8-dev \
libclang-common-8-dev \
vim \
binutils-dev \
build-essential \
automake \
libtool \
strace \
less
build-essential \
curl \
git \
cmake \
ca-certificates \
libssl-dev \
pkg-config \
gcc \
g++ \
clang-8 \
clang-tools-8 \
llvm-8 \
llvm-8-dev \
libc++-dev \
libc++abi-dev \
lld-8 \
lldb-8 \
libclang-8-dev \
libclang-common-8-dev \
vim \
binutils-dev \
build-essential \
automake \
libtool \
strace \
less
RUN rm -rf /var/lib/apt/lists/*
# set to use our installed clang version

@ -8,6 +8,15 @@ ARG WASMCEPTION_URL=https://github.com/gwsystems/wasmception/releases/download/v
ARG SHFMT_URL=https://github.com/mvdan/sh/releases/download/v3.2.4/shfmt_v3.2.4_linux_amd64
ARG SHELLCHECK_URL=https://github.com/koalaman/shellcheck/releases/download/v0.7.1/shellcheck-v0.7.1.linux.x86_64.tar.xz
# Use bash, not sh
SHELL ["/bin/bash", "-c"]
# We run the dev container interactively, so unminimize and install missing packages
RUN apt-get update && apt-get install -y --no-install-recommends \
apt-utils \
man-db \
&& yes | unminimize
# General GCC C/C++ Build toolchain
# pkg-config, libtool - used by PocketSphinx
# cmake - used by cmsis
@ -116,4 +125,4 @@ ENV PATH=/opt/sledge/bin:$PATH
# TODO: Does the build process for the sample applications actually copy here?
# TODO: Should we create a special SLEDGE_MODULE_PATH that is searched for these modules?
ENV LD_LIBRARY_PATH=/opt/sledge/bin:LD_LIBRARY_PATH
ENV LD_LIBRARY_PATH=/opt/sledge/bin:$LD_LIBRARY_PATH

@ -132,11 +132,11 @@ envrun() {
echo "Starting ${SYS_DOC_NAME}"
docker run \
--privileged \
--security-opt seccomp:unconfined \
--security-opt seccomp:unconfined \
--name=${SYS_DOC_NAME} \
--detach \
--mount type=bind,src="$(cd "$(dirname "${0}")" && pwd -P || exit 1),target=/${SYS_NAME}" \
${SYS_DOC_NAMETAG} /bin/sleep 99999999 > /dev/null
--mount type=bind,src="$(cd "$(dirname "${0}")" && pwd -P || exit 1),target=/${SYS_NAME}" \
${SYS_DOC_NAMETAG} /bin/sleep 99999999 > /dev/null
fi
echo "Running shell"

@ -0,0 +1,26 @@
#!/bin/bash
# If already installed, just return
command -v perf && {
echo "perf is already installed."
exit 0
}
[[ "$(whoami)" != "root" ]] && {
echo "Expected to run as root"
exit 1
}
# Under WSL2, perf has to be installed from source
if grep --silent 'WSL2' <(uname -r); then
echo "WSL detected. perf must be built from source"
sudo apt-get install flex bison python3-dev liblzma-dev libnuma-dev zlib1g libperl-dev libgtk2.0-dev libslang2-dev systemtap-sdt-dev libelf-dev binutils-dev libbabeltrace-dev libdw-dev libunwind-dev libiberty-dev --yes
git clone --depth 1 https://github.com/microsoft/WSL2-Linux-Kernel ~/WSL2-Linux-Kernel
make -Wno-error -j8 -C ~/WSL2-Linux-Kernel/tools/perf
sudo cp ~/WSL2-Linux-Kernel/tools/perf/perf /usr/local/bin
rm -rf ~/WSL2-Linux-Kernel
else
apt-get install "linux-tools-$(uname -r)" linux-tools-generic -y
fi
exit 0

@ -1,19 +0,0 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
experiment_directory=$(pwd)
project_directory=$(cd ../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
gdb --eval-command="handle SIGUSR1 nostop" \
--eval-command="handle SIGPIPE nostop" \
--eval-command="set pagination off" \
--eval-command="set substitute-path /sledge/runtime $project_directory" \
--eval-command="run $experiment_directory/spec.json" \
sledgert

@ -1,20 +0,0 @@
reset
set term jpeg
set output "latency.jpg"
set xlabel "Payload (bytes)"
set xrange [-5:1050000]
set ylabel "Latency (ms)"
set yrange [0:]
set key left top
set style histogram columnstacked
plot 'latency.dat' using 1:2 title 'p50', \
'latency.dat' using 1:3 title 'p90', \
'latency.dat' using 1:4 title 'p99', \
'latency.dat' using 1:5 title 'p100'

@ -1,111 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
# Start the runtime
if [ "$1" != "-d" ]; then
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >> "$results_directory/$log" 2>> "$results_directory/$log" &
sleep 1
else
echo "Running under gdb"
echo "Running under gdb" >> "$results_directory/$log"
fi
payloads=(fivebyeight/5x8 handwriting/handwrt1 hyde/hyde)
ports=(10000 10001 10002)
iterations=1000
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
for i in {0..2}; do
hey -n 200 -c 3 -q 200 -o csv -m GET -D "$experiment_directory/${payloads[$i]}.pnm" "http://localhost:${ports[$i]}"
done
sleep 1
echo "[DONE]"
# Execute the experiments
echo "Running Experiments"
for i in {0..2}; do
printf "\t%s Payload: " "${payloads[$i]}"
file=$(echo "${payloads[$i]}" | awk -F/ '{print $2}').csv
hey -n "$iterations" -c 3 -cpus 2 -o csv -m GET -D "$experiment_directory/${payloads[$i]}.pnm" "http://localhost:${ports[$i]}" > "$results_directory/$file"
echo "[DONE]"
done
# Stop the runtime
if [ "$1" != "-d" ]; then
sleep 5
kill_runtime
fi
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Concurrency,Success_Rate\n" >> "$results_directory/success.csv"
printf "Concurrency,Throughput\n" >> "$results_directory/throughput.csv"
printf "Con,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
for payload in ${payloads[*]}; do
# Calculate Success Rate for csv
file=$(echo "$payload" | awk -F/ '{print $2}')
awk -F, '
$7 == 200 {ok++}
END{printf "'"$file"',%3.5f\n", (ok / '"$iterations"' * 100)}
' < "$results_directory/$file.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$file.csv" \
| sort -g > "$results_directory/$file-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$file-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/$file.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$file" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$file"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$file-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
rm -rf "$results_directory/$file-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots
generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@ -1,42 +0,0 @@
{
"active": true,
"name": "gocr",
"path": "gocr.aso",
"port": 10000,
"relative-deadline-us": 50000000000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1024000,
"http-resp-headers": [],
"http-resp-size": 1024000,
"http-resp-content-type": "text/plain"
},
{
"active": true,
"name": "gocr",
"path": "gocr.aso",
"port": 10001,
"relative-deadline-us": 50000000000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1024000,
"http-resp-headers": [],
"http-resp-size": 1024000,
"http-resp-content-type": "text/plain"
},
{
"active": true,
"name": "gocr",
"path": "gocr.aso",
"port": 10002,
"relative-deadline-us": 50000000000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 5335057,
"http-resp-headers": [],
"http-resp-size": 5335057,
"http-resp-content-type": "text/plain"
}

@ -1,12 +0,0 @@
reset
set term jpeg
set output "success.jpg"
set xlabel "Connections"
set xrange [-5:105]
set ylabel "% 2XX"
set yrange [0:110]
plot 'success.dat' using 1:2 title '2XX'

@ -1,10 +0,0 @@
#!/bin/bash
cd handwriting || exit
./run.sh
cd .. || exit
cd hyde || exit
./run.sh
cd .. || exit
cd fivebyeight || exit
./run.sh
cd ..

@ -1,13 +0,0 @@
reset
set term jpeg
set output "throughput.jpg"
# TODO: Axis shouldn't be linear
set xlabel "Connections"
set xrange [-5:105]
set ylabel "Requests/sec"
set yrange [0:]
plot 'throughput.dat' using 1:2 title 'Reqs/sec'

@ -0,0 +1,29 @@
# shellcheck shell=bash
if [ -n "$__csv_to_dat_sh__" ]; then return; fi
__csv_to_dat_sh__=$(date)
source "panic.sh" || exit 1
# Takes a variadic number of paths to *.csv files and converts to *.dat files in the same directory
csv_to_dat() {
if (($# == 0)); then
panic "insufficient parameters"
return 1
fi
for arg in "$@"; do
if ! [[ "$arg" =~ ".csv"$ ]]; then
panic "$arg is not a *.csv file"
return 1
fi
if [[ ! -f "$arg" ]]; then
panic "$arg does not exit"
return 1
fi
done
for file in "$@"; do
echo -n "#" > "${file/.csv/.dat}"
tr ',' ' ' < "$file" | column -t >> "${file/.csv/.dat}"
done
}
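For reference, a minimal caller of this library might look like the following sketch; the result paths are illustrative, not taken from this PR:

```bash
#!/bin/bash
# Hypothetical usage of csv_to_dat.sh: convert experiment CSVs into
# gnuplot-friendly *.dat files next to the originals.
source csv_to_dat.sh || exit 1

# Writes res/success.dat and res/latency.dat (paths are illustrative)
csv_to_dat "res/success.csv" "res/latency.csv" || exit 1
```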

@ -0,0 +1,7 @@
# shellcheck shell=bash
if [ -n "$__error_msg_sh__" ]; then return; fi
__error_msg_sh__=$(date)
error_msg() {
printf "%.23s %s() at %s:%s - %s\n" "$(date +%F.%T.%N)" "${FUNCNAME[0]}" "$(realpath "${BASH_SOURCE[0]##*/}")" "${BASH_LINENO[0]}" "${@}"
}
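A usage sketch (the function and file names are hypothetical): error_msg prefixes the message with a timestamp and call-site metadata, so it is typically called from inside the function that detected the failure.

```bash
#!/bin/bash
# Hypothetical usage of error_msg.sh
source error_msg.sh || exit 1

check_spec() {
	# Report a failure without aborting; the caller decides what to do
	[[ -f "$1" ]] || {
		error_msg "spec file $1 not found"
		return 1
	}
}

check_spec "missing-spec.json" || exit 1
```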

@ -0,0 +1,410 @@
# shellcheck shell=bash
if [ -n "$__framework_sh__" ]; then return; fi
__framework_sh__=$(date)
#
# This framework simplifies the scripting of experiments
#
# To use, import the framework source file and pass all arguments to the provided main function
# source "framework.sh"
#
# main "$@"
#
# In your script, implement the following functions above main:
# - experiment_main
#
source "path_join.sh" || exit 1
source "panic.sh" || exit 1
__framework_sh__usage() {
echo "$0 [options...]"
echo ""
echo "Options:"
echo " -t,--target=<target url> Execute as client against remote URL"
echo " -s,--serve=<EDF|FIFO> Serve with scheduling policy, but do not run client"
echo " -d,--debug=<EDF|FIFO> Debug under GDB with scheduling policy, but do not run client"
echo " -p,--perf=<EDF|FIFO> Run under perf with scheduling policy. Run on baremetal Linux host!"
}
# Declares application level global state
__framework_sh__initialize_globals() {
# timestamp is used to name the results directory for a particular test run
# shellcheck disable=SC2155
# shellcheck disable=SC2034
declare -gir __framework_sh__timestamp=$(date +%s)
# Globals used by parse_arguments
declare -g __framework_sh__target=""
declare -g __framework_sh__policy=""
declare -g __framework_sh__role=""
# Configure environment variables
# shellcheck disable=SC2155
declare -gr __framework_sh__application_directory="$(dirname "$(realpath "$0")")"
local -r binary_directory="$(cd "$__framework_sh__application_directory" && cd ../../bin && pwd)"
export PATH=$binary_directory:$PATH
export LD_LIBRARY_PATH=$binary_directory:$LD_LIBRARY_PATH
export SLEDGE_NWORKERS=5
}
# Parses arguments from the user and sets associated global state
__framework_sh__parse_arguments() {
for i in "$@"; do
case $i in
-t=* | --target=*)
if [[ "$__framework_sh__role" == "server" ]]; then
echo "Cannot set target when server"
__framework_sh__usage
return 1
fi
__framework_sh__role=client
__framework_sh__target="${i#*=}"
shift
;;
-s=* | --serve=*)
if [[ "$__framework_sh__role" == "client" ]]; then
echo "Cannot use -s,--serve with -t,--target"
__framework_sh__usage
return 1
fi
__framework_sh__role=server
__framework_sh__policy="${i#*=}"
if [[ ! $__framework_sh__policy =~ ^(EDF|FIFO)$ ]]; then
echo "\"$__framework_sh__policy\" is not a valid policy. EDF or FIFO allowed"
__framework_sh__usage
return 1
fi
shift
;;
-d=* | --debug=*)
if [[ "$__framework_sh__role" == "client" ]]; then
echo "Cannot use -d,--debug with -t,--target"
__framework_sh__usage
return 1
fi
__framework_sh__role=debug
__framework_sh__policy="${i#*=}"
if [[ ! $__framework_sh__policy =~ ^(EDF|FIFO)$ ]]; then
echo "\"$__framework_sh__policy\" is not a valid policy. EDF or FIFO allowed"
__framework_sh__usage
return 1
fi
shift
;;
-p=* | --perf=*)
if [[ "$__framework_sh__role" == "perf" ]]; then
echo "Cannot use -p,--perf with -t,--target"
__framework_sh__usage
return 1
fi
__framework_sh__role=perf
__framework_sh__policy="${i#*=}"
if [[ ! $__framework_sh__policy =~ ^(EDF|FIFO)$ ]]; then
echo "\"$__framework_sh__policy\" is not a valid policy. EDF or FIFO allowed"
__framework_sh__usage
return 1
fi
shift
;;
-h | --help)
__framework_sh__usage
exit 0
;;
*)
echo "$1 is a not a valid option"
__framework_sh__usage
return 1
;;
esac
done
# default to both if no arguments were passed
if [[ -z "$__framework_sh__role" ]]; then
__framework_sh__role="both"
__framework_sh__target="localhost"
fi
# Set globals as read only
declare -r __framework_sh__target
declare -r __framework_sh__policy
declare -r __framework_sh__role
}
# Log hardware and software info for the execution
__framework_sh__log_environment() {
if ! command -v git &> /dev/null; then
echo "git could not be found"
exit
fi
echo "*******"
echo "* Git *"
echo "*******"
git log | head -n 1 | cut -d' ' -f2
git status
echo ""
echo "************"
echo "* Makefile *"
echo "************"
cat "$(path_join "$__framework_sh__application_directory" ../../Makefile)"
echo ""
echo "**********"
echo "* Run.sh *"
echo "**********"
cat "$(path_join "$__framework_sh__application_directory" ./run.sh)"
echo ""
echo "************"
echo "* Hardware *"
echo "************"
lscpu
echo ""
echo "*************"
echo "* Execution *"
echo "*************"
}
# $1 - Scheduler Variant (EDF|FIFO)
# $2 - Results Directory
# $3 - How to run (foreground|background)
# $4 - JSON specification
__framework_sh__start_runtime() {
printf "Starting Runtime: "
if (($# != 4)); then
printf "[ERR]\n"
panic "invalid number of arguments \"$1\""
return 1
elif ! [[ $1 =~ ^(EDF|FIFO)$ ]]; then
printf "[ERR]\n"
panic "expected EDF or FIFO was \"$1\""
return 1
elif ! [[ -d "$2" ]]; then
printf "[ERR]\n"
panic "directory \"$2\" does not exist"
return 1
elif ! [[ $3 =~ ^(foreground|background)$ ]]; then
printf "[ERR]\n"
panic "expected foreground or background was \"$3\""
return 1
elif [[ ! -f "$4" || "$4" != *.json ]]; then
printf "[ERR]\n"
panic "\"$4\" does not exist or is not a JSON"
return 1
fi
local -r scheduler="$1"
local -r results_directory="$2"
local -r how_to_run="$3"
local -r specification="$4"
local -r log_name=log.txt
local log="$results_directory/${log_name}"
__framework_sh__log_environment >> "$log"
case "$how_to_run" in
"background")
SLEDGE_SCHEDULER="$scheduler" \
sledgert "$specification" >> "$log" 2>> "$log" &
;;
"foreground")
SLEDGE_SCHEDULER="$scheduler" \
sledgert "$specification"
;;
esac
printf "[OK]\n"
return 0
}
__framework_sh__run_server() {
if (($# != 2)); then
panic "invalid number of arguments \"$1\""
return 1
elif ! [[ $1 =~ ^(EDF|FIFO)$ ]]; then
panic "expected EDF or FIFO was \"$1\""
return 1
elif ! [[ $2 =~ ^(foreground|background)$ ]]; then
printf "[ERR]\n"
panic "expected foreground or background was \"$3\""
return 1
fi
local -r scheduler="$1"
local -r how_to_run="$2"
__framework_sh__start_runtime "$scheduler" "$RESULTS_DIRECTORY" "$how_to_run" "$__framework_sh__application_directory/spec.json" || {
echo "__framework_sh__start_runtime RC: $?"
panic "Error calling __framework_sh__start_runtime $scheduler $RESULTS_DIRECTORY $how_to_run $__framework_sh__application_directory/spec.json"
return 1
}
return 0
}
__framework_sh__run_perf() {
if (($# != 1)); then
printf "[ERR]\n"
panic "invalid number of arguments \"$1\""
return 1
elif ! [[ $1 =~ ^(EDF|FIFO)$ ]]; then
printf "[ERR]\n"
panic "expected EDF or FIFO was \"$1\""
return 1
fi
if ! command -v perf; then
echo "perf is not present."
exit 1
fi
local -r scheduler="$1"
SLEDGE_SCHEDULER="$scheduler" perf record -g -s sledgert "$__framework_sh__application_directory/spec.json"
}
# Starts the Sledge Runtime under GDB
__framework_sh__run_debug() {
# shellcheck disable=SC2155
local project_directory=$(cd ../.. && pwd)
if (($# != 1)); then
printf "[ERR]\n"
panic "invalid number of arguments \"$1\""
return 1
elif ! [[ $1 =~ ^(EDF|FIFO)$ ]]; then
printf "[ERR]\n"
panic "expected EDF or FIFO was \"$1\""
return 1
fi
local -r scheduler="$1"
if [[ "$project_directory" != "/sledge/runtime" ]]; then
printf "It appears that you are not running in the container. Substituting path to match host environment\n"
SLEDGE_SCHEDULER="$scheduler" gdb \
--eval-command="handle SIGUSR1 nostop" \
--eval-command="handle SIGPIPE nostop" \
--eval-command="set pagination off" \
--eval-command="set substitute-path /sledge/runtime $project_directory" \
--eval-command="run $__framework_sh__application_directory/spec.json" \
sledgert
else
SLEDGE_SCHEDULER="$scheduler" gdb \
--eval-command="handle SIGUSR1 nostop" \
--eval-command="handle SIGPIPE nostop" \
--eval-command="set pagination off" \
--eval-command="run $__framework_sh__application_directory/spec.json" \
sledgert
fi
return 0
}
__framework_sh__run_client() {
experiment_main "$__framework_sh__target" "$RESULTS_DIRECTORY" || {
panic "Error calling process_results $RESULTS_DIRECTORY"
return 1
}
return 0
}
__framework_sh__run_both() {
local -ar schedulers=(EDF FIFO)
for scheduler in "${schedulers[@]}"; do
printf "Running %s\n" "$scheduler"
__framework_sh__run_server "$scheduler" background || {
panic "Error calling __framework_sh__run_server"
return 1
}
__framework_sh__run_client || {
panic "Error calling __framework_sh__run_client"
__framework_sh__stop_runtime
return 1
}
__framework_sh__stop_runtime || {
panic "Error calling __framework_sh__stop_runtime"
return 1
}
done
return 0
}
__framework_sh__create_and_export_results_directory() {
local dir=""
# If we are running both client and server, we need to namespace by scheduler since we run both variants
case "$__framework_sh__role" in
"both")
dir="$__framework_sh__application_directory/res/$__framework_sh__timestamp/$scheduler"
;;
"client" | "server" | "debug" | "perf")
dir="$__framework_sh__application_directory/res/$__framework_sh__timestamp"
;;
*)
panic "${FUNCNAME[0]} Unexpected $__framework_sh__role"
return 1
;;
esac
mkdir -p "$dir" || {
panic "mkdir -p $dir"
return 1
}
export RESULTS_DIRECTORY="$dir"
}
# Responsible for ensuring that the experiment file meets framework assumptions
__framework_sh__validate_experiment() {
if [[ $(type -t experiment_main) != "function" ]]; then
panic "function experiment_main was not defined"
return 1
fi
}
main() {
__framework_sh__validate_experiment || exit 1
__framework_sh__initialize_globals || exit 1
__framework_sh__parse_arguments "$@" || exit 1
__framework_sh__create_and_export_results_directory || exit 1
case $__framework_sh__role in
both)
__framework_sh__run_both
;;
server)
__framework_sh__run_server "$__framework_sh__policy" foreground
;;
debug)
__framework_sh__run_debug "$__framework_sh__policy"
;;
perf)
__framework_sh__run_perf "$__framework_sh__policy"
;;
client)
__framework_sh__run_client
;;
*)
echo "Invalid state"
false
;;
esac
exit "$?"
}
__framework_sh__stop_runtime() {
printf "Stopping Runtime: "
pkill sledgert > /dev/null 2> /dev/null
pkill hey > /dev/null 2> /dev/null
printf "[OK]\n"
}
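Putting the framework contract together, a minimal experiment script might look like the sketch below. The workload URL and output file are illustrative assumptions; the framework itself supplies argument parsing, runtime startup, and the RESULTS_DIRECTORY export.

```bash
#!/bin/bash
# Hypothetical minimal experiment built on framework.sh
source framework.sh || exit 1
source panic.sh || exit 1

# Required symbol: the framework invokes this with the target hostname
# and the results directory it created
experiment_main() {
	local -r hostname="$1"
	local -r results_directory="$2"

	# Drive a single workload and keep the raw hey output (URL is illustrative)
	hey -n 100 -c 10 -o csv -m GET "http://${hostname}:10000" \
		> "$results_directory/raw.csv" 2> /dev/null || {
		panic "request generation failed"
		return 1
	}
	return 0
}

main "$@"
```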

@ -0,0 +1,36 @@
# shellcheck shell=bash
if [ -n "$__generate_gnuplots_sh__" ]; then return; fi
__generate_gnuplots_sh__=$(date)
source "panic.sh" || exit 1
# Runs all *.gnuplot files found in gnuplot_directory from within results_directory,
# outputting the resulting diagrams in results_directory
# $1 - results_directory containing the data file referenced in the gnuplot file
# $2 - gnuplot_directory containing the *.gnuplot specification files
generate_gnuplots() {
local -r results_directory="$1"
local -r experiment_directory="$2"
if ! command -v gnuplot &> /dev/null; then
panic "gnuplot could not be found in path"
return 1
fi
# shellcheck disable=SC2154
if [ -z "$results_directory" ]; then
panic "results_directory was unset or empty"
return 1
fi
# shellcheck disable=SC2154
if [ -z "$experiment_directory" ]; then
panic "error: EXPERIMENT_DIRECTORY was unset or empty"
return 1
fi
cd "$results_directory" || exit
shopt -s nullglob
for gnuplot_file in "$experiment_directory"/*.gnuplot; do
gnuplot "$gnuplot_file"
done
cd "$experiment_directory" || exit
}
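A usage sketch under the same conventions as the run.sh scripts in this PR (the timestamped directory is illustrative):

```bash
#!/bin/bash
# Hypothetical usage of generate_gnuplots.sh
source generate_gnuplots.sh || exit 1

# Render every *.gnuplot spec in this script's directory against the
# data files in a timestamped results directory
generate_gnuplots "res/1617000000" "$(pwd)" || exit 1
```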

@ -0,0 +1,38 @@
# shellcheck shell=bash
if [ -n "$__get_result_count_sh__" ]; then return; fi
__get_result_count_sh__=$(date)
source "panic.sh" || exit 1
# Given a file, returns the number of results
# This assumes a *.csv file with a header
# $1 the file we want to check for results
# $2 an optional return nameref
get_result_count() {
if (($# < 1 || $# > 2)); then
panic "insufficient parameters. $#/1..2"
return 1
elif [[ ! -f $1 ]]; then
panic "the file $1 does not exist"
return 1
elif [[ ! -s $1 ]]; then
panic "the file $1 is size 0"
return 1
fi
local -r file=$1
# Subtract one line for the header
local -i count=$(($(wc -l < "$file") - 1))
if (($# == 2)); then
# shellcheck disable=2034
local -n __result=$2
# Return the count through the optional nameref
__result=$count
fi
if ((count > 0)); then
return 0
else
return 1
fi
}
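A usage sketch mirroring how the run.sh scripts below call it (the CSV path is illustrative):

```bash
#!/bin/bash
# Hypothetical usage of get_result_count.sh
source get_result_count.sh || exit 1
source panic.sh || exit 1

get_result_count "res/1617000000/con100.csv" || {
	panic "con100.csv unexpectedly has zero requests"
	exit 1
}
```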

@ -0,0 +1,24 @@
# shellcheck shell=bash
if [ -n "$__panic_sh__" ]; then return; fi
__panic_sh__=$(date)
source "error_msg.sh" || exit 1
declare __common_did_dump_callstack=false
__common_dump_callstack() {
echo "Call Stack:"
# Skip the dump_bash_stack and error_msg_frames
for ((i = 2; i < ${#FUNCNAME[@]}; i++)); do
printf "\t%d - %s\n" "$((i - 2))" "${FUNCNAME[i]} (${BASH_SOURCE[i + 1]}:${BASH_LINENO[i]})"
done
}
# Public API
panic() {
error_msg "${@}"
[[ "$__common_did_dump_callstack" == false ]] && {
__common_dump_callstack
__common_did_dump_callstack=true
}
}
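A usage sketch (script and file names are hypothetical): the first panic in a run logs the message via error_msg and dumps the call stack once; subsequent panics only log.

```bash
#!/bin/bash
# Hypothetical usage of panic.sh
source panic.sh || exit 1

start_runtime() {
	[[ -f "$1" ]] || {
		panic "specification \"$1\" does not exist"
		return 1
	}
}

start_runtime "missing-spec.json" || exit 1
```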

@ -0,0 +1,14 @@
# shellcheck shell=bash
if [ -n "$__path_join_sh__" ]; then return; fi
__path_join_sh__=$(date)
path_join() {
local base=$1
local relative=$2
relative_path="$(dirname "$relative")"
file_name="$(basename "$relative")"
absolute_path="$(cd "$base" && cd "$relative_path" && pwd)"
echo "$absolute_path/$file_name"
}
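A usage sketch following the pattern the run.sh scripts use (the relative path is illustrative; note that path_join requires the relative directory to exist because it resolves via cd):

```bash
#!/bin/bash
# Hypothetical usage of path_join.sh
source path_join.sh || exit 1

base_path="$(dirname "$(realpath --logical "${BASH_SOURCE[0]}")")"
spec_path="$(path_join "$base_path" ../bimodal/spec.json)"
echo "$spec_path"
```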

@ -0,0 +1,3 @@
res
perf.data
perf.data.old

@ -0,0 +1,15 @@
# Bimodal Distribution
This experiment drives a bimodal distribution of long-running low-priority and short-running high-priority workloads
Relative Deadlines are tuned such that the scheduler should always preempt the low-priority workload for the high-priority workload when preemption is enabled.
The two workloads are run separately as a baseline. They are then run concurrently, starting the low-priority long-running workload first such that the system begins execution and accumulates requests in the data structures. The high-priority short-running workload then begins.
## Independent Variable
The Scheduling Policy: EDF versus FIFO
## Dependent Variables
Latency of high priority workload

@ -0,0 +1,259 @@
#!/bin/bash
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Success - The percentage of requests that complete by their deadlines
# TODO: Does this handle non-200s?
# Throughput - The mean number of successful requests per second
# Latency - the round-trip response time (unit?) of successful requests at the p50, p90, p99, and p100 percentiles
# Add bash_libraries directory to path
__run_sh__base_path="$(dirname "$(realpath --logical "${BASH_SOURCE[0]}")")"
__run_sh__bash_libraries_relative_path="../bash_libraries"
__run_sh__bash_libraries_absolute_path=$(cd "$__run_sh__base_path" && cd "$__run_sh__bash_libraries_relative_path" && pwd)
export PATH="$__run_sh__bash_libraries_absolute_path:$PATH"
source csv_to_dat.sh || exit 1
source framework.sh || exit 1
# source generate_gnuplots.sh || exit 1
source get_result_count.sh || exit 1
source panic.sh || exit 1
source path_join.sh || exit 1
# Sends requests until the per-module perf window buffers are full
# This ensures that Sledge has accurate estimates of execution time
run_samples() {
if (($# != 1)); then
panic "invalid number of arguments \"$1\""
return 1
elif [[ -z "$1" ]]; then
panic "hostname \"$1\" was empty"
return 1
fi
local hostname="${1}"
# Scrape the perf window size from the source if possible
# TODO: Make a util function
local -r perf_window_path="$(path_join "$__run_sh__base_path" ../../include/perf_window.h)"
local -i perf_window_buffer_size
if ! perf_window_buffer_size=$(grep "#define PERF_WINDOW_BUFFER_SIZE" < "$perf_window_path" | cut -d\ -f3); then
printf "Failed to scrape PERF_WINDOW_BUFFER_SIZE from ../../include/perf_window.h\n"
printf "Defaulting to 16\n"
perf_window_buffer_size=16
fi
local -ir perf_window_buffer_size
printf "Running Samples: "
hey -n "$perf_window_buffer_size" -c "$perf_window_buffer_size" -cpus 3 -t 0 -o csv -m GET -d "40\n" "http://${hostname}:10040" 1> /dev/null 2> /dev/null || {
printf "[ERR]\n"
panic "fib40 samples failed"
return 1
}
hey -n "$perf_window_buffer_size" -c "$perf_window_buffer_size" -cpus 3 -t 0 -o csv -m GET -d "10\n" "http://${hostname}:100010" 1> /dev/null 2> /dev/null || {
printf "[ERR]\n"
panic "fib10 samples failed"
return 1
}
printf "[OK]\n"
return 0
}
# Execute the fib10 and fib40 experiments sequentially and concurrently
# $1 (hostname)
# $2 (results_directory) - a directory where we will store our results
run_experiments() {
if (($# != 2)); then
panic "invalid number of arguments \"$1\""
return 1
elif [[ -z "$1" ]]; then
panic "hostname \"$1\" was empty"
return 1
elif [[ ! -d "$2" ]]; then
panic "directory \"$2\" does not exist"
return 1
fi
local hostname="$1"
local results_directory="$2"
# The duration in seconds that we want the client to send requests
local -ir duration_sec=15
# The duration in seconds that the low priority task should run before the high priority task starts
local -ir offset=5
printf "Running Experiments\n"
# Run each separately
printf "\tfib40: "
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" "http://$hostname:10040" > "$results_directory/fib40.csv" 2> /dev/null || {
printf "[ERR]\n"
panic "fib40 failed"
return 1
}
get_result_count "$results_directory/fib40.csv" || {
printf "[ERR]\n"
panic "fib40 unexpectedly has zero requests"
return 1
}
printf "[OK]\n"
printf "\tfib10: "
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" "http://$hostname:10010" > "$results_directory/fib10.csv" 2> /dev/null || {
printf "[ERR]\n"
panic "fib10 failed"
return 1
}
get_result_count "$results_directory/fib10.csv" || {
printf "[ERR]\n"
panic "fib10 unexpectedly has zero requests"
return 1
}
printf "[OK]\n"
# Run concurrently
# The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
# This asynchronously triggers jobs and then waits on their PIDs
local fib40_con_PID
local fib10_con_PID
hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" "http://${hostname}:10040" > "$results_directory/fib40_con.csv" 2> /dev/null &
fib40_con_PID="$!"
sleep $offset
hey -z "${duration_sec}s" -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" "http://${hostname}:10010" > "$results_directory/fib10_con.csv" 2> /dev/null &
fib10_con_PID="$!"
wait -f "$fib10_con_PID" || {
printf "\tfib10_con: [ERR]\n"
panic "failed to wait -f ${fib10_con_PID}"
return 1
}
get_result_count "$results_directory/fib10_con.csv" || {
printf "\tfib10_con: [ERR]\n"
panic "fib10_con has zero requests. This might be because fib40_con saturated the runtime"
return 1
}
printf "\tfib10_con: [OK]\n"
wait -f "$fib40_con_PID" || {
printf "\tfib40_con: [ERR]\n"
panic "failed to wait -f ${fib40_con_PID}"
return 1
}
get_result_count "$results_directory/fib40_con.csv" || {
printf "\tfib40_con: [ERR]\n"
panic "fib40_con has zero requests."
return 1
}
printf "\tfib40_con: [OK]\n"
return 0
}
# Process the experimental results and generate human-friendly results for success rate, throughput, and latency
process_results() {
if (($# != 1)); then
error_msg "invalid number of arguments ($#, expected 1)"
return 1
elif ! [[ -d "$1" ]]; then
error_msg "directory $1 does not exist"
return 1
fi
local -r results_directory="$1"
printf "Processing Results: "
# Write headers to CSVs
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
# The four types of results that we are capturing.
# fib10 and fib40 are run sequentially.
# fib10_con and fib40_con are run concurrently.
local -ar payloads=(fib10 fib10_con fib40 fib40_con)
# The deadlines for each of the workloads
# TODO: Scrape these from spec.json
local -Ar deadlines_ms=(
[fib10]=2
[fib40]=3000
)
for payload in "${payloads[@]}"; do
# Strip the _con suffix when getting the deadline
local -i deadline=${deadlines_ms[${payload/_con/}]}
# Calculate Success Rate for csv (percent of requests that return 200 within deadline)
awk -F, '
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# We determine duration by looking at the timestamp of the last complete request
# TODO: Should this instead just use the client-side synthetic duration_sec value?
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
# Throughput is calculated as the mean number of successful requests per second
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
csv_to_dat "$results_directory/success.csv" "$results_directory/throughput.csv" "$results_directory/latency.csv"
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots "$results_directory" "$__run_sh__base_path" || {
# printf "[ERR]\n"
# panic "failed to generate gnuplots"
# }
printf "[OK]\n"
return 0
}
# Expected Symbol used by the framework
experiment_main() {
local -r target_hostname="$1"
local -r results_directory="$2"
run_samples "$target_hostname" || return 1
run_experiments "$target_hostname" "$results_directory" || return 1
process_results "$results_directory" || return 1
return 0
}
main "$@"

@ -1,65 +0,0 @@
#!/bin/bash
log_environment() {
if ! command -v git &> /dev/null; then
echo "git could not be found"
exit
fi
echo "*******"
echo "* Git *"
echo "*******"
git log | head -n 1 | cut -d' ' -f2
git status
echo ""
echo "************"
echo "* Makefile *"
echo "************"
cat ../../Makefile
echo ""
echo "**********"
echo "* Run.sh *"
echo "**********"
cat run.sh
echo ""
echo "************"
echo "* Hardware *"
echo "************"
lscpu
echo ""
echo "*************"
echo "* Execution *"
echo "*************"
}
kill_runtime() {
echo -n "Running Cleanup: "
pkill sledgert > /dev/null 2> /dev/null
pkill hey > /dev/null 2> /dev/null
echo "[DONE]"
}
generate_gnuplots() {
if ! command -v gnuplot &> /dev/null; then
echo "gnuplot could not be found"
exit
fi
# shellcheck disable=SC2154
if [ -z "$results_directory" ]; then
echo "results_directory is unset or empty"
exit
fi
# shellcheck disable=SC2154
if [ -z "$experiment_directory" ]; then
echo "experiment_directory is unset or empty"
exit
fi
cd "$results_directory" || exit
gnuplot ../../latency.gnuplot
gnuplot ../../success.gnuplot
gnuplot ../../throughput.gnuplot
cd "$experiment_directory" || exit
}

@ -20,16 +20,6 @@ _How do increasing levels of concurrent client requests affect tail latency, t
- `hey` (https://github.com/rakyll/hey) is available in your PATH
- You have compiled `sledgert` and the `empty.so` test workload
## To Execute
1. Run `./run.sh`
2. View the results in the newest timestamped directory in `./res`
## To Debug
1. Run `./debug.sh` in a tab
2. Run `./run.sh -d` in a second tab
## TODO
- Harden scripts to validate assumptions

@ -1,19 +0,0 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
experiment_directory=$(pwd)
project_directory=$(cd ../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
gdb --eval-command="handle SIGUSR1 nostop" \
--eval-command="handle SIGPIPE nostop" \
--eval-command="set pagination off" \
--eval-command="set substitute-path /sledge/runtime $project_directory" \
--eval-command="run $experiment_directory/spec.json" \
sledgert

@ -1,81 +1,144 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
# Add bash_libraries directory to path
__run_sh__base_path="$(dirname "$(realpath --logical "${BASH_SOURCE[0]}")")"
__run_sh__bash_libraries_relative_path="../bash_libraries"
__run_sh__bash_libraries_absolute_path=$(cd "$__run_sh__base_path" && cd "$__run_sh__bash_libraries_relative_path" && pwd)
export PATH="$__run_sh__bash_libraries_absolute_path:$PATH"
source csv_to_dat.sh || exit 1
source framework.sh || exit 1
source generate_gnuplots.sh || exit 1
source get_result_count.sh || exit 1
source panic.sh || exit 1
source path_join.sh || exit 1
declare -gi iterations=10000
declare -ga concurrency=(1 20 40 60 80 100)
run_samples() {
if (($# != 1)); then
panic "invalid number of arguments \"$1\""
return 1
elif [[ -z "$1" ]]; then
panic "hostname \"$1\" was empty"
return 1
fi
local hostname="$1"
# Scrape the perf window size from the source if possible
# TODO: Make a util function
local -r perf_window_path="$(path_join "$__run_sh__base_path" ../../include/perf_window.h)"
local -i perf_window_buffer_size
if ! perf_window_buffer_size=$(grep "#define PERF_WINDOW_BUFFER_SIZE" < "$perf_window_path" | cut -d\ -f3); then
printf "Failed to scrape PERF_WINDOW_BUFFER_SIZE from ../../include/perf_window.h\n"
printf "Defaulting to 16\n"
perf_window_buffer_size=16
fi
local -ir perf_window_buffer_size
printf "Running Samples: "
hey -n "$perf_window_buffer_size" -c "$perf_window_buffer_size" -q 200 -cpus 3 -o csv -m GET "http://${hostname}:10000" 1> /dev/null 2> /dev/null || {
printf "[ERR]\n"
panic "samples failed"
return 1
}
printf "[OK]\n"
return 0
}
mkdir -p "$results_directory"
# Execute the experiments
# $1 (hostname)
# $2 (results_directory) - a directory where we will store our results
run_experiments() {
if (($# != 2)); then
panic "invalid number of arguments \"$1\""
return 1
elif [[ -z "$1" ]]; then
panic "hostname \"$1\" was empty"
return 1
elif [[ ! -d "$2" ]]; then
panic "directory \"$2\" does not exist"
return 1
fi
local hostname="$1"
local results_directory="$2"
# Execute the experiments
printf "Running Experiments:\n"
for conn in ${concurrency[*]}; do
printf "\t%d Concurrency: " "$conn"
hey -n "$iterations" -c "$conn" -cpus 2 -o csv -m GET "http://$hostname:10000" > "$results_directory/con$conn.csv" 2> /dev/null || {
printf "[ERR]\n"
panic "experiment failed"
return 1
}
get_result_count "$results_directory/con$conn.csv" || {
printf "[ERR]\n"
panic "con$conn.csv unexpectedly has zero requests"
return 1
}
printf "[OK]\n"
done
log_environment >> "$results_directory/$log"
return 0
}
# Start the runtime
if [ "$1" != "-d" ]; then
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >> "$results_directory/$log" 2>> "$results_directory/$log" &
sleep 1
else
echo "Running under gdb"
echo "Running under gdb" >> "$results_directory/$log"
fi
process_results() {
if (($# != 1)); then
panic "invalid number of arguments ($#, expected 1)"
return 1
elif ! [[ -d "$1" ]]; then
panic "directory $1 does not exist"
return 1
fi
iterations=10000
local -r results_directory="$1"
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
hey -n "$iterations" -c 3 -q 200 -o csv -m GET http://localhost:10000
sleep 5
echo "[DONE]"
printf "Processing Results: "
# Execute the experiments
concurrency=(1 20 40 60 80 100)
echo "Running Experiments"
for conn in ${concurrency[*]}; do
printf "\t%d Concurrency: " "$conn"
hey -n "$iterations" -c "$conn" -cpus 2 -o csv -m GET http://localhost:10000 > "$results_directory/con$conn.csv"
echo "[DONE]"
done
# Stop the runtime
if [ "$1" != "-d" ]; then
sleep 5
kill_runtime
fi
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Concurrency,Success_Rate\n" >> "$results_directory/success.csv"
printf "Concurrency,Throughput\n" >> "$results_directory/throughput.csv"
printf "Con,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
for conn in ${concurrency[*]}; do
# Calculate Success Rate for csv
awk -F, '
# Write headers to CSVs
printf "Concurrency,Success_Rate\n" >> "$results_directory/success.csv"
printf "Concurrency,Throughput\n" >> "$results_directory/throughput.csv"
printf "Con,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
for conn in ${concurrency[*]}; do
if [[ ! -f "$results_directory/con$conn.csv" ]]; then
printf "[ERR]\n"
panic "Missing $results_directory/con$conn.csv"
return 1
fi
# Calculate Success Rate for csv (percent of requests resulting in 200)
awk -F, '
$7 == 200 {ok++}
END{printf "'"$conn"',%3.5f\n", (ok / '"$iterations"' * 100)}
' < "$results_directory/con$conn.csv" >> "$results_directory/success.csv"
# Filter on 200s, convery from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/con$conn.csv" \
| sort -g > "$results_directory/con$conn-response.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/con$conn.csv" \
| sort -g > "$results_directory/con$conn-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/con$conn-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Number of 200s
oks=$(wc -l < "$results_directory/con$conn-response.csv")
((oks == 0)) && continue # If all errors, skip line
# We determine duration by looking at the timestamp of the last complete request
# TODO: Should this instead just use the client-side synthetic duration_sec value?
duration=$(tail -n1 "$results_directory/con$conn.csv" | cut -d, -f8)
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/con$conn.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%d,%f\n" "$conn" "$throughput" >> "$results_directory/throughput.csv"
# Throughput is calculated as the mean number of successful requests per second
throughput=$(echo "$oks/$duration" | bc)
printf "%d,%f\n" "$conn" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
@ -90,18 +153,33 @@ for conn in ${concurrency[*]}; do
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/con$conn-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
rm -rf "$results_directory/con$conn-response.csv"
done
# Delete scratch file used for sorting/counting
rm -rf "$results_directory/con$conn-response.csv"
done
# Transform csvs to dat files for gnuplot
csv_to_dat "$results_directory/success.csv" "$results_directory/throughput.csv" "$results_directory/latency.csv"
# Generate gnuplots
generate_gnuplots "$results_directory" "$__run_sh__base_path" || {
printf "[ERR]\n"
panic "failed to generate gnuplots"
}
printf "[OK]\n"
return 0
}
# Expected Symbol used by the framework
experiment_main() {
local -r target_hostname="$1"
local -r results_directory="$2"
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
run_samples "$target_hostname" || return 1
run_experiments "$target_hostname" "$results_directory" || return 1
process_results "$results_directory" || return 1
# Generate gnuplots
generate_gnuplots
return 0
}
# Cleanup, if required
echo "[DONE]"
main "$@"

@ -1,14 +1,16 @@
{
"active": true,
"name": "empty",
"path": "empty_wasm.so",
"port": 10000,
"relative-deadline-us": 50000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1024,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain"
"active": true,
"name": "empty",
"path": "empty_wasm.so",
"port": 10000,
"expected-execution-us": 500,
"admissions-percentile": 70,
"relative-deadline-us": 50000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1024,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain"
}

@ -1,48 +0,0 @@
# Admissions Control
## Discussion of Implementation
The admissions control subsystem seeks to ensure that the system does not accept more work than it can execute while meeting the relative deadline defined in a module's JSON specification.
The system maintains an integral value expressing the capacity of the system as millionths of a worker core. This assumes that the runtime has "pinned" these workers to underlying processors and has no contention with other workloads.
The system maintains a second integral value expressing the total accepted work.
The module specification provides a relative deadline, an expected execution time, and a percentile target expressing the pXX latency that the admissions control system should use when making admissions decisions (tunable from 50% to 99%). Tuning this percentile expresses how conservative the system should be with regard to scheduling. Selecting a lower value, such as 50%, reserves less processor time and results in a higher likelihood that the relative deadline is not met. Selecting a higher value, such as 99%, reserves more processor time and provides a higher likelihood that the relative deadline will be met. The provided expected execution time is assumed to match the percentile provided.
Dividing the expected execution time by the relative deadline yields the fraction of a worker needed to meet the deadline.
If the existing accepted workload plus the required work of this new workload is less than the system capacity, the workload is accepted, and the integral value expressing the total accepted work is increased. The resulting sandbox request is tagged with the fraction of a worker it was calculated to use, and when the request completes, the total accepted work is decreased by this amount.
If the existing accepted workload plus the required work of this new workload is greater than the system capacity, the request is rejected and the runtime sends the client an HTTP 503 response.
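As a worked illustration of the bookkeeping above, with hypothetical numbers (not taken from the runtime or any spec in this PR):

```bash
#!/bin/bash
# Hypothetical worked example of the admissions control arithmetic.
# Capacity and admitted work are tracked in millionths of a worker core.
expected_execution_us=5000  # pXX execution estimate from the module spec
relative_deadline_us=50000  # relative deadline from the module spec

# Expected execution / relative deadline = fraction of one worker required
required_work=$((expected_execution_us * 1000000 / relative_deadline_us))
echo "required: $required_work" # 100000, i.e. 10% of one worker core

total_admitted=3800000    # work already accepted (illustrative)
capacity=$((4 * 1000000)) # e.g. four pinned worker cores

# Admit only if the accepted work plus this request fits within capacity
if ((total_admitted + required_work <= capacity)); then
	echo "admit"
else
	echo "reject with HTTP 503"
fi
```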
While the module specification provides an expected execution time, the system does not trust this value and only uses it in the absence of better information. Each sandbox is profiled as it runs through the system, and the end-to-end execution times of successful sandbox requests are added to a specialized performance window data structure that stores the last N execution times sorted in order of execution time. This structure optimizes for quick lookups of a specific pXX percentile.
Once data is seeded into this data structure, the initial execution estimate provided in the module specification is ignored, and the pXX target is instead used to look up the actual pXX performance metric.
Future Work:
Currently, the scheduler takes no action when an executing sandbox exceeds its pXX execution time or deadline.
In the case of the pXX workload, this means that a workload configured to target p50 during admissions control decisions but with exceptionally poor p99 performance causes system-wide overheads that can cause other workloads to miss their deadlines.
Even worse, when executing beyond the relative deadline, the request might be too stale for the client.
In the absolute worst case, one can imagine a client workload caught in an infinite loop that causes permanent head of line blocking because its deadline is earlier than the current time, such that nothing can possibly preempt the executing workload.
## Question
- Does Admissions Control guarantee that deadlines are met?
## Independent Variable
Deadline is disabled versus deadline is enabled
## Invariants
Single workload
Use FIFO policy
## Dependent Variables
End-to-end execution time of a workload measured from a client measured relative to its deadline

@ -1,105 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
host=localhost
timestamp=$(date +%s)
experiment_directory=$(pwd)
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
inputs=(40 10)
duration_sec=60
offset=5
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
for input in ${inputs[*]}; do
hey -n 16 -c 4 -t 0 -o csv -m GET -d "$input\n" http://${host}:$((10000 + input))
done
echo "[DONE]"
sleep 5
echo "Running Experiments"
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
hey -n 1000 -c 1000 -cpus 6 -t 0 -o csv -m GET -d "40\n" http://${host}:10040 > "$results_directory/fib40-con.csv"
sleep $offset
hey -n 25000 -c 1000000 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 > "$results_directory/fib10-con.csv" &
sleep $((duration_sec + offset + 45))
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(20 20000)
# durations_s=(60 70)
payloads=(fib10-con fib40-con)
for ((i = 1; i < 2; i++)); do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
# duration=${durations_s[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f%%\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
# throughput=$(echo "$oks/$duration" | bc)
# printf "%s,%f\n" "$payload" "$throughput" >>"$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@ -1,109 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
host=192.168.1.13
# host=localhost
timestamp=$(date +%s)
experiment_directory=$(pwd)
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
inputs=(40 10)
duration_sec=30
offset=5
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
for input in ${inputs[*]}; do
hey -n 16 -c 4 -t 0 -o csv -m GET -d "$input\n" http://${host}:$((10000 + input))
done
echo "[DONE]"
sleep 5
echo "Running Experiments"
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 200 -t 0 -o csv -m GET -d "40\n" http://${host}:10040 > "$results_directory/fib40-con.csv" &
sleep $offset
hey -z ${duration_sec}s -cpus 3 -c 200 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 > "$results_directory/fib10-con.csv" &
sleep $((duration_sec + offset + 15))
sleep 30
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(20 20000)
payloads=(fib10-con fib40-con)
durations_s=(30 40)
for ((i = 0; i < 2; i++)); do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
duration=${durations_s[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 {denom++}
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f%%\n", (ok / denom * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@ -1,107 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
host=192.168.1.13
# host=localhost
timestamp=$(date +%s)
experiment_directory=$(pwd)
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
inputs=(10)
duration_sec=30
offset=5
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
hey -n 16 -c 4 -t 0 -o csv -m GET -d "10\n" http://${host}:10010
echo "[DONE]"
sleep 5
echo "Running Experiments"
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
# hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 200 -t 0 -o csv -m GET -d "40\n" http://${host}:10040 >"$results_directory/fib40-con.csv" &
# sleep $offset
hey -z ${duration_sec}s -cpus 6 -c 400 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 > "$results_directory/fib10-con.csv"
# sleep $((duration_sec + offset + 15))
# sleep 30
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(20 20000)
payloads=(fib10-con fib40-con)
durations_s=(30 40)
for ((i = 0; i < 1; i++)); do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
duration=${durations_s[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 {denom++}
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f%%\n", (ok / denom * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@ -1,19 +0,0 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
experiment_directory=$(pwd)
project_directory=$(cd ../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
gdb --eval-command="handle SIGUSR1 nostop" \
--eval-command="handle SIGPIPE nostop" \
--eval-command="set pagination off" \
--eval-command="set substitute-path /sledge/runtime $project_directory" \
--eval-command="run $experiment_directory/spec.json" \
sledgert

@ -1,77 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
experiment_directory=$(pwd)
results_directory="$experiment_directory/res/1606615320-fifo-adm"
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(20 20000)
payloads=(fib10-con fib40-con)
for ((i = 0; i < 2; i++)); do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f%%\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@ -1,111 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
host=192.168.1.13
# host=localhost
# timestamp=$(date +%s)
timestamp=1606697099
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
inputs=(40 10)
duration_sec=60
offset=5
# Execute workloads long enough for runtime to learn expected execution time
# echo -n "Running Samples: "
# for input in ${inputs[*]}; do
# hey -n 16 -c 4 -t 0 -o csv -m GET -d "$input\n" http://${host}:$((10000 + input))
# done
# echo "[DONE]"
# sleep 5
# echo "Running Experiments"
# # Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
# hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 200 -t 0 -o csv -m GET -d "40\n" http://${host}:10040 >"$results_directory/fib40-con.csv" &
# sleep $offset
# hey -z ${duration_sec}s -cpus 3 -c 200 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 >"$results_directory/fib10-con.csv" &
# sleep $((duration_sec + offset + 15))
# sleep 30
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(20 20000)
payloads=(fib10-con fib40-con)
durations_s=(60 70)
for ((i = 0; i < 2; i++)); do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
duration=${durations_s[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 {denom++}
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f%%\n", (ok / denom * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
# duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@ -1,14 +0,0 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
experiment_directory=$(pwd)
project_directory=$(cd ../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=EDF perf record -g -s sledgert "$experiment_directory/spec.json"

@ -1,124 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
schedulers=(EDF FIFO)
for scheduler in ${schedulers[*]}; do
results_directory="$experiment_directory/res/$timestamp/$scheduler"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
# Start the runtime
if [ "$1" != "-d" ]; then
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >> "$results_directory/$log" 2>> "$results_directory/$log" &
sleep 1
else
echo "Running under gdb"
echo "Running under gdb" >> "$results_directory/$log"
fi
inputs=(40 10)
duration_sec=15
offset=5
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
for input in ${inputs[*]}; do
hey -z ${duration_sec}s -cpus 3 -t 0 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
done
echo "[DONE]"
sleep 5
echo "Running Experiments"
# Run each separately
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 > "$results_directory/fib40.csv"
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 > "$results_directory/fib10.csv"
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 > "$results_directory/fib40-con.csv" &
sleep $offset
hey -z ${duration_sec}s -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 > "$results_directory/fib10-con.csv" &
sleep $((duration_sec + offset + 15))
# Stop the runtime if not in debug mode
[ "$1" != "-d" ] && kill_runtime
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(2 2 3000 3000)
payloads=(fib10 fib10-con fib40 fib40-con)
for ((i = 0; i < 4; i++)); do
# for payload in ${payloads[*]}; do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f%%\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"
done

@ -1,124 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
schedulers=(EDF FIFO)
for scheduler in ${schedulers[*]}; do
results_directory="$experiment_directory/res/$timestamp/$scheduler"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
# Start the runtime
if [ "$1" != "-d" ]; then
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >> "$results_directory/$log" 2>> "$results_directory/$log" &
sleep 1
else
echo "Running under gdb"
echo "Running under gdb" >> "$results_directory/$log"
fi
inputs=(40 10)
duration_sec=15
offset=5
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
for input in ${inputs[*]}; do
hey -z ${duration_sec}s -cpus 3 -t 0 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
done
echo "[DONE]"
sleep 5
echo "Running Experiments"
# Run each separately
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 > "$results_directory/fib40.csv"
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 > "$results_directory/fib10.csv"
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 > "$results_directory/fib40-con.csv" &
sleep $offset
hey -z ${duration_sec}s -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 > "$results_directory/fib10-con.csv" &
sleep $((duration_sec + offset + 15))
# Stop the runtime if not in debug mode
[ "$1" != "-d" ] && kill_runtime
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(2 2 3000 3000)
payloads=(fib10 fib10-con fib40 fib40-con)
for ((i = 0; i < 4; i++)); do
# for payload in ${payloads[*]}; do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f%%\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
NR==p90 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
NR==p99 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
NR==p100 {printf "%1.4f%\n", $0 / '"$deadline"' * 100}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"
done

@ -1,5 +0,0 @@
hey -n 200 -c 200 -t 0 -m GET -d "40\n" http://localhost:10040
hey -n 500 -c 500 -t 0 -m GET -d "10\n" http://localhost:10010

@ -1,8 +0,0 @@
#!/bin/bash
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
# Start the runtime
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json"

@ -1,12 +1,18 @@
#!/bin/bash
# Generates payloads of 1KB, 10KB, 100KB, 1MB
for size in 1024 $((1024 * 10)) $((1024 * 100)) $((1024 * 1024)); do
rm -rf $size.txt
i=0
echo -n "Generating $size:"
while ((i < size)); do
printf 'a' >> $size.txt
((i++))
done
echo "[DONE]"
# If the file exists, but is not the right size, wipe it
if [[ -f "$size.txt" ]] && (("$(wc -c "$size.txt" | cut -d\ -f1)" != size)); then
rm -rf "$size.txt"
fi
# Regenerate the file if missing
if [[ ! -f "$size.txt" ]]; then
echo -n "Generating $size: "
for ((i = 0; i < size; i++)); do
printf 'a' >> $size.txt
done
echo "[OK]"
fi
done

@ -1,19 +0,0 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
experiment_directory=$(pwd)
project_directory=$(cd ../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
gdb --eval-command="handle SIGUSR1 nostop" \
--eval-command="handle SIGPIPE nostop" \
--eval-command="set pagination off" \
--eval-command="set substitute-path /sledge/runtime $project_directory" \
--eval-command="run $experiment_directory/spec.json" \
sledgert

@ -1,97 +1,138 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences
# - latency
# - throughput
# - success/failure rate
# Add bash_libraries directory to path
__run_sh__base_path="$(dirname "$(realpath --logical "${BASH_SOURCE[0]}")")"
__run_sh__bash_libraries_relative_path="../bash_libraries"
__run_sh__bash_libraries_absolute_path=$(cd "$__run_sh__base_path" && cd "$__run_sh__bash_libraries_relative_path" && pwd)
export PATH="$__run_sh__bash_libraries_absolute_path:$PATH"
# Source libraries from bash_libraries directory
source "path_join.sh" || exit 1
source "framework.sh" || exit 1
source "get_result_count.sh" || exit 1
source "generate_gnuplots.sh" || exit 1
# Experiment Globals and Setups
declare -ar payloads=(1024 10240 102400 1048576)
declare -Ar ports=(
[1024]=10000
[10240]=10001
[102400]=10002
[1048576]=10003
)
declare -ri iterations=10000
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
# If one of the expected body files doesn't exist, trigger the generation script.
cd "$__run_sh__base_path/body" && ./generate.sh && cd "$OLDPWD" || exit
run_samples() {
local hostname="$1"
# Scrape the perf window size from the source if possible
local -r perf_window_path="$(path_join "$__run_sh__base_path" ../../include/perf_window.h)"
local -i perf_window_buffer_size
if ! perf_window_buffer_size=$(grep "#define PERF_WINDOW_BUFFER_SIZE" < "$perf_window_path" | cut -d\ -f3); then
printf "Failed to scrape PERF_WINDOW_BUFFER_SIZE from ../../include/perf_window.h\n"
printf "Defaulting to 16\n"
perf_window_buffer_size=16
fi
local -ir perf_window_buffer_size
# Execute workloads long enough for runtime to learn expected execution time
printf "Running Samples:\n"
for payload in "${payloads[@]}"; do
printf "\t%d Payload: " "$payload"
hey -n "$perf_window_buffer_size" -c "$perf_window_buffer_size" -q 200 -o csv -m GET -D "$__run_sh__base_path/body/$payload.txt" "http://$hostname:${ports["$payload"]}" 1> /dev/null 2> /dev/null || {
printf "[ERR]\n"
panic "samples failed"
return 1
}
printf "[OK]\n"
done
return 0
}
run_experiments() {
if (($# != 2)); then
panic "invalid number of arguments \"$1\""
return 1
elif [[ ! -d "$2" ]]; then
panic "directory \"$2\" does not exist"
return 1
fi
timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
local hostname="$1"
local results_directory="$2"
# Execute the experiments
printf "Running Experiments:\n"
for payload in "${payloads[@]}"; do
printf "\t%d Payload: " "$payload"
hey -n "$iterations" -c 1 -cpus 2 -o csv -m GET -D "$__run_sh__base_path/body/$payload.txt" "http://$hostname:${ports["$payload"]}" > "$results_directory/$payload.csv" 2> /dev/null || {
printf "[ERR]\n"
panic "$payload experiment failed"
return 1
}
get_result_count "$results_directory/$payload.csv" || {
printf "[ERR]\n"
panic "$payload.csv unexpectedly has zero requests"
return 1
}
printf "[OK]\n"
done
return 0
}
process_results() {
if (($# != 1)); then
panic "invalid number of arguments ($#, expected 1)"
return 1
elif ! [[ -d "$1" ]]; then
panic "directory $1 does not exist"
return 1
fi
mkdir -p "$results_directory"
local -r results_directory="$1"
log_environment >> "$results_directory/$log"
printf "Processing Results: "
# Start the runtime
if [ "$1" != "-d" ]; then
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >> "$results_directory/$log" 2>> "$results_directory/$log" &
sleep 1
else
echo "Running under gdb"
echo "Running under gdb" >> "$results_directory/$log"
fi
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
payloads=(1024 10240 102400 1048576)
ports=(10000 10001 10002 10003)
iterations=10000
for payload in ${payloads[*]}; do
# Calculate Success Rate for csv
awk -F, '
$7 == 200 {ok++}
END{printf "'"$payload"',%3.5f\n", (ok / '"$iterations"' * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# If one of the expected body files doesn't exist, trigger the generation script.
for payload in ${payloads[*]}; do
if test -f "$experiment_directory/body/$payload.txt"; then
continue
else
echo "Generating Payloads: "
{
cd "$experiment_directory/body" && ./generate.sh
}
break
fi
done
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
hey -n "$iterations" -c 3 -q 200 -o csv -m GET -D "$experiment_directory/body/1024.txt" http://localhost:10000
hey -n "$iterations" -c 3 -q 200 -o csv -m GET -D "$experiment_directory/body/10240.txt" http://localhost:10001
hey -n "$iterations" -c 3 -q 200 -o csv -m GET -D "$experiment_directory/body/102400.txt" http://localhost:10002
hey -n "$iterations" -c 3 -q 200 -o csv -m GET -D "$experiment_directory/body/1048576.txt" http://localhost:10003
sleep 5
echo "[DONE]"
# Execute the experiments
echo "Running Experiments"
for i in {0..3}; do
printf "\t%d Payload: " "${payloads[$i]}"
hey -n "$iterations" -c 1 -cpus 2 -o csv -m GET -D "$experiment_directory/body/${payloads[$i]}.txt" http://localhost:"${ports[$i]}" > "$results_directory/${payloads[$i]}.csv"
echo "[DONE]"
done
# Stop the runtime
if [ "$1" != "-d" ]; then
sleep 5
kill_runtime
fi
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
for payload in ${payloads[*]}; do
# Calculate Success Rate for csv
awk -F, '
$7 == 200 {ok++}
END{printf "'"$payload"',%3.5f\n", (ok / '"$iterations"' * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%d,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# We determine duration by looking at the timestamp of the last complete request
# TODO: Should this instead just use the client-side synthetic duration_sec value?
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
# Throughput is calculated as the mean number of successful requests per second
throughput=$(echo "$oks/$duration" | bc)
printf "%d,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
@ -106,18 +147,37 @@ for payload in ${payloads[*]}; do
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
rm -rf "$results_directory/$payload-response.csv"
done
# Delete scratch file used for sorting/counting
rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
printf "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots
generate_gnuplots "$results_directory" "$__run_sh__base_path" || {
printf "[ERR]\n"
panic "failed to generate gnuplots"
}
printf "[OK]\n"
return 0
}
# Expected Symbol used by the framework
experiment_main() {
local -r target_hostname="$1"
local -r results_directory="$2"
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
run_samples "$target_hostname" || return 1
run_experiments "$target_hostname" "$results_directory" || return 1
process_results "$results_directory" || return 1
# Generate gnuplots
generate_gnuplots
return 0
}
# Cleanup, if required
echo "[DONE]"
# Delegating to main provided by framework
main "$@"

@ -1,3 +0,0 @@
#!/bin/bash
hey -n 100 -c 3 -q 100 -m GET -D "./body/1024.txt" http://localhost:10000

@ -1,28 +0,0 @@
# Preemption
## Question
- How do mixed-criticality workloads perform under the Sledge scheduler policies?
- How does the latency of a high-criticality workload that triggers preemption on a system under load compare to its latency when it is the only workload on the system?
- What is the slowdown on the low-priority workload?
- How does this affect aggregate throughput?
## Setup
The system is configured with admission control disabled.
The driver script drives a bimodal distribution of long-running low-priority and short-running high-priority workloads.
Relative deadlines are tuned such that the scheduler should always preempt the low-priority workload for the high-priority workload.
The driver script first runs the two workloads separately as a baseline.
It then runs them concurrently, starting the low-priority long-running workload first such that the system begins execution and accumulates requests in the data structures. The high-priority short-running workload then begins.
## Independent Variable
The Scheduling Policy: EDF versus FIFO
## Dependent Variables
Latency of high priority workload

@ -1,18 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
results_directory="$experiment_directory/res/$timestamp/$scheduler"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
# Start the runtime
PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" | tee -a "$results_directory/$log"

@ -1,111 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Modified to target a remote host
timestamp=$(date +%s)
experiment_directory=$(pwd)
host=192.168.1.13
results_directory="$experiment_directory/res/$timestamp"
mkdir -p "$results_directory"
# Start the runtime
inputs=(40 10)
duration_sec=30
offset=5
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
for input in ${inputs[*]}; do
hey -n 45 -c 4 -t 0 -o csv -m GET -d "$input\n" http://"$host":$((10000 + input))
done
echo "[DONE]"
sleep 30
echo "Running Experiments"
# Run each separately
hey -z ${duration_sec}s -cpus 6 -c 100 -t 0 -o csv -m GET -d "10\n" "http://$host:10010" > "$results_directory/fib10.csv"
echo "fib(10) Complete"
sleep 60
hey -z ${duration_sec}s -cpus 6 -c 100 -t 0 -o csv -m GET -d "40\n" "http://$host:10040" > "$results_directory/fib40.csv"
echo "fib(40) Complete"
sleep 120
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
hey -z $((duration_sec + 2 * offset))s -cpus 3 -c 100 -t 0 -o csv -m GET -d "40\n" "http://$host:10040" > "$results_directory/fib40-con.csv" &
sleep $offset
hey -z ${duration_sec}s -cpus 3 -c 100 -t 0 -o csv -m GET -d "10\n" "http://$host:10010" > "$results_directory/fib10-con.csv" &
sleep $((duration_sec + offset + 15))
echo "fib(10) & fib(40) Complete"
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
durations_s=(15 15 15 25)
payloads=(fib10 fib10-con fib40 fib40-con)
for ((i = 0; i < ${#payloads[*]}; i++)); do
payload=${payloads[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
duration=${durations_s[$i]}
# Calculate Success Rate for csv
awk -F, '
$7 == 200 {ok++}
END{printf "'"$payload"',%3.5f%%\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
# duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@ -1,20 +0,0 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
experiment_directory=$(pwd)
project_directory=$(cd ../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
export SLEDGE_SCHEDULER="EDF"
gdb --eval-command="handle SIGUSR1 nostop" \
--eval-command="handle SIGPIPE nostop" \
--eval-command="set pagination off" \
--eval-command="set substitute-path /sledge/runtime $project_directory" \
--eval-command="run $experiment_directory/spec.json" \
sledgert

@ -1,81 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Modified to target a remote host
timestamp=1606608313-FIFO
experiment_directory=$(pwd)
results_directory="$experiment_directory/res/$timestamp"
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p998,p999,p100\n" >> "$results_directory/latency.csv"
durations_s=(15 15 15 25)
payloads=(fib10 fib10-con fib40 fib40-con)
for ((i = 0; i < ${#payloads[*]}; i++)); do
payload=${payloads[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
duration=${durations_s[$i]}
# Calculate Success Rate for csv
awk -F, '
$7 == 200 {ok++}
END{printf "'"$payload"',%3.5f%%\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
# duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p998 = int('"$oks"' * 0.998)
p999 = int('"$oks"' * 0.999)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p998 {printf "%1.4f,", $0}
NR==p999 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"

@ -1,14 +0,0 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
experiment_directory=$(pwd)
project_directory=$(cd ../.. && pwd)
binary_directory=$(cd "$project_directory"/bin && pwd)
export LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH"
export PATH="$binary_directory:$PATH"
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=EDF perf record -g -s sledgert "$experiment_directory/spec.json"

@ -1,134 +0,0 @@
#!/bin/bash
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
source ../common.sh
# Validate dependencies
declare -a -r dependencies=(awk hey wc)
for dependency in "${dependencies[@]}"; do
if ! command -v "$dependency" &> /dev/null; then
echo "$dependency could not be found"
exit
fi
done
timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
schedulers=(EDF FIFO)
for scheduler in ${schedulers[*]}; do
results_directory="$experiment_directory/res/$timestamp/$scheduler"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
# Start the runtime
if [ "$1" != "-d" ]; then
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >> "$results_directory/$log" 2>> "$results_directory/$log" &
sleep 1
else
echo "Running under gdb"
echo "Running under gdb" >> "$results_directory/$log"
fi
inputs=(40 10)
duration_sec=15
offset=5
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
for input in ${inputs[*]}; do
hey -z ${duration_sec}s -cpus 3 -t 0 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
done
echo "[DONE]"
sleep 5
echo "Running Experiments"
# Run each separately
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 > "$results_directory/fib40.csv"
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 > "$results_directory/fib10.csv"
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 > "$results_directory/fib40-con.csv" &
sleep $offset
hey -z ${duration_sec}s -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 > "$results_directory/fib10-con.csv" &
sleep $((duration_sec + offset + 15))
# Stop the runtime if not in debug mode
[ "$1" != "-d" ] && kill_runtime
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(2 2 3000 3000)
payloads=(fib10 fib10-con fib40 fib40-con)
for ((i = 0; i < 4; i++)); do
# for payload in ${payloads[*]}; do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f,", $0}
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"
done

@ -1,124 +0,0 @@
#!/bin/bash
source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
schedulers=(EDF FIFO)
for scheduler in ${schedulers[*]}; do
results_directory="$experiment_directory/res/$timestamp/$scheduler"
log=log.txt
mkdir -p "$results_directory"
log_environment >> "$results_directory/$log"
# Start the runtime
if [ "$1" != "-d" ]; then
SLEDGE_NWORKERS=5 SLEDGE_SCHEDULER=$scheduler PATH="$binary_directory:$PATH" LD_LIBRARY_PATH="$binary_directory:$LD_LIBRARY_PATH" sledgert "$experiment_directory/spec.json" >> "$results_directory/$log" 2>> "$results_directory/$log" &
sleep 1
else
echo "Running under gdb"
echo "Running under gdb" >> "$results_directory/$log"
fi
inputs=(40 10)
duration_sec=15
offset=5
# Execute workloads long enough for runtime to learn expected execution time
echo -n "Running Samples: "
for input in ${inputs[*]}; do
hey -z ${duration_sec}s -cpus 3 -t 0 -o csv -m GET -d "$input\n" http://localhost:$((10000 + input))
done
echo "[DONE]"
sleep 5
echo "Running Experiments"
# Run each separately
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 > "$results_directory/fib40.csv"
hey -z ${duration_sec}s -cpus 4 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 > "$results_directory/fib10.csv"
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
hey -z $((duration_sec + 2 * offset))s -cpus 2 -c 100 -t 0 -o csv -m GET -d "40\n" http://localhost:10040 > "$results_directory/fib40-con.csv" &
sleep $offset
hey -z ${duration_sec}s -cpus 2 -c 100 -t 0 -o csv -m GET -d "10\n" http://localhost:10010 > "$results_directory/fib10-con.csv" &
sleep $((duration_sec + offset + 15))
# Stop the runtime if not in debug mode
[ "$1" != "-d" ] && kill_runtime
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
printf "Payload,Success_Rate\n" >> "$results_directory/success.csv"
printf "Payload,Throughput\n" >> "$results_directory/throughput.csv"
printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv"
deadlines_ms=(2 2 3000 3000)
payloads=(fib10 fib10-con fib40 fib40-con)
for ((i = 0; i < 4; i++)); do
# for payload in ${payloads[*]}; do
payload=${payloads[$i]}
deadline=${deadlines_ms[$i]}
# Get Number of Requests
requests=$(($(wc -l < "$results_directory/$payload.csv") - 1))
((requests == 0)) && continue
# Calculate Success Rate for csv
awk -F, '
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
END{printf "'"$payload"',%3.5f%%\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
| sort -g > "$results_directory/$payload-response.csv"
# Get Number of 200s
oks=$(wc -l < "$results_directory/$payload-response.csv")
((oks == 0)) && continue # If all errors, skip line
# Get Latest Timestamp
duration=$(tail -n1 "$results_directory/$payload.csv" | cut -d, -f8)
throughput=$(echo "$oks/$duration" | bc)
printf "%s,%f\n" "$payload" "$throughput" >> "$results_directory/throughput.csv"
# Generate Latency Data for csv
awk '
BEGIN {
sum = 0
p50 = int('"$oks"' * 0.5)
p90 = int('"$oks"' * 0.9)
p99 = int('"$oks"' * 0.99)
p100 = '"$oks"'
printf "'"$payload"',"
}
NR==p50 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
NR==p90 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
NR==p99 {printf "%1.4f%,", $0 / '"$deadline"' * 100}
NR==p100 {printf "%1.4f%\n", $0 / '"$deadline"' * 100}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
done
# Transform csvs to dat files for gnuplot
for file in success latency throughput; do
echo -n "#" > "$results_directory/$file.dat"
tr ',' ' ' < "$results_directory/$file.csv" | column -t >> "$results_directory/$file.dat"
done
# Generate gnuplots. Commented out because we don't have *.gnuplots defined
# generate_gnuplots
# Cleanup, if required
echo "[DONE]"
done

@ -1,5 +0,0 @@
hey -n 200 -c 200 -t 0 -m GET -d "40\n" http://localhost:10040
hey -n 500 -c 500 -t 0 -m GET -d "10\n" http://localhost:10010

@ -1,30 +0,0 @@
{
"active": true,
"name": "fibonacci_10",
"path": "fibonacci_wasm.so",
"port": 10010,
"expected-execution-us": 600,
"relative-deadline-us": 2000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1024,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain"
},
{
"active": true,
"name": "fibonacci_40",
"path": "fibonacci_wasm.so",
"port": 10040,
"expected-execution-us": 550000,
"relative-deadline-us": 300000000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1024,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain"
}