Enhanced Bash scripts (#384)

* bash enhancements
added depth_to_xyz experiment

* bash enhancements
added depth_to_xyz experiment

* updated awsm repo

* remove mtdbf env file for now
master
Emil 12 months ago committed by GitHub
parent de22264f4d
commit 4ae8b02413
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -1,24 +1,23 @@
{
"configurations": [
{
"name": "Linux",
"intelliSenseMode": "clang-x64",
"includePath": [
"/usr/include/",
"${workspaceFolder}/runtime/include/",
"${workspaceFolder}/runtime/thirdparty/ck/include/",
"${workspaceFolder}/runtime/thirdparty/http-parser/",
"${workspaceFolder}/runtime/thirdparty/jsmn/",
"${workspaceFolder}/awsm/runtime/libc/wasi/include/",
"${workspaceFolder}/libsledge/include"
],
"defines": [
"x86_64",
"_GNU_SOURCE"
],
"cStandard": "c17",
"compilerPath": "/usr/bin/clang"
}
],
"version": 4
"configurations": [
{
"name": "Linux",
"intelliSenseMode": "clang-x64",
"includePath": [
"/usr/include/",
"${workspaceFolder}/runtime/include/",
"${workspaceFolder}/runtime/thirdparty/ck/include/",
"${workspaceFolder}/runtime/thirdparty/http-parser/",
"${workspaceFolder}/runtime/thirdparty/jsmn/",
"${workspaceFolder}/awsm/runtime/libc/wasi/include/",
"${workspaceFolder}/libsledge/include"
],
"defines": [
"x86_64",
"_GNU_SOURCE"
],
"cStandard": "c17"
}
],
"version": 4
}

@ -126,8 +126,25 @@
"local_cleanup_queue.h": "c",
"sandbox_state_transition.h": "c",
"http_session_perf_log.h": "c",
"perf_window.h": "c",
"global_request_scheduler_deque.h": "c"
"traffic_control.h": "c",
"memory_resource": "c",
"memory": "c",
"istream": "c",
"ostream": "c",
"sstream": "c",
"streambuf": "c",
"sandbox_perf_log.h": "c",
"global_request_scheduler_deque.h": "c",
"message.h": "c",
"dbf.h": "c",
"dbf_generic.h": "c",
"tenant_functions.h": "c",
"thread": "c",
"limits": "c",
"algorithm": "c",
"stdio.h": "c",
"get_time.h": "c",
"unistd.h": "c"
},
"files.exclude": {
"**/.git": true,
@ -189,5 +206,6 @@
"TKILL",
"WASI"
],
"C_Cpp.errorSquiggles": "Enabled"
"C_Cpp.errorSquiggles": "Enabled",
"C_Cpp.default.compilerPath": "/usr/bin/clang"
}

@ -1 +1 @@
Subproject commit f0b35e756395f79b06be8dd2660eecac94506e94
Subproject commit e2ba697861201f2aaca37460842ab5c34c8d1716

1
tests/.gitignore vendored

@ -1,4 +1,5 @@
res
res-*
perf.data
perf.data.old
samples

@ -3,29 +3,20 @@
if [ -n "$__experiment_server_globals_sh__" ]; then return; fi
__experiment_server_globals_sh__=$(date)
# App WASM so files:
declare -r FIBONACCI="fibonacci.wasm.so"
declare -r EKF="gps_ekf.wasm.so"
declare -r CIFAR10="cifar10.wasm.so"
declare -r GOCR="gocr.wasm.so"
declare -r LPD="license_plate_detection.wasm.so"
declare -r RESIZE="resize_image.wasm.so"
declare -r CNN="cnn_face_detection.wasm.so"
# The global configs for the scripts
declare -gr SERVER_LOG_FILE="perf.log"
declare -gr SERVER_HTTP_LOG_FILE="http_perf.log"
declare -gr HEY_OPTS="-disable-compression -disable-keepalive -disable-redirects"
# Globals to fill during run_init in run.sh, to use in base and generate_spec
declare -A ports=()
declare -A repl_periods=()
declare -A max_budgets=()
declare -A wasm_paths=()
declare -A expected_execs=()
declare -A deadlines=()
declare -A resp_content_types=()
declare -A arg_opts_hey=()
declare -A arg_opts_lt=()
declare -A args=()
declare -A concurrencies=()
declare -A rpss=()
declare -a workloads=()
declare -A workload_tids=()
declare -A workload_deadlines=()
declare -A workload_vars=()
# Sandbox Perf Log Globals:
declare -ga SANDBOX_METRICS=(total queued uninitialized allocated initialized runnable interrupted preempted running_sys running_user asleep returned complete error)
declare -gA SANDBOX_METRICS_FIELDS=(
@ -48,6 +39,7 @@ declare -gr SANDBOX_TENANT_NAME_FIELD=2
declare -gr SANDBOX_ROUTE_FIELD=3
declare -gr SANDBOX_CPU_FREQ_FIELD=20
declare -gr SANDBOX_RESPONSE_CODE_FIELD=21
declare -gr SANDBOX_GUARANTEE_TYPE_FIELD=22
# HTTP Session Perf Log Globals:
declare -ga HTTP_METRICS=(http_receive http_sent http_total)
@ -59,110 +51,3 @@ declare -gA HTTP_METRICS_FIELDS=(
declare -gr HTTP_TENANT_NAME_FIELD=1
declare -gr HTTP_ROUTE_FIELD=2
declare -gr HTTP_CPU_FREQ_FIELD=9
# Validate the argument triple passed to run_experiments.
#   $1 - hostname (must be non-empty)
#   $2 - results directory (must exist)
#   $3 - load generator name (must be non-empty)
# Calls panic and returns 1 on the first violation; returns 0 otherwise.
assert_run_experiments_args() {
	local -r host=$1 dir=$2 loadgen=$3
	if (($# != 3)); then
		panic "invalid number of arguments \"$#\""
		return 1
	fi
	if [[ -z "$host" ]]; then
		panic "hostname \"$host\" was empty"
		return 1
	fi
	if [[ ! -d "$dir" ]]; then
		panic "directory \"$dir\" does not exist"
		return 1
	fi
	if [[ -z "$loadgen" ]]; then
		panic "load gen \"$loadgen\" was empty"
		return 1
	fi
	return 0
}
# Require exactly one argument: an existing results directory.
# Reports problems via error_msg (softer than panic) and returns 1.
assert_process_client_results_args() {
	if (($# != 1)); then
		error_msg "invalid number of arguments ($#, expected 1)"
		return 1
	fi
	if [[ ! -d "$1" ]]; then
		error_msg "directory $1 does not exist"
		return 1
	fi
	return 0
}
# Require exactly one argument: an existing results directory.
# Calls panic and returns 1 on violation; returns 0 otherwise.
assert_process_server_results_args() {
	(($# == 1)) || {
		panic "invalid number of arguments \"$#\""
		return 1
	}
	[[ -d "$1" ]] || {
		panic "directory \"$1\" does not exist"
		return 1
	}
}
# Resolve a config value: a literal "?" placeholder falls back to $2
# (the varying parameter); any other value passes through unchanged.
# Result is written to stdout.
load_value() {
	local value=$1
	[ "$value" = "?" ] && value=$2
	echo "$value"
}
# Populate the global tenant/workload maps for one experiment run.
# Reads env-file globals (VARYING, TENANT_IDS, INIT_PORTS, MTDS_REPL_PERIODS_us,
# MTDS_MAX_BUDGETS_us, ROUTES, WASM_PATHS, EXPECTED_EXEC_TIMES_us, DEADLINES_us,
# RESP_CONTENT_TYPES, ARG_OPTS_HEY, ARG_OPTS_LT, ARGS, LOADS, NWORKERS) and fills
# ports, repl_periods, max_budgets, wasm_paths, expected_execs, deadlines,
# resp_content_types, arg_opts_hey, arg_opts_lt, args, concurrencies, rpss,
# workloads, workload_tids, workload_deadlines and workload_vars.
run_init() {
for var in "${VARYING[@]}"; do
for t_idx in "${!TENANT_IDS[@]}"; do
local tenant_id=${TENANT_IDS[$t_idx]}
# Tenant instance name embeds the varying value, e.g. "<id>-050".
local tenant=$(printf "%s-%03d" "$tenant_id" "$var")
# Each variation gets its own port, offset from the tenant's base port.
local port=$((INIT_PORTS[t_idx]+var))
# load_value: a "?" placeholder in the env file resolves to $var itself.
local repl_period=$(load_value ${MTDS_REPL_PERIODS_us[$t_idx]} $var)
local budget=$(load_value ${MTDS_MAX_BUDGETS_us[$t_idx]} $var)
# TENANTS+=("$tenant")
ports+=([$tenant]=$port)
repl_periods+=([$tenant]=$repl_period)
max_budgets+=([$tenant]=$budget)
local t_routes r_expected_execs r_deadlines r_arg_opts_hey r_arg_opts_lt r_args r_loads
# Space-separated per-route attribute lists; r_idx pairs them positionally.
IFS=' ' read -r -a t_routes <<< "${ROUTES[$t_idx]}"
IFS=' ' read -r -a r_wasm_paths <<< "${WASM_PATHS[$t_idx]}"
IFS=' ' read -r -a r_expected_execs <<< "${EXPECTED_EXEC_TIMES_us[$t_idx]}"
IFS=' ' read -r -a r_deadlines <<< "${DEADLINES_us[$t_idx]}"
IFS=' ' read -r -a r_resp_content_types <<< "${RESP_CONTENT_TYPES[$t_idx]}"
IFS=' ' read -r -a r_arg_opts_hey <<< "${ARG_OPTS_HEY[$t_idx]}"
IFS=' ' read -r -a r_arg_opts_lt <<< "${ARG_OPTS_LT[$t_idx]}"
IFS=' ' read -r -a r_args <<< "${ARGS[$t_idx]}"
IFS=' ' read -r -a r_loads <<< "${LOADS[$t_idx]}"
for r_idx in "${!t_routes[@]}"; do
local route=${t_routes[$r_idx]}
local wasm_path=${r_wasm_paths[$r_idx]}
local expected=${r_expected_execs[$r_idx]}
local deadline=${r_deadlines[$r_idx]}
local resp_content_type=${r_resp_content_types[$r_idx]}
local arg_opt_hey=${r_arg_opts_hey[$r_idx]}
local arg_opt_lt=${r_arg_opts_lt[$r_idx]}
local arg=${r_args[$r_idx]}
local load=$(load_value ${r_loads[$r_idx]} $var)
# Workload key format used by all per-workload maps below: "<tenant>-<route>".
local workload="$tenant-$route"
# Divide as float, cast the result to int (Loadtest is okay floats, HEY is not)
local con=$(echo "x = $NWORKERS * $deadline / $expected * $load / 100; x/1" | bc)
local rps=$((1000000 * con / deadline))
# local rps=$(echo "x = 1000000 * $con / $deadline; x/1" | bc)
wasm_paths+=([$workload]=$wasm_path)
expected_execs+=([$workload]=$expected)
deadlines+=([$workload]=$deadline)
resp_content_types+=([$workload]=$resp_content_type)
arg_opts_hey+=([$workload]=$arg_opt_hey)
arg_opts_lt+=([$workload]=$arg_opt_lt)
args+=([$workload]=$arg)
concurrencies+=([$workload]=$con)
rpss+=([$workload]=$rps)
workloads+=("$workload")
workload_tids+=([$workload]=$tenant_id)
workload_deadlines+=([$workload]=$deadline)
workload_vars+=([$workload]=$var)
done
done
done
}

@ -164,7 +164,7 @@ __framework_sh__parse_arguments() {
;;
-n=* | --name=*)
echo "Set experiment name to ${i#*=}"
__framework_sh__experiment_name="${i#*=}"
__framework_sh__experiment_name+=" ${i#*=}"
shift
;;
-e=* | --envfile=*)
@ -190,6 +190,9 @@ __framework_sh__parse_arguments() {
__framework_sh__usage
exit 0
;;
nuclio | Nuclio)
echo "Running for Nuclio"
;;
*)
echo "$1 is a not a valid option"
__framework_sh__usage
@ -199,7 +202,7 @@ __framework_sh__parse_arguments() {
done
if [[ -z "$__framework_sh__envfile" ]]; then
if [[ -d "$__framework_sh__application_directory/res/$__framework_sh__experiment_name/" ]]; then
if [[ -d "$__framework_sh__application_directory/res-$__framework_sh__role/$__framework_sh__experiment_name/" ]]; then
echo "Experiment $__framework_sh__experiment_name already exists. Pick a unique name!"
exit 1
fi
@ -209,8 +212,8 @@ __framework_sh__parse_arguments() {
exit 1
fi
short_name="$(basename "${__framework_sh__envfile/.env/}")"
echo "$__framework_sh__application_directory/res/$__framework_sh__experiment_name/$short_name/"
if [[ -d "$__framework_sh__application_directory/res/$__framework_sh__experiment_name/$short_name/" ]]; then
echo "$__framework_sh__application_directory/res-$__framework_sh__role/$__framework_sh__experiment_name/$short_name/"
if [[ -d "$__framework_sh__application_directory/res-$__framework_sh__role/$__framework_sh__experiment_name/$short_name/" ]]; then
echo "Variant $short_name was already run in experiment ${__framework_sh__experiment_name}."
exit 1
fi
@ -469,8 +472,8 @@ __framework_sh__run_both() {
__framework_sh__create_and_export_results_directory() {
local -r subdirectory=${1:-""}
local dir="$__framework_sh__application_directory/res/$__framework_sh__experiment_name"
# local dir="$__framework_sh__application_directory/res/$__framework_sh__experiment_name/$subdirectory"
local dir="$__framework_sh__application_directory/res-$__framework_sh__role/$__framework_sh__experiment_name"
# local dir="$__framework_sh__application_directory/res-$__framework_sh__role/$__framework_sh__experiment_name/$subdirectory"
mkdir -p "$dir" || {
panic "mkdir -p $dir"

@ -1,8 +1,30 @@
# shellcheck shell=bash
# shellcheck disable=SC2154
# shellcheck disable=SC2154,SC2155
if [ -n "$__generate_spec_json_sh__" ]; then return; fi
__generate_spec_json_sh__=$(date)
# Write ./result_admin.json: the contents of ./template.json extended with an
# "Admin" tenant stanza (port 55555, zero replenishment/budget/reservation)
# whose routes are the template's routes merged with /admin and /terminator
# overrides (50th admissions percentile, 1ms expected exec, 10ms deadline).
# NOTE(review): requires `jq` on PATH and ./template.json in the CWD — neither
# is checked here; confirm callers guarantee both.
jq_admin_spec() {
jq ". + {\
\"name\": \"Admin\",\
\"port\": 55555,\
\"replenishment-period-us\": 0,\
\"max-budget-us\": 0,\
\"reservation-percentile\": 0,\
\"routes\": [\
.routes[] + {\
\"route\": \"/admin\",\
\"admissions-percentile\": 50,\
\"expected-execution-us\": 1000,\
\"relative-deadline-us\": 10000},\
.routes[] + {\
\"route\": \"/terminator\",\
\"admissions-percentile\": 50,\
\"expected-execution-us\": 1000,\
\"relative-deadline-us\": 10000}\
]\
}" < "./template.json" > "./result_admin.json"
}
generate_spec_json() {
printf "Generating 'spec.json'\n"
@ -13,12 +35,14 @@ generate_spec_json() {
local port=${ports[$tenant]}
local repl_period=${repl_periods[$tenant]}
local budget=${max_budgets[$tenant]}
local reservation=${reservations[$tenant]}
jq_str=". + {
\"name\": \"$tenant\",\
\"port\": $port,\
\"replenishment-period-us\": $repl_period,\
\"max-budget-us\": $budget,\
\"reservation-percentile\": $reservation,\
\"routes\": ["
local t_routes

@ -0,0 +1,113 @@
#!/bin/bash
# Experiment-tooling setup: installs httpie, hey, loadtest (via nvm/npm),
# gnuplot, jq and htop. Each tool is installed only when missing from PATH;
# apt commands are invoked via sudo unless already running as root.
if ! command -v http > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y httpie
else
sudo apt update
sudo apt install -y httpie
fi
fi
# hey: fetch the prebuilt release binary and place it in /usr/bin.
if ! command -v hey > /dev/null; then
HEY_URL=https://hey-release.s3.us-east-2.amazonaws.com/hey_linux_amd64
wget $HEY_URL -O hey
chmod +x hey
if [[ $(whoami) == "root" ]]; then
mv hey /usr/bin/hey
else
sudo mv hey /usr/bin/hey
fi
fi
# loadtest: installed globally from Emil's fork (supports POSTing binary files);
# npm itself is bootstrapped through nvm with Node 14 when missing.
if ! command -v loadtest > /dev/null; then
if ! command -v npm > /dev/null; then
# if [[ $(whoami) == "root" ]]; then
# apt update
# apt install -y npm
# else
# sudo apt update
# sudo apt install -y npm
# fi
# installs NVM (Node Version Manager)
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
# download and install Node.js
nvm install 14
# verifies the right Node.js version is in the environment
node -v # should print `v14.21.3`
# verifies the right NPM version is in the environment
npm -v # should print `6.14.18`
fi
# Try pulling Emil's version of loadtest to support post binary files
# if [[ $(whoami) == "root" ]]; then
# npm install -y -g loadtest
# else
# sudo npm install -y -g loadtest
# fi
# NOTE(review): the clone and pushd results are unchecked — if the clone fails,
# `npm install -g` would run against whatever directory we land in.
pushd ~
git clone https://github.com/emil916/loadtest.git
pushd loadtest
# if [[ $(whoami) == "root" ]]; then
npm install -g
# else
# sudo npm install -g
# fi
popd
popd
fi
if ! command -v gnuplot > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y gnuplot
else
sudo apt update
sudo apt install -y gnuplot
fi
fi
if ! command -v jq > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y jq
else
sudo apt update
sudo apt install -y jq
fi
fi
if ! command -v htop > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y htop
else
sudo apt update
sudo apt install -y htop
fi
fi
# For SOD:
# if ! command -v imagemagick > /dev/null; then
# if [ "$(whoami)" == "root" ]; then
# apt-get install -y imagemagick
# else
# sudo apt-get install -y imagemagick
# fi
# fi
# For GOCR, too many to check one-by-one, so uncomment below to install:
# if [[ "$(whoami)" == "root" ]]; then
# apt-get install -y netpbm pango1.0-tools wamerican fonts-roboto fonts-cascadia-code fonts-dejavu
# else
# sudo apt-get install -y netpbm pango1.0-tools wamerican fonts-roboto fonts-cascadia-code fonts-dejavu
# fi
# NOTE(review): sourcing ~/.bashrc here affects only this script's own shell,
# not the invoking shell — presumably meant to pick up nvm's PATH edits; confirm.
source ~/.bashrc

@ -1,74 +0,0 @@
#!/bin/bash
# Legacy client setup (this commit removes it): installs httpie, hey,
# loadtest (npm from apt), gnuplot and jq when missing from PATH; uses sudo
# for apt unless already root.
if ! command -v http > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y httpie
else
sudo apt update
sudo apt install -y httpie
fi
fi
# hey: prebuilt release binary dropped into /usr/bin.
if ! command -v hey > /dev/null; then
HEY_URL=https://hey-release.s3.us-east-2.amazonaws.com/hey_linux_amd64
wget $HEY_URL -O hey
chmod +x hey
if [[ $(whoami) == "root" ]]; then
mv hey /usr/bin/hey
else
sudo mv hey /usr/bin/hey
fi
fi
# loadtest: global npm install from Emil's fork (binary-file POST support).
if ! command -v loadtest > /dev/null; then
if ! command -v npm > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y npm
else
sudo apt update
sudo apt install -y npm
fi
fi
# Try pulling Emil's version of loadtest to support post binary files
# if [[ $(whoami) == "root" ]]; then
# npm install -y -g loadtest
# else
# sudo npm install -y -g loadtest
# fi
pushd ~
git clone https://github.com/emil916/loadtest.git
pushd loadtest
if [[ $(whoami) == "root" ]]; then
npm install -g
else
sudo npm install -g
fi
popd
popd
fi
if ! command -v gnuplot > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt-get update
apt-get install -y gnuplot
else
sudo apt-get update
sudo apt-get install -y gnuplot
fi
fi
if ! command -v jq > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y jq
else
sudo apt update
sudo apt install -y jq
fi
fi

@ -1,48 +0,0 @@
#!/bin/bash
# Legacy server-side setup (this commit removes it): installs gnuplot, jq and
# htop when missing from PATH; uses sudo for apt unless already root.
if ! command -v gnuplot > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt-get update
apt-get install -y gnuplot
else
sudo apt-get update
sudo apt-get install -y gnuplot
fi
fi
if ! command -v jq > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y jq
else
sudo apt update
sudo apt install -y jq
fi
fi
if ! command -v htop > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y htop
else
sudo apt update
sudo apt install -y htop
fi
fi
# For SOD:
# if ! command -v imagemagick > /dev/null; then
# if [ "$(whoami)" == "root" ]; then
# apt-get install -y imagemagick
# else
# sudo apt-get install -y imagemagick
# fi
# fi
# For GOCR, too many to check one-by-one, so uncomment below to install:
# if [[ "$(whoami)" == "root" ]]; then
# apt-get install -y netpbm pango1.0-tools wamerican fonts-roboto fonts-cascadia-code fonts-dejavu
# else
# sudo apt-get install -y netpbm pango1.0-tools wamerican fonts-roboto fonts-cascadia-code fonts-dejavu
# fi

@ -1,84 +0,0 @@
#!/bin/bash
# Legacy combined setup (this commit removes it): installs httpie, hey,
# loadtest (npm from apt), gnuplot, jq and htop when missing from PATH;
# uses sudo for apt unless already root.
if ! command -v http > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y httpie
else
sudo apt update
sudo apt install -y httpie
fi
fi
# hey: prebuilt release binary dropped into /usr/bin.
if ! command -v hey > /dev/null; then
HEY_URL=https://hey-release.s3.us-east-2.amazonaws.com/hey_linux_amd64
wget $HEY_URL -O hey
chmod +x hey
if [[ $(whoami) == "root" ]]; then
mv hey /usr/bin/hey
else
sudo mv hey /usr/bin/hey
fi
fi
# loadtest: global npm install from Emil's fork (binary-file POST support).
if ! command -v loadtest > /dev/null; then
if ! command -v npm > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y npm
else
sudo apt update
sudo apt install -y npm
fi
fi
# Try pulling Emil's version of loadtest to support post binary files
# if [[ $(whoami) == "root" ]]; then
# npm install -y -g loadtest
# else
# sudo npm install -y -g loadtest
# fi
pushd ~
git clone https://github.com/emil916/loadtest.git
pushd loadtest
if [[ $(whoami) == "root" ]]; then
npm install -g
else
sudo npm install -g
fi
popd
popd
fi
if ! command -v gnuplot > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt-get update
apt-get install -y gnuplot
else
sudo apt-get update
sudo apt-get install -y gnuplot
fi
fi
if ! command -v jq > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y jq
else
sudo apt update
sudo apt install -y jq
fi
fi
if ! command -v htop > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y htop
else
sudo apt update
sudo apt install -y htop
fi
fi

@ -1,6 +1,6 @@
#!/bin/bash
# shellcheck disable=SC1091,SC2034,SC2153,SC2154
# shellcheck disable=SC1091,SC2034,SC2153,SC2154,SC2155
# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success rate
# Success - The percentage of requests that complete by their deadlines
@ -22,6 +22,7 @@ source panic.sh || exit 1
source path_join.sh || exit 1
source percentiles_table.sh || exit 1
source experiment_globals.sh || exit 1
source multi_tenancy_init.sh || exit 1
source generate_spec_json.sh || exit 1
validate_dependencies hey loadtest gnuplot jq
@ -51,22 +52,54 @@ run_experiments() {
local expected=${expected_execs[$workload]}
local deadline=${deadlines[$workload]}
local arg=${args[$workload]}
local con=${concurrencies[$workload]}
local rps=${rpss[$workload]}
echo "CON for $workload" : "$con"
echo "RPS for $workload" : "$rps"
# local con=${concurrencies[$workload]}
# local rps=${rpss[$workload]}
local load=${loads[$workload]}
local pid
local -a pids # Run concurrently
local -A pid_workloads # Run concurrently
# Cast the result to int for HEY (Loadtest is okay floats, HEY is not)
if [ "$loadgen" = "hey" ]; then
local con=$((load * NWORKERS * deadline / expected / 100))
if [ "$con" = 0 ]; then con=1; fi
# printf -v con %.0f "$con"
echo "CON set for $workload" : "$con"
local arg_opt_hey=${arg_opts_hey[$workload]}
hey $HEY_OPTS -z "$DURATION_sec"s -c "$con" -t 0 -o csv -m POST "$arg_opt_hey" "$arg" "http://${hostname}:$port/$route" > "$results_directory/$workload.csv" 2> "$results_directory/$workload-err.dat" &
hey -H "Expect: " $HEY_OPTS -z "$DURATION_sec"s -c "$con" -t 0 -o csv -m POST "$arg_opt_hey" "$arg" "http://${hostname}:$port/$route" > "$results_directory/$workload.csv" 2> "$results_directory/$workload-err.dat" &
elif [ "$loadgen" = "loadtest" ]; then
local con=1 #$((NWORKERS * deadline / expected))
local rps=$(echo "scale=2; x = $load * $NWORKERS * 1000000 / $expected / 100; x" | bc)
# if [ "$(echo "$rps > $NWORKERS" | bc)" -eq 1 ]; then con=$NWORKERS; fi
echo "CON set for $workload" : "$con"
echo "RPS set for $workload" : "$rps"
local arg_opt_lt=${arg_opts_lt[$workload]}
loadtest -t "$DURATION_sec" -c "$con" --rps "$rps" "$arg_opt_lt" "$arg" "http://${hostname}:${port}/$route" > "$results_directory/$workload.dat" 2> "$results_directory/$workload-err.dat" &
[ "$LOADTEST_LOG_RANDOM" = true ] && lograndom=--lograndom
if [ "$LOADTEST_REQUEST_TIMEOUT" = true ]; then
deadline_ms=$((deadline/1000 + 1))
echo "Timeout set for $workload" : "$deadline_ms"
req_timeout="-d $deadline_ms"
fi
[ "$NUCLIO_MODE_ENABLED" = true ] && keep_alive=-k
step=2500
it=1
while (( $(bc <<< "$rps > $step") )); do
echo " Loadtest #$it: rps of $step/$rps"
# shellcheck disable=SC2086
loadtest -H "Expect: " -t $DURATION_sec -c $con --rps $step $arg_opt_lt $arg $req_timeout $lograndom $keep_alive "http://${hostname}:${port}/$route" >> "$results_directory/$workload.dat" 2>> "$results_directory/$workload-err.dat" &
rps=$(bc <<< "$rps - $step")
pid="$!"
pids+=("$pid")
pid_workloads+=([$pid]="$workload-step-$it")
((it++))
done
echo " Loadtest #$it: rps of $rps (last)"
# shellcheck disable=SC2086
loadtest -H "Expect: " -t $DURATION_sec -c $con --rps $rps $arg_opt_lt $arg $req_timeout $lograndom $keep_alive "http://${hostname}:${port}/$route" >> "$results_directory/$workload.dat" 2>> "$results_directory/$workload-err.dat" &
fi
pid="$!"
pids+=("$pid")
@ -191,8 +224,9 @@ process_client_results_loadtest() {
assert_process_client_results_args "$@"
local -r results_directory="$1"
local sum_of_durations=0
printf "Processing Loadtest Results: "
printf "Processing Loadtest Results: \n"
# Write headers to CSVs
for t_id in "${TENANT_IDS[@]}"; do
@ -204,6 +238,9 @@ process_client_results_loadtest() {
for workload in "${workloads[@]}"; do
local t_id=${workload_tids[$workload]}
local var=${workload_vars[$workload]}
# local expected=${expected_execs[$workload]}
# local load=${loads[$workload]}
# local rps=$(echo "scale=2; x = $load * $NWORKERS * 1000000 / $expected / 100; x" | bc)
if [ ! -s "$results_directory/$workload-err.dat" ]; then
# The error file is empty. So remove it.
@ -217,12 +254,12 @@ process_client_results_loadtest() {
fi
# Get Number of 200s and then calculate Success Rate (percent of requests that return 200)
total=$(grep "Completed requests:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13)
total_failed=$(grep "Total errors:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13)
denied=$(grep "429:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)
missed_dl=$(grep "408:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)
killed=$(grep "409:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)
misc_err=$(grep "\-1:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)
total=$(awk '/Completed requests:/ {sum += $13} END {print sum}' "$results_directory/${workload}.dat")
total_failed=$(awk '/Total errors:/ {sum += $13} END {print sum}' "$results_directory/${workload}.dat")
denied=$(awk '/429:/ {sum += $12} END {print sum}' "$results_directory/${workload}.dat")
missed_dl=$(awk '/408:/ {sum += $12} END {print sum}' "$results_directory/${workload}.dat")
killed=$(awk '/409:/ {sum += $12} END {print sum}' "$results_directory/${workload}.dat")
misc_err=$(awk '/(-1:|503:)/ {sum += $12} END {print sum}' "$results_directory/${workload}.dat")
all200=$((total - total_failed))
# ((all200 == 0)) && continue # If all errors, skip line
@ -230,21 +267,24 @@ process_client_results_loadtest() {
printf "%s,%3.1f,%d,%d,%d,%d,%d,%d,%d\n" "$var" "$success_rate" "$total" "$all200" "$total_failed" "$denied" "$missed_dl" "$killed" "$misc_err" >> "$results_directory/success_$t_id.csv"
# Throughput is calculated as the mean number of successful requests per second
duration=$(grep "Total time:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13)
printf -v duration %.0f "$duration"
duration=$(awk '/Total time:/ {sum += $13; count++} END {print sum / count}' "$results_directory/${workload}.dat")
sum_of_durations=$(echo "scale=2; x = $sum_of_durations+$duration; x" | bc)
# printf -v duration %.2f "$duration"
# throughput=$(grep "Requests per second" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 14 | tail -n 1) # throughput of ALL
throughput=$(echo "$all200/$duration" | bc)
printf "%s,%d\n" "$var" "$throughput" >> "$results_directory/throughput_$t_id.csv"
throughput=$(echo "$total/$duration" | bc)
goodput=$(echo "$all200/$duration" | bc)
printf "%s,%.1f\n" "$var" "$success_rate" >> "$results_directory/throughput_$t_id.csv"
# Generate Latency Data
min=$(echo "$(grep "Minimum latency:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13)*1000" | bc)
p50=$(echo "$(grep 50% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)*1000" | bc)
p90=$(echo "$(grep 90% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)*1000" | bc)
p99=$(echo "$(grep 99% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)*1000" | bc)
p100=$(echo "$(grep 100% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12 | tail -n 1)*1000" | bc)
mean=$(echo "scale=1;$(grep "Mean latency:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13)*1000" | bc)
printf "%s,%d,%d,%.1f,%d,%d,%d,%d\n" "$var" "$total" "$min" "$mean" "$p50" "$p90" "$p99" "$p100" >> "$results_directory/latency_$t_id.csv"
min=$(awk '/Minimum latency/ {sum += $13; count++} END {print int(sum*1000/count)}' "$results_directory/${workload}.dat")
mean=$(awk '/Mean latency:/ {sum += $13; count++} END {print int(sum*1000/count)}' "$results_directory/${workload}.dat")
p50=$(awk '/50%/ {sum += $12; count++} END {print int(sum*1000/count)}' "$results_directory/${workload}.dat")
p90=$(awk '/90%/ {sum += $12; count++} END {print int(sum*1000/count)}' "$results_directory/${workload}.dat")
p99=$(awk '/99%/ {sum += $12; count++} END {print int(sum*1000/count)}' "$results_directory/${workload}.dat")
p100=$(awk '/100%/ {sum += $12; count++} END {print int(sum*1000/count)}' "$results_directory/${workload}.dat")
printf "%s,%d,%d,%d,%d,%d,%d,%d\n" "$var" "$total" "$min" "$mean" "$p50" "$p90" "$p99" "$p100" >> "$results_directory/latency_$t_id.csv"
printf "Workload %20s duration: %.2f sec\n" "$workload" "$duration"
done
for t_id in "${TENANT_IDS[@]}"; do
@ -259,6 +299,8 @@ process_client_results_loadtest() {
panic "failed to generate gnuplots"
}
local ave_duration=$(echo "scale=2; x = $sum_of_durations/${#workloads[@]}; x" | bc)
printf "Experiments average duration: %.2f sec\n" "$ave_duration"
printf "[OK]\n"
return 0
}
@ -278,7 +320,7 @@ process_server_results() {
# Write headers to CSVs
for t_id in "${TENANT_IDS[@]}"; do
printf "Workload,Scs%%,TOTAL,SrvScs,All200,AllFail,DenyBE,DenyG,xDenyBE,xDenyG,MisD_Glb,MisD_Loc,MisD_WB,Shed_Glb,Shed_Loc,Shed_WB,Misc\n" >> "$results_directory/success_$t_id.csv"
printf "Workload,Scs%%,TOTAL,SrvScs,All200,AllFail,DenyBE,DenyG,xDenyBE,xDenyG,MisD_Glb,MisD_Loc,MisD_WB,Shed_Glb,Shed_Loc,Misc,#Guar,#BE\n" >> "$results_directory/success_$t_id.csv"
printf "Workload,Throughput\n" >> "$results_directory/throughput_$t_id.csv"
percentiles_table_header "$results_directory/latency_$t_id.csv" "Workload"
@ -302,7 +344,7 @@ process_server_results() {
for metric in "${SANDBOX_METRICS[@]}"; do
awk -F, '
{workload = sprintf("%s-%s", $'"$SANDBOX_TENANT_NAME_FIELD"', substr($'"$SANDBOX_ROUTE_FIELD"',2))}
workload == "'"$workload"'" {printf("%d,%d\n", $'"${SANDBOX_METRICS_FIELDS[$metric]}"' / $'"$SANDBOX_CPU_FREQ_FIELD"', $'"$SANDBOX_RESPONSE_CODE_FIELD"')}
workload == "'"$workload"'" {printf("%d,%d,%d\n", $'"${SANDBOX_METRICS_FIELDS[$metric]}"' / $'"$SANDBOX_CPU_FREQ_FIELD"', $'"$SANDBOX_RESPONSE_CODE_FIELD"', $'"$SANDBOX_GUARANTEE_TYPE_FIELD"')}
' < "$results_directory/$SERVER_LOG_FILE" | sort -g > "$results_directory/$workload/${metric}_sorted.csv"
percentiles_table_row "$results_directory/$workload/${metric}_sorted.csv" "$results_directory/${metric}_$t_id.csv" "$workload"
@ -311,11 +353,11 @@ process_server_results() {
# rm "$results_directory/$workload/${metric}_sorted.csv"
done
awk -F, '$2 == 200 {printf("%d,%d\n", $1, $2)}' < "$results_directory/$workload/running_user_sorted.csv" > "$results_directory/$workload/running_user_200_sorted.csv"
awk -F, '$2 == 200 {printf("%d,%d\n", $1, $2, $3)}' < "$results_directory/$workload/running_user_sorted.csv" > "$results_directory/$workload/running_user_200_sorted.csv"
percentiles_table_row "$results_directory/$workload/running_user_200_sorted.csv" "$results_directory/running_user_200_$t_id.csv" "$workload"
# awk -F, '$1 > 0 {printf("%d,%d\n", $1, $2)}' < "$results_directory/$workload/running_user_sorted.csv" > "$results_directory/$workload/running_user_nonzero_sorted.csv"
# awk -F, '$1 > 0 {printf("%d,%d\n", $1, $2, $3)}' < "$results_directory/$workload/running_user_sorted.csv" > "$results_directory/$workload/running_user_nonzero_sorted.csv"
# percentiles_table_row "$results_directory/$workload/running_user_nonzero_sorted.csv" "$results_directory/running_user_nonzero_$t_id.csv" "$workload"
awk -F, '$2 == 200 {printf("%d,%d\n", $1, $2)}' < "$results_directory/$workload/total_sorted.csv" > "$results_directory/$workload/total_200_sorted.csv"
awk -F, '$2 == 200 {printf("%d,%d\n", $1, $2, $3)}' < "$results_directory/$workload/total_sorted.csv" > "$results_directory/$workload/total_200_sorted.csv"
percentiles_table_row "$results_directory/$workload/total_200_sorted.csv" "$results_directory/total_200_$t_id.csv" "$workload"
# Memory Allocation
@ -337,9 +379,11 @@ process_server_results() {
$2 == 4082 {mis_dl_wb++}
$2 == 4090 {shed_glob++}
$2 == 4091 {shed_local++}
$2 == 4092 {shed_wb++}
$2 == 4093 {misc++}
END{printf "'"$var"',%3.1f,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", (all200*100/NR), NR, ok, all200, total_failed, denied_any, denied_gtd, x_denied_any, x_denied_gtd, mis_dl_glob, mis_dl_local, mis_dl_wb, shed_glob, shed_local, shed_wb, misc}
$2 == 4293 {misc++}
$3 == 1 {guaranteed++}
$3 == 2 {besteffort++}
END{printf "'"$var"',%3.1f,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", (all200*100/NR), NR, ok, all200, total_failed, denied_any, denied_gtd, x_denied_any, x_denied_gtd, mis_dl_glob, mis_dl_local, mis_dl_wb, shed_glob, shed_local, misc, guaranteed, besteffort}
' < "$results_directory/$workload/total_sorted.csv" >> "$results_directory/success_$t_id.csv"
# Throughput is calculated on the client side, so ignore the below line

@ -0,0 +1,126 @@
# shellcheck shell=bash
# shellcheck disable=SC2034,SC2153,SC2154,SC2155
# Include guard: return immediately if this file was already sourced.
if [ -n "$__run_init_sh__" ]; then return; fi
__run_init_sh__=$(date)
# Globals to fill during run_init in run.sh, to use in base and generate_spec
# Per-tenant maps, keyed by tenant instance name:
declare -A ports=()
declare -A repl_periods=()
declare -A max_budgets=()
declare -A reservations=()
# Per-workload maps, keyed by "<tenant>-<route>":
declare -A wasm_paths=()
declare -A expected_execs=()
declare -A deadlines=()
declare -A resp_content_types=()
declare -A arg_opts_hey=()
declare -A arg_opts_lt=()
declare -A args=()
declare -A loads=()
# Ordered list of workload keys, plus per-workload bookkeeping maps:
declare -a workloads=()
declare -A workload_tids=()
declare -A workload_deadlines=()
declare -A workload_vars=()
# Sanity-check the run_experiments arguments:
#   $1 hostname (non-empty), $2 results directory (exists), $3 load generator (non-empty).
# On the first violation, panic is invoked and 1 is returned.
assert_run_experiments_args() {
	if (($# != 3)); then
		panic "invalid number of arguments \"$#\""
		return 1
	fi
	[[ -n "$1" ]] || { panic "hostname \"$1\" was empty"; return 1; }
	[[ -d "$2" ]] || { panic "directory \"$2\" does not exist"; return 1; }
	[[ -n "$3" ]] || { panic "load gen \"$3\" was empty"; return 1; }
}
# Expect a single argument naming an existing results directory; reports
# problems through error_msg and returns 1, otherwise returns 0.
assert_process_client_results_args() {
	(($# == 1)) || { error_msg "invalid number of arguments ($#, expected 1)"; return 1; }
	[[ -d "$1" ]] || { error_msg "directory $1 does not exist"; return 1; }
}
# Expect a single argument naming an existing results directory; panics and
# returns 1 otherwise.
assert_process_server_results_args() {
	if (($# != 1)); then
		panic "invalid number of arguments \"$#\""
		return 1
	else
		if [[ ! -d "$1" ]]; then
			panic "directory \"$1\" does not exist"
			return 1
		fi
	fi
	return 0
}
# Echo $1 unless it is the "?" placeholder, in which case echo the
# fallback $2 (the varying parameter of the current run).
load_value() {
	if [ "$1" = "?" ]; then
		echo "$2"
	else
		echo "$1"
	fi
}
# Populate the global tenant/workload maps declared in this file for one run.
# Reads env-file globals (VARYING, TENANT_IDS, INIT_PORTS, NUCLIO_MODE_ENABLED,
# MTDS_REPL_PERIODS_us, MTDS_MAX_BUDGETS_us, MTDBF_RESERVATIONS_p, ROUTES,
# WASM_PATHS, EXPECTED_EXEC_TIMES_us, DEADLINE_TO_EXEC_RATIOs,
# RESP_CONTENT_TYPES, ARG_OPTS_HEY, ARG_OPTS_LT, ARGS, LOADS).
run_init() {
for var in "${VARYING[@]}"; do
for t_idx in "${!TENANT_IDS[@]}"; do
local tenant_id=${TENANT_IDS[$t_idx]}
# Tenant instance name embeds the varying value, e.g. "<id>-050".
local tenant=$(printf "%s-%03d" "$tenant_id" "$var")
# Each variation normally gets its own port offset from the base port...
local port=$((INIT_PORTS[t_idx]+var))
# ...but in Nuclio mode every variation reuses the tenant's base port.
[ "$NUCLIO_MODE_ENABLED" == true ] && port=${INIT_PORTS[t_idx]}
# load_value: a "?" placeholder in the env file resolves to $var itself.
local repl_period=$(load_value "${MTDS_REPL_PERIODS_us[$t_idx]}" "$var")
local budget=$(load_value "${MTDS_MAX_BUDGETS_us[$t_idx]}" "$var")
local reservation=$(load_value "${MTDBF_RESERVATIONS_p[$t_idx]}" "$var")
ports+=([$tenant]=$port)
repl_periods+=([$tenant]=$repl_period)
max_budgets+=([$tenant]=$budget)
reservations+=([$tenant]=$reservation)
local t_routes r_expected_execs r_deadlines r_arg_opts_hey r_arg_opts_lt r_args r_loads
# Space-separated per-route attribute lists; r_idx pairs them positionally.
IFS=' ' read -r -a t_routes <<< "${ROUTES[$t_idx]}"
IFS=' ' read -r -a r_wasm_paths <<< "${WASM_PATHS[$t_idx]}"
IFS=' ' read -r -a r_expected_execs <<< "${EXPECTED_EXEC_TIMES_us[$t_idx]}"
IFS=' ' read -r -a r_dl_to_exec_ratios <<< "${DEADLINE_TO_EXEC_RATIOs[$t_idx]}"
IFS=' ' read -r -a r_resp_content_types <<< "${RESP_CONTENT_TYPES[$t_idx]}"
IFS=' ' read -r -a r_arg_opts_hey <<< "${ARG_OPTS_HEY[$t_idx]}"
IFS=' ' read -r -a r_arg_opts_lt <<< "${ARG_OPTS_LT[$t_idx]}"
IFS=' ' read -r -a r_args <<< "${ARGS[$t_idx]}"
IFS=' ' read -r -a r_loads <<< "${LOADS[$t_idx]}"
for r_idx in "${!t_routes[@]}"; do
local route=${t_routes[$r_idx]}
local wasm_path=${r_wasm_paths[$r_idx]}
local expected=$(load_value "${r_expected_execs[$r_idx]}" "$var")
# local deadline=${r_deadlines[$r_idx]}
# Deadline is derived from expected exec time and a percentage ratio
# (integer arithmetic: ratio 200 means deadline = 2x expected).
local dl_to_exec_ratio=${r_dl_to_exec_ratios[$r_idx]}
local deadline=$((expected*dl_to_exec_ratio/100))
local resp_content_type=${r_resp_content_types[$r_idx]}
local arg_opt_hey=${r_arg_opts_hey[$r_idx]}
local arg_opt_lt=${r_arg_opts_lt[$r_idx]}
local arg=$(load_value "${r_args[$r_idx]}" "$var")
local load=$(load_value "${r_loads[$r_idx]}" "$var")
# Workload key format used by all per-workload maps: "<tenant>-<route>".
local workload="$tenant-$route"
wasm_paths+=([$workload]=$wasm_path)
expected_execs+=([$workload]=$expected)
deadlines+=([$workload]=$deadline)
resp_content_types+=([$workload]=$resp_content_type)
arg_opts_hey+=([$workload]=$arg_opt_hey)
arg_opts_lt+=([$workload]=$arg_opt_lt)
args+=([$workload]=$arg)
loads+=([$workload]=$load)
workloads+=("$workload")
workload_tids+=([$workload]=$tenant_id)
workload_deadlines+=([$workload]=$deadline)
workload_vars+=([$workload]=$var)
done
done
done
}

@ -0,0 +1 @@
out.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 606 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.9 MiB

@ -0,0 +1,32 @@
# Test driver for the CMU depth_to_xyz workload: start the SLEdge runtime
# under the MTDBF scheduler and exercise it with curl / hey / loadtest.
SLEDGE_BINARY_DIR=../../runtime/bin
HOSTNAME=localhost
PORT=10000
HEY_OPTS=-disable-compression -disable-keepalive -disable-redirects

# None of these targets produce a file of the same name; declare them phony
# so a stray file named e.g. "clean" or "run" cannot shadow the recipe.
.PHONY: default clean run debug valgrind client client-hey client-loadtest

default: run

clean:
	rm -rf res/*

# Run the server in the foreground with the MTDBF scheduler.
run:
	SLEDGE_SCHEDULER=MTDBF LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} ${SLEDGE_BINARY_DIR}/sledgert spec.json

# Same as `run` but under gdb; the handle/set commands silence the signals
# the runtime uses internally (SIGUSR1 for preemption, SIGPIPE on hangups).
debug:
	SLEDGE_SCHEDULER=MTDBF LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} gdb ${SLEDGE_BINARY_DIR}/sledgert \
		--eval-command="handle SIGUSR1 noprint nostop" \
		--eval-command="handle SIGPIPE noprint nostop" \
		--eval-command="set pagination off" \
		--eval-command="set print pretty" \
		--eval-command="run spec.json"

# Leak checking; preemption disabled and a single worker so valgrind can
# follow one thread, with a raised stack-frame limit for the wasm stacks.
valgrind:
	SLEDGE_DISABLE_PREEMPTION=true SLEDGE_NWORKERS=1 LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} valgrind --leak-check=full --max-stackframe=11150456 --run-libc-freeres=no --run-cxx-freeres=no ${SLEDGE_BINARY_DIR}/sledgert spec.json

# Single request with curl; writes the converted image to out.png.
client:
	curl -H 'Expect:' -H "Content-Type: image/png" --data-binary "@./0_depth.png" --output "out.png" "${HOSTNAME}:${PORT}/depth_to_xyz"

# 90 concurrent requests via hey (file body with -D).
client-hey:
	hey ${HEY_OPTS} -n 90 -c 90 -t 0 -m POST -D "./0_depth.png" "http://${HOSTNAME}:${PORT}/depth_to_xyz"

# 90 concurrent requests via loadtest (file body with -b).
client-loadtest:
	loadtest -n 90 -c 90 -T "image/png" -m POST -b "./0_depth.png" "http://${HOSTNAME}:${PORT}/depth_to_xyz"

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.5 MiB

@ -0,0 +1,78 @@
# Gnuplot script: 2x2 multiplot of per-tenant request latency vs reservation
# utilization. Expects the caller to inject `tenant_ids` (e.g.
# gnuplot -e "tenant_ids='A B'" latency.gnuplot) and one latency_<id>.dat
# file per tenant whose columns are:
#   1=utilization, 3=min, 4=mean, 5=p50, 6=p90, 7=p99, 8=p100  (latency in us)
reset
set term jpeg size 1000,500
set output "latency.jpg"

#set xlabel "Reservation Utilization %"
#set ylabel "Latency (us)"

set key left top
set xrange [-5:]
set yrange [0:]

# NOTE(review): histogram style is a leftover — all plots below use `w lp`.
set style histogram columnstacked
set key horizontal

set macros
# Placement of the a,b,c,d labels in the graphs
POS = "at graph 0.05,1.03 font ',10'"

# x- and ytics for each row resp. column
NOXTICS = "unset xlabel"
XTICS = "set xlabel 'Reservation Utilization %'"
NOYTICS = "unset ylabel"
YTICS = "set ylabel 'Latency (us)'"

# Margins for each row resp. column
TMARGIN = "set tmargin at screen 0.90; set bmargin at screen 0.55"
BMARGIN = "set tmargin at screen 0.55; set bmargin at screen 0.20"
LMARGIN = "set lmargin at screen 0.15; set rmargin at screen 0.55"
RMARGIN = "set lmargin at screen 0.55; set rmargin at screen 0.95"

# plot \
#  for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:7 title 'Tenant '.t_id.' p99' w lp, \
#  for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:6 title 'Tenant '.t_id.' p90' w lp, \
#  for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:5 title 'Tenant '.t_id.' p50' w lp, \
#  for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:4 title 'Tenant '.t_id.' mean' w lp, \
#  for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:3 title 'Tenant '.t_id.' min' w lp

### Start multiplot (2x2 layout)
set multiplot layout 2,2 rowsfirst
# --- GRAPH a: p99 latency (top-left, carries the legend)
set label 1 'p99' @POS
@NOXTICS; @YTICS
#@TMARGIN; @LMARGIN
plot for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:7 title 'Tenant '.t_id w lp
# --- GRAPH b: p90 latency (top-right)
set label 1 'p90' @POS
@NOXTICS; @NOYTICS
#@TMARGIN; @RMARGIN
plot for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:6 notitle w lp
# --- GRAPH c: p50 latency (bottom-left)
set label 1 'p50' @POS
@XTICS; @YTICS
#@BMARGIN; @LMARGIN
plot for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:5 notitle w lp
# --- GRAPH d: mean latency (bottom-right)
set label 1 'mean' @POS
@XTICS; @NOYTICS
#@BMARGIN; @RMARGIN
plot for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:4 notitle w lp
unset multiplot
### End multiplot

# plot \
#  'latency_A.dat' using 1:7 title 'A p99' lt 1 lc 1 w lp, \
#  'latency_A.dat' using 1:6 title 'A p90' lt 2 lc 1 w lp, \
#  'latency_A.dat' using 1:5 title 'A p50' lt 3 lc 1 w lp, \
#  'latency_A.dat' using 1:4 title 'A mean' lt 4 lc 1 w lp, \
#  'latency_A.dat' using 1:3 title 'A min' lt 5 lc 1 w lp,\
#  'latency_B.dat' using 1:7 title 'B p99' lt 1 lc 2 w lp, \
#  'latency_B.dat' using 1:6 title 'B p90' lt 2 lc 2 w lp, \
#  'latency_B.dat' using 1:5 title 'B p50' lt 3 lc 2 w lp, \
#  'latency_B.dat' using 1:4 title 'B mean' lt 4 lc 2 w lp, \
#  'latency_B.dat' using 1:3 title 'B min' lt 5 lc 2 w lp
#  'latency_A.dat' using 1:8 title 'A p100' linetype 0 linecolor 1 with linespoints, \
#  'latency_B.dat' using 1:8 title 'B p100' linetype 0 linecolor 2 with linespoints, \

@ -0,0 +1,12 @@
#!/bin/bash
# Watchdog for the SLEdge runtime: if no sledgert process is running,
# restart it via the test Makefile and append output to server_log.txt.
# Intended to be invoked every 30s from cron (see the accompanying readme).

SLEDGE_CMUSOD_DIR="/users/emil/sledge-server/tests/cmu-sod"
# SLEDGE_CMUSOD_DIR="/home/gwu/sledge/tests/cmu-sod"

# pidof exits non-zero when no sledgert process exists; test the command
# directly instead of inspecting $? afterwards.
if ! pidof sledgert >/dev/null; then
	now=$(date)
	echo "" >> "$SLEDGE_CMUSOD_DIR/server_log.txt"
	echo "Restarting Sledge: $now" >> "$SLEDGE_CMUSOD_DIR/server_log.txt"
	# Relaunch in the background; &>> appends both stdout and stderr.
	make -C "$SLEDGE_CMUSOD_DIR" run &>> "$SLEDGE_CMUSOD_DIR/server_log.txt" &
fi

@ -0,0 +1,17 @@
Run the following command, then copy & paste the two lines below into the crontab editor:
crontab -e
* * * * * /users/emil/sledge-server/tests/cmu-sod/auto_start.sh
* * * * * ( sleep 30 ; /users/emil/sledge-server/tests/cmu-sod/auto_start.sh )
To stop:
sudo service cron stop
To start:
sudo service cron start
To remove the jobs, run:
crontab -e
and then delete the two lines above.

@ -0,0 +1,19 @@
import time
import requests

# Target route on the SLEdge server; replace URL with the server's host/IP.
url = 'http://URL:10000/depth_to_xyz'

img = None

# Warm-up: POST the depth image once; the response body is the converted PNG.
# `with` ensures the file handle is closed (the original leaked one handle
# per request by never closing the opened file).
with open('0_depth.png', 'rb') as payload:
    response = requests.post(url, data=payload)
img = response.content
print("single request works!")

# time.sleep(1)

# Stress loop: 100 sequential requests against the same route. The file is
# re-opened each iteration because requests consumes the stream on upload.
for i in range(100):
    with open('0_depth.png', 'rb') as payload:
        response = requests.post(url, data=payload)
    img = response.content
    # time.sleep(1)
    print(f"multi request #{i} works!")

@ -0,0 +1,11 @@
import time
import requests

# Target route on the SLEdge server; replace URL with the server's host/IP.
url = 'http://URL:10000/depth_to_xyz'

img = None

# POST the depth image; the response body is the converted PNG.
# `with` closes the file handle (the original never closed it).
with open('0_depth.png', 'rb') as payload:
    response = requests.post(url, data=payload)
img = response.content

@ -0,0 +1,31 @@
# import numpy as np
import requests
import threading
import time
from flask import Flask, Response

# Target route on the SLEdge server; replace URL with the server's host/IP.
url = 'http://URL:10000/depth_to_xyz'
# app = Flask(__name__)

img = None


def get_img():
    """Continuously POST the depth image and cache the latest PNG response in
    the module-level `img`, retrying after 5s on any request failure."""
    global img
    while True:
        print("start")
        try:
            # Re-open per request; requests consumes the file stream on upload.
            # `with` also closes the handle, which the original leaked.
            with open('0_depth.png', 'rb') as payload:
                response = requests.post(url, data=payload)
            img = response.content
            print("got img")
            time.sleep(0.01)
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit, making the loop impossible to interrupt cleanly.
            print("failed")
            time.sleep(5)


thread = threading.Thread(target=get_img)
thread.daemon = True  # don't block interpreter shutdown on this worker
thread.start()
thread.join()  # get_img loops forever, so this blocks until interrupted

@ -0,0 +1,50 @@
#!/bin/bash
# Experiment driver for the CMU depth_to_xyz workload under the MTDBF
# scheduler. All declared arrays/constants below are read by the sourced
# multi_tenancy_base.sh framework — their names are a contract; do not rename.

# shellcheck disable=SC1091,SC2034,SC2155
source ../bash_libraries/multi_tenancy_base.sh || exit 1

# Configure SERVER parameters: (this is to skip the .env config file)
export SLEDGE_SCHEDULER=MTDBF
export SLEDGE_DISABLE_PREEMPTION=false
export SLEDGE_SANDBOX_PERF_LOG=perf.log
export SLEDGE_HTTP_SESSION_PERF_LOG=http_perf.log
# export SLEDGE_NWORKERS=1
# export SLEDGE_PROC_MHZ=2100
# export EXTRA_EXEC_PERCENTILE=10

# The global configs for the scripts
declare -r CLIENT_TERMINATE_SERVER=false
declare -r DURATION_sec=30
declare -r ESTIMATIONS_PERCENTILE=60
declare -r NWORKERS=${SLEDGE_NWORKERS:-1}

# Tenant configs: one entry per tenant, index-aligned across all arrays.
declare -ar TENANT_IDS=("cmu")
declare -ar INIT_PORTS=(10000)
declare -ar ROUTES=("depth_to_xyz")
# Zero budgets/reservation disable MTDS replenishment and MTDBF reservation
# for this tenant.
declare -ar MTDS_REPL_PERIODS_us=(0)
declare -ar MTDS_MAX_BUDGETS_us=(0)
declare -ar MTDBF_RESERVATIONS_p=(0)

# Per route configs: index-aligned with ROUTES above.
declare -ar WASM_PATHS=("depth_to_xyz.wasm.so")
declare -ar RESP_CONTENT_TYPES=("image/png")
declare -ar EXPECTED_EXEC_TIMES_us=("950000")
# NOTE(review): singular name here, while the framework's parser reads a
# DEADLINE_TO_EXEC_RATIOs array — confirm the sourced library accepts both.
declare -ir DEADLINE_TO_EXEC_RATIO=500 # percentage

# For HEY -d is text, -D is file input. For LoadTest -P is text, -b is file input.
declare -ar ARG_OPTS_HEY=("-D")
declare -ar ARG_OPTS_LT=("-b")
declare -ar ARGS=("./0_depth.png")

# 100=FULL load, 50=HALF load ...
declare -ar LOADS=("100")

# When trying varying values, you must set ONE value from the above params to ? (question mark)
# For example, for varying the loads, try: LOADS=("50 ?" "100")
# declare -ar VARYING=(0) # no variation, single experiment
declare -ar VARYING=(0)

# Framework entry points (defined by multi_tenancy_base.sh): initialize the
# run, emit spec.json from the configs above, then launch server + clients.
run_init
generate_spec_json
framework_init "$@"

@ -0,0 +1,44 @@
[
{
"name": "Admin",
"port": 55555,
"replenishment-period-us": 0,
"max-budget-us": 0,
"reservation-percentile": 0,
"routes": [
{
"route": "/admin",
"path": "fibonacci.wasm.so",
"admissions-percentile": 50,
"expected-execution-us": 1000,
"relative-deadline-us": 10000,
"http-resp-content-type": "text/plain"
},
{
"route": "/terminator",
"path": "fibonacci.wasm.so",
"admissions-percentile": 50,
"expected-execution-us": 1000,
"relative-deadline-us": 10000,
"http-resp-content-type": "text/plain"
}
]
},
{
"name": "cmu-000",
"port": 10000,
"replenishment-period-us": 0,
"max-budget-us": 0,
"reservation-percentile": 0,
"routes": [
{
"route": "/depth_to_xyz",
"path": "depth_to_xyz.wasm.so",
"admissions-percentile": 60,
"expected-execution-us": 950000,
"relative-deadline-us": 4750000,
"http-resp-content-type": "image/png"
}
]
}
]

@ -0,0 +1,16 @@
# Gnuplot script: per-tenant deadline success rate. Expects the caller to
# inject `tenant_ids` (e.g. gnuplot -e "tenant_ids='A B'") and one
# success_<id>.dat per tenant with columns: 1=x value, 2=success rate %.
reset
set term jpeg
set output "success.jpg"

#set xlabel "Reservation Utilization %"
# Fixed typo in the axis label: "exececution" -> "execution".
set xlabel "Extra execution slack %"
set ylabel "Deadline success rate %"

set xrange [-5:]
set yrange [0:110]

plot for [t_id in tenant_ids] 'success_'.t_id.'.dat' using 1:2 title t_id w lp

#plot 'success_A.dat' using 1:2 title 'Tenant A success rate' linetype 1 linecolor 1 with linespoints,\
#     'success_B.dat' using 1:2 title 'Tenant B success rate' lt 2 lc 2 w lp

@ -0,0 +1,17 @@
{
"name": "tenant",
"port": 0,
"replenishment-period-us": 0,
"max-budget-us": 0,
"reservation-percentile": 0,
"routes": [
{
"route": "/route",
"path": "fibonacci.wasm.so",
"admissions-percentile": 0,
"expected-execution-us": 0,
"relative-deadline-us": 0,
"http-resp-content-type": "text/plain"
}
]
}

@ -0,0 +1,15 @@
# Gnuplot script: per-tenant throughput vs reservation utilization. Expects
# the caller to inject `tenant_ids` and one throughput_<id>.dat per tenant
# with columns: 1=utilization %, 2=requests/sec.
reset
set term jpeg
set output "throughput.jpg"

set xlabel "Reservation Utilization %"
set ylabel "Requests/sec"

set xrange [-5:]
set yrange [0:]

plot for [t_id in tenant_ids] 'throughput_'.t_id.'.dat' using 1:2 title 'Tenant '.t_id w lp

#plot 'throughput_A.dat' using 1:2 title 'Tenant A Throughput' linetype 1 linecolor 1 with linespoints,\
#     'throughput_B.dat' using 1:2 title 'Tenant B Throughput' linetype 2 linecolor 2 with linespoints
Loading…
Cancel
Save