Merge pull request #380 from gwsystems/mt-dbf

Merging this since it mainly adds bash scripts for the multi-tenancy testbed.
master
commit b85e13fb5f, authored by Emil 2 years ago, committed via GitHub

@ -112,8 +112,10 @@ wasi_context_init(wasi_options_t *options)
		}
	}

	/* Seed Random
	 * Commented out as a temporary fix for the mutex blocking delay srandom causes in libc.
	 */
	// srandom(time(NULL));

	/* TODO: Preopens */

@ -1,12 +1,30 @@
# shellcheck shell=bash
# shellcheck disable=SC2034,SC2153,SC2154,SC2155

if [ -n "$__experiment_server_globals_sh__" ]; then return; fi
__experiment_server_globals_sh__=$(date)

# The global configs for the scripts
declare -gr SERVER_LOG_FILE="perf.log"
declare -gr SERVER_HTTP_LOG_FILE="http_perf.log"
declare -gr HEY_OPTS="-disable-compression -disable-keepalive -disable-redirects"
# Globals to fill during run_init in run.sh, to use in base and generate_spec
declare -A ports=()
declare -A repl_periods=()
declare -A max_budgets=()
declare -A wasm_paths=()
declare -A expected_execs=()
declare -A deadlines=()
declare -A resp_content_types=()
declare -A arg_opts_hey=()
declare -A arg_opts_lt=()
declare -A args=()
declare -A concurrencies=()
declare -A rpss=()
declare -a workloads=()
declare -A workload_tids=()
declare -A workload_deadlines=()
declare -A workload_vars=()
# Sandbox Perf Log Globals:
declare -ga SANDBOX_METRICS=(total queued uninitialized allocated initialized runnable interrupted preempted running_sys running_user asleep returned complete error)
@ -77,3 +95,74 @@ assert_process_server_results_args() {
		return 1
	fi
}
# Echo the first argument unless it is "?", in which case echo the second
# (the current value of the varying parameter)
load_value() {
	local result=$1
	if [ "$result" = "?" ]; then
		result=$2
	fi
	echo "$result"
}
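A quick usage illustration (values here are hypothetical):

load_value 50 100    # prints 50:  a fixed value ignores the varying parameter
load_value "?" 100   # prints 100: the "?" placeholder is replaced by the varying value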
run_init() {
for var in "${VARYING[@]}"; do
for t_idx in "${!TENANT_IDS[@]}"; do
local tenant_id=${TENANT_IDS[$t_idx]}
local tenant=$(printf "%s-%03d" "$tenant_id" "$var")
local port=$((INIT_PORTS[t_idx]+var))
			local repl_period=$(load_value "${MTDS_REPL_PERIODS_us[$t_idx]}" "$var")
			local budget=$(load_value "${MTDS_MAX_BUDGETS_us[$t_idx]}" "$var")
# TENANTS+=("$tenant")
ports+=([$tenant]=$port)
repl_periods+=([$tenant]=$repl_period)
max_budgets+=([$tenant]=$budget)
			local t_routes r_wasm_paths r_expected_execs r_deadlines r_resp_content_types r_arg_opts_hey r_arg_opts_lt r_args r_loads
IFS=' ' read -r -a t_routes <<< "${ROUTES[$t_idx]}"
IFS=' ' read -r -a r_wasm_paths <<< "${WASM_PATHS[$t_idx]}"
IFS=' ' read -r -a r_expected_execs <<< "${EXPECTED_EXEC_TIMES_us[$t_idx]}"
IFS=' ' read -r -a r_deadlines <<< "${DEADLINES_us[$t_idx]}"
IFS=' ' read -r -a r_resp_content_types <<< "${RESP_CONTENT_TYPES[$t_idx]}"
IFS=' ' read -r -a r_arg_opts_hey <<< "${ARG_OPTS_HEY[$t_idx]}"
IFS=' ' read -r -a r_arg_opts_lt <<< "${ARG_OPTS_LT[$t_idx]}"
IFS=' ' read -r -a r_args <<< "${ARGS[$t_idx]}"
IFS=' ' read -r -a r_loads <<< "${LOADS[$t_idx]}"
for r_idx in "${!t_routes[@]}"; do
local route=${t_routes[$r_idx]}
local wasm_path=${r_wasm_paths[$r_idx]}
local expected=${r_expected_execs[$r_idx]}
local deadline=${r_deadlines[$r_idx]}
local resp_content_type=${r_resp_content_types[$r_idx]}
local arg_opt_hey=${r_arg_opts_hey[$r_idx]}
local arg_opt_lt=${r_arg_opts_lt[$r_idx]}
local arg=${r_args[$r_idx]}
				local load=$(load_value "${r_loads[$r_idx]}" "$var")
local workload="$tenant-$route"
				# Divide as a float, then truncate to int (loadtest accepts floats, hey does not); see the worked example after run_init
				local con=$(echo "x = $NWORKERS * $deadline / $expected * $load / 100; x/1" | bc)
local rps=$((1000000 * con / deadline))
# local rps=$(echo "x = 1000000 * $con / $deadline; x/1" | bc)
wasm_paths+=([$workload]=$wasm_path)
expected_execs+=([$workload]=$expected)
deadlines+=([$workload]=$deadline)
resp_content_types+=([$workload]=$resp_content_type)
arg_opts_hey+=([$workload]=$arg_opt_hey)
arg_opts_lt+=([$workload]=$arg_opt_lt)
args+=([$workload]=$arg)
concurrencies+=([$workload]=$con)
rpss+=([$workload]=$rps)
workloads+=("$workload")
workload_tids+=([$workload]=$tenant_id)
workload_deadlines+=([$workload]=$deadline)
workload_vars+=([$workload]=$var)
done
done
done
}
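As a sanity check of the concurrency/RPS arithmetic in run_init, a small sketch with made-up numbers:

# Assume NWORKERS=16, deadline=18000us, expected=3600us, load=100%:
con=$(echo "x = 16 * 18000 / 3600 * 100 / 100; x/1" | bc)  # => 80 connections
rps=$((1000000 * 80 / 18000))                              # => 4444 requests/sec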

@ -30,7 +30,11 @@ generate_gnuplots() {
	shopt -s nullglob
	for gnuplot_file in "$experiment_directory"/*.gnuplot; do
		if [ -z "$TENANT_IDS" ]; then
			gnuplot "$gnuplot_file"
		else
			gnuplot -e "tenant_ids='${TENANT_IDS[*]}'" "$gnuplot_file"
		fi
	done
	cd "$experiment_directory" || exit
}
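For reference, when TENANT_IDS is set, the loop above effectively invokes gnuplot with the tenant list exposed as a gnuplot variable (tenant names here are illustrative):

gnuplot -e "tenant_ids='long short'" latency.gnuplot   # plots latency_long.dat, latency_short.dat, etc.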

@ -3,61 +3,54 @@
if [ -n "$__generate_spec_json_sh__" ]; then return; fi
__generate_spec_json_sh__=$(date)

generate_spec_json() {
	printf "Generating 'spec.json'\n"

	for var in "${VARYING[@]}"; do
		for t_idx in "${!TENANT_IDS[@]}"; do
			local jq_str
			local tenant=$(printf "%s-%03d" "${TENANT_IDS[$t_idx]}" "$var")
			local port=${ports[$tenant]}
			local repl_period=${repl_periods[$tenant]}
			local budget=${max_budgets[$tenant]}

			jq_str=". + {
				\"name\": \"$tenant\",\
				\"port\": $port,\
				\"replenishment-period-us\": $repl_period,\
				\"max-budget-us\": $budget,\
				\"routes\": ["

			local t_routes
			IFS=' ' read -r -a t_routes <<< "${ROUTES[$t_idx]}"

			for index in "${!t_routes[@]}"; do
				local route=${t_routes[$index]}
				local workload="$tenant-$route"
				local wasm_path=${wasm_paths[$workload]}
				local resp_content_type=${resp_content_types[$workload]}
				local expected=${expected_execs[$workload]}
				local deadline=${deadlines[$workload]}

				jq_str+=".routes[] + {\
					\"route\": \"/$route\",\
					\"path\": \"$wasm_path\",\
					\"admissions-percentile\": $ESTIMATIONS_PERCENTILE,\
					\"expected-execution-us\": $expected,\
					\"relative-deadline-us\": $deadline,\
					\"http-resp-content-type\": \"$resp_content_type\"}"

				if [ "$index" != $((${#t_routes[@]} - 1)) ]; then
					jq_str+=","
				fi
			done
			jq_str+="]}"

			jq "$jq_str" < "./template.json" > "./result_${tenant}.json"
		done
	done

	jq_admin_spec

	# Merge all of the individual specs into a single spec.json
	jq -s '. | sort_by(.name)' ./result_*.json > "./spec.json"
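To illustrate, with the run.sh config later in this diff (tenant "long" with routes fib1 and fib2 at var=0), jq_str expands to roughly the following filter, which jq applies to template.json to produce result_long-000.json:

. + {"name": "long-000", "port": 10000, "replenishment-period-us": 0, "max-budget-us": 0,
     "routes": [.routes[] + {"route": "/fib1", "path": "fibonacci.wasm.so", "admissions-percentile": 60, "expected-execution-us": 64500, "relative-deadline-us": 322500, "http-resp-content-type": "text/plain"},
                .routes[] + {"route": "/fib2", "path": "fibonacci.wasm.so", "admissions-percentile": 60, "expected-execution-us": 3600, "relative-deadline-us": 18000, "http-resp-content-type": "text/plain"}]}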

@ -0,0 +1,74 @@
#!/bin/bash
if ! command -v http > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y httpie
else
sudo apt update
sudo apt install -y httpie
fi
fi
if ! command -v hey > /dev/null; then
HEY_URL=https://hey-release.s3.us-east-2.amazonaws.com/hey_linux_amd64
wget $HEY_URL -O hey
chmod +x hey
if [[ $(whoami) == "root" ]]; then
mv hey /usr/bin/hey
else
sudo mv hey /usr/bin/hey
fi
fi
if ! command -v loadtest > /dev/null; then
if ! command -v npm > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y npm
else
sudo apt update
sudo apt install -y npm
fi
fi
	# Try pulling Emil's version of loadtest, which supports POSTing binary files
# if [[ $(whoami) == "root" ]]; then
# npm install -y -g loadtest
# else
# sudo npm install -y -g loadtest
# fi
pushd ~
git clone https://github.com/emil916/loadtest.git
pushd loadtest
if [[ $(whoami) == "root" ]]; then
npm install -g
else
sudo npm install -g
fi
popd
popd
fi
if ! command -v gnuplot > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt-get update
apt-get install -y gnuplot
else
sudo apt-get update
sudo apt-get install -y gnuplot
fi
fi
if ! command -v jq > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y jq
else
sudo apt update
sudo apt install -y jq
fi
fi

@ -0,0 +1,48 @@
#!/bin/bash
if ! command -v gnuplot > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt-get update
apt-get install -y gnuplot
else
sudo apt-get update
sudo apt-get install -y gnuplot
fi
fi
if ! command -v jq > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y jq
else
sudo apt update
sudo apt install -y jq
fi
fi
if ! command -v htop > /dev/null; then
if [[ $(whoami) == "root" ]]; then
apt update
apt install -y htop
else
sudo apt update
sudo apt install -y htop
fi
fi
# For SOD:
# if ! command -v imagemagick > /dev/null; then
# if [ "$(whoami)" == "root" ]; then
# apt-get install -y imagemagick
# else
# sudo apt-get install -y imagemagick
# fi
# fi
# For GOCR, too many to check one-by-one, so uncomment below to install:
# if [[ "$(whoami)" == "root" ]]; then
# apt-get install -y netpbm pango1.0-tools wamerican fonts-roboto fonts-cascadia-code fonts-dejavu
# else
# sudo apt-get install -y netpbm pango1.0-tools wamerican fonts-roboto fonts-cascadia-code fonts-dejavu
# fi

@ -33,11 +33,23 @@ if ! command -v loadtest > /dev/null; then
	fi
fi
	# Try pulling Emil's version of loadtest, which supports POSTing binary files
# if [[ $(whoami) == "root" ]]; then
# npm install -y -g loadtest
# else
# sudo npm install -y -g loadtest
# fi
pushd ~
git clone https://github.com/emil916/loadtest.git
pushd loadtest
	if [[ $(whoami) == "root" ]]; then
		npm install -g
	else
		sudo npm install -g
	fi
	popd
	popd
fi

if ! command -v gnuplot > /dev/null; then
@ -61,18 +73,12 @@ if ! command -v jq > /dev/null; then
	fi
fi

if ! command -v htop > /dev/null; then
	if [[ $(whoami) == "root" ]]; then
		apt update
		apt install -y htop
	else
		sudo apt update
		sudo apt install -y htop
	fi
fi

@ -1,6 +1,6 @@
#!/bin/bash
# shellcheck disable=SC1091,SC2034,SC2153,SC2154

# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success rate
# Success - The percentage of requests that complete by their deadlines
@ -8,9 +8,10 @@
# Latency - the round-trip response time (us) of successful requests at the p50, p90, p99, and p100 percentiles

# Add bash_libraries directory to path
__run_sh__base_path="$(dirname "$(realpath --logical "$0")")"
# __run_sh__base_path="$(dirname "$(realpath --logical "${BASH_SOURCE[0]}")")"
# __run_sh__bash_libraries_relative_path="../bash_libraries"
__run_sh__bash_libraries_absolute_path="$(dirname "$(realpath --logical "${BASH_SOURCE[0]}")")"
export PATH="$__run_sh__bash_libraries_absolute_path:$PATH"

source csv_to_dat.sh || exit 1
@ -21,66 +22,9 @@ source panic.sh || exit 1
source path_join.sh || exit 1
source percentiles_table.sh || exit 1
source experiment_globals.sh || exit 1
source generate_spec_json.sh || exit 1

validate_dependencies hey loadtest gnuplot jq
# The global configs for the scripts
declare -r CLIENT_TERMINATE_SERVER=false
declare -r ITERATIONS=10000
declare -r DURATION_sec=1
declare -r ESTIMATIONS_PERCENTILE=70
declare -r TENANT_NG="NNN"
declare -r TENANT_GR="GWU"
declare -ar TENANTS=("$TENANT_NG" "$TENANT_GR")
declare -Ar ARGS=(
[$TENANT_NG]="30"
[$TENANT_GR]="30"
)
# Make sure not to use 55555 (Reserved for Admin)
declare -Ar PORTS=(
[$TENANT_NG]=10030
[$TENANT_GR]=20030
)
# No need for slashes
declare -Ar ROUTES=(
[$TENANT_NG]="fib"
[$TENANT_GR]="fib"
)
declare -Ar MTDS_REPLENISH_PERIODS_us=(
[$TENANT_NG]=0
[$TENANT_GR]=16000
)
declare -Ar MTDS_MAX_BUDGETS_us=(
[$TENANT_NG]=0
[$TENANT_GR]=144000
)
declare -Ar MTDBF_RESERVATIONS_percen=(
[$TENANT_NG]=0
[$TENANT_GR]=0
)
declare -ar WORKLOADS=("${TENANT_NG}-${ROUTES[$TENANT_NG]}" "${TENANT_GR}-${ROUTES[$TENANT_GR]}")
declare -Ar EXPECTED_EXECUTIONS_us=(
[${WORKLOADS[0]}]=4000
[${WORKLOADS[1]}]=4000
)
declare -Ar DEADLINES_us=(
[${WORKLOADS[0]}]=16000
[${WORKLOADS[1]}]=16000
)
# Generate the spec.json file from the given arguments above
. "$__run_sh__bash_libraries_absolute_path/generate_spec_json.sh"
# Execute the experiments concurrently
run_experiments() {
@ -90,81 +34,68 @@ run_experiments() {
	local -r results_directory="$2"
	local -r loadgen="$3"

	printf "Running Experiments with %s\n" "$loadgen"

	for var in "${VARYING[@]}"; do
		for t_idx in "${!TENANT_IDS[@]}"; do
			local tenant_id=${TENANT_IDS[$t_idx]}
			local tenant=$(printf "%s-%03d" "$tenant_id" "$var")
			local port=${ports[$tenant]}

			local t_routes
			IFS=' ' read -r -a t_routes <<< "${ROUTES[$t_idx]}"

			for index in "${!t_routes[@]}"; do
				local route=${t_routes[$index]}
				local workload="$tenant-$route"
				local expected=${expected_execs[$workload]}
				local deadline=${deadlines[$workload]}
				local arg=${args[$workload]}
				local con=${concurrencies[$workload]}
				local rps=${rpss[$workload]}

				echo "CON for $workload: $con"
				echo "RPS for $workload: $rps"

				local pid
				local -a pids          # Run concurrently
				local -A pid_workloads # Run concurrently

				if [ "$loadgen" = "hey" ]; then
					local arg_opt_hey=${arg_opts_hey[$workload]}
					hey $HEY_OPTS -z "$DURATION_sec"s -c "$con" -t 0 -o csv -m POST "$arg_opt_hey" "$arg" "http://${hostname}:$port/$route" > "$results_directory/$workload.csv" 2> "$results_directory/$workload-err.dat" &
				elif [ "$loadgen" = "loadtest" ]; then
					local arg_opt_lt=${arg_opts_lt[$workload]}
					loadtest -t "$DURATION_sec" -c "$con" --rps "$rps" "$arg_opt_lt" "$arg" "http://${hostname}:${port}/$route" > "$results_directory/$workload.dat" 2> "$results_directory/$workload-err.dat" &
				fi
				pid="$!"
				pids+=("$pid")
				pid_workloads+=([$pid]=$workload)
			done
		done

		for ((i = ${#pids[@]} - 1; i >= 0; i--)); do
			local pid=${pids[$i]}
			local pid_workload=${pid_workloads[$pid]}

			wait -f "$pid" || {
				printf "\t%s: [ERR]\n" "$pid_workload"
				panic "failed to wait -f $pid"
				return 1
			}
			[ "$loadgen" = "hey" ] && (get_result_count "$results_directory/$pid_workload.csv" || {
				printf "\t%s: [ERR]\n" "$pid_workload"
				panic "$pid_workload has zero requests."
				return 1
			})
			printf "\t%s: [OK]\n" "$pid_workload"
		done

		unset pids pid_workloads
	done

	if [ "$CLIENT_TERMINATE_SERVER" == true ]; then
		printf "Sent a Terminator to the server\n"
		echo "5" | http "$hostname":55555/terminator &> /dev/null
	fi

	return 0
@ -179,18 +110,25 @@ process_client_results_hey() {
printf "Processing HEY Results: " printf "Processing HEY Results: "
# Write headers to CSVs # Write headers to CSVs
printf "Workload,Scs%%,TOTAL,ClientScs,All200,AllFail,Deny,MisDL,Shed,MiscErr\n" >> "$results_directory/success.csv" for t_id in "${TENANT_IDS[@]}"; do
printf "Workload,Throughput\n" >> "$results_directory/throughput.csv" printf "Workload,Scs%%,TOTAL,ClientScs,All200,AllFail,Deny,MisDL,Shed,MiscErr\n" >> "$results_directory/success_$t_id.csv"
percentiles_table_header "$results_directory/latency.csv" "Workload" printf "Workload,Throughput\n" >> "$results_directory/throughput_$t_id.csv"
# percentiles_table_header "$results_directory/latency-200.csv" "Workload" percentiles_table_header "$results_directory/latency_$t_id.csv" "Workload"
done
for workload in "${WORKLOADS[@]}"; do
local -i deadline=${DEADLINES_us[$workload]} for workload in "${workloads[@]}"; do
local t_id=${workload_tids[$workload]}
local deadline=${workload_deadlines[$workload]}
local var=${workload_vars[$workload]}
# Some requests come back with an "Unsolicited response ..." See issue #185 # Some requests come back with an "Unsolicited response ..." See issue #185
misc_err=$(wc -l < "$results_directory/$workload-err.dat") misc_err=$(wc -l < "$results_directory/$workload-err.dat")
if [ ! -s "$results_directory/$workload-err.dat" ]; then
# The error file is empty. So remove it.
rm "$results_directory/$workload-err.dat"
fi
		# Calculate Success Rate for csv (percent of requests that return 200 within deadline)
		awk -v misc_err="$misc_err" -F, '
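			# hey -o csv column order (assumed): $1 = response-time in seconds, $7 = HTTP status code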
			$7 == 200 && ($1 * 1000000) <= '"$deadline"' {ok++}
@ -199,8 +137,8 @@ process_client_results_hey() {
			$7 == 429 {denied++}
			$7 == 408 {missed_dl++}
			$7 == 409 {killed++}
			END{printf "'"$var"',%3.1f,%d,%d,%d,%d,%d,%d,%d,%d\n", (all200*100/(NR-1+misc_err)), (NR-1+misc_err), ok, all200, (total_failed-1+misc_err), denied, missed_dl, killed, misc_err}
		' < "$results_directory/$workload.csv" >> "$results_directory/success_$t_id.csv"

		# Convert from s to us, and sort
		awk -F, 'NR > 1 {print ($1 * 1000000)}' < "$results_directory/$workload.csv" \
@ -224,18 +162,25 @@ process_client_results_hey() {
		# Throughput is calculated as the mean number of successful requests per second
		throughput=$(echo "$oks/$duration" | bc)
		printf "%s,%d\n" "$var" "$throughput" >> "$results_directory/throughput_$t_id.csv"

		# Generate Latency Data for csv
		percentiles_table_row "$results_directory/$workload-response.csv" "$results_directory/latency_$t_id.csv" "$var"

		# Delete scratch file used for sorting/counting
		rm "$results_directory/$workload-response.csv" "$results_directory/$workload-response-200.csv"
	done

	for t_id in "${TENANT_IDS[@]}"; do
		# Transform csvs to dat files for gnuplot
		csv_to_dat "$results_directory/success_$t_id.csv" "$results_directory/throughput_$t_id.csv" "$results_directory/latency_$t_id.csv"
		rm "$results_directory/success_$t_id.csv" "$results_directory/throughput_$t_id.csv" "$results_directory/latency_$t_id.csv"
	done

	# Generate gnuplots
	generate_gnuplots "$results_directory" "$__run_sh__base_path" || {
		printf "[ERR]\n"
		panic "failed to generate gnuplots"
	}

	printf "[OK]\n"

	return 0
@ -250,11 +195,20 @@ process_client_results_loadtest() {
printf "Processing Loadtest Results: " printf "Processing Loadtest Results: "
# Write headers to CSVs # Write headers to CSVs
printf "Workload,Scs%%,TOTAL,All200,AllFail,Deny,MisDL,Shed,MiscErr\n" >> "$results_directory/success.csv" for t_id in "${TENANT_IDS[@]}"; do
printf "Workload,Throughput\n" >> "$results_directory/throughput.csv" printf "Workload,Scs%%,TOTAL,All200,AllFail,Deny,MisDL,Shed,MiscErr\n" >> "$results_directory/success_$t_id.csv"
percentiles_table_header "$results_directory/latency.csv" "Workload" printf "Workload,Throughput\n" >> "$results_directory/throughput_$t_id.csv"
percentiles_table_header "$results_directory/latency_$t_id.csv" "Workload"
done
	for workload in "${workloads[@]}"; do
		local t_id=${workload_tids[$workload]}
		local var=${workload_vars[$workload]}

		if [ ! -s "$results_directory/$workload-err.dat" ]; then
			# The error file is empty, so remove it
			rm "$results_directory/$workload-err.dat"
		fi

		if [[ ! -f "$results_directory/$workload.dat" ]]; then
			printf "[ERR]\n"
@ -263,40 +217,47 @@ process_client_results_loadtest() {
		fi

		# Get Number of 200s and then calculate Success Rate (percent of requests that return 200)
		# If using the loadtest -n option (not -t), then use all200/iterations instead of all200/total
		total=$(grep "Completed requests:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13)
		total_failed=$(grep "Total errors:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13)
		denied=$(grep "429:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)
		missed_dl=$(grep "408:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)
		killed=$(grep "409:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)
		misc_err=$(grep "\-1:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)
		all200=$((total - total_failed))

		# ((all200 == 0)) && continue # If all errors, skip line
		success_rate=$(echo "scale=2; $all200*100/$total" | bc)
		printf "%s,%3.1f,%d,%d,%d,%d,%d,%d,%d\n" "$var" "$success_rate" "$total" "$all200" "$total_failed" "$denied" "$missed_dl" "$killed" "$misc_err" >> "$results_directory/success_$t_id.csv"

		# Throughput is calculated as the mean number of successful requests per second
		duration=$(grep "Total time:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13)
		printf -v duration %.0f "$duration"
		# throughput=$(grep "Requests per second" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 14 | tail -n 1) # throughput of ALL
		throughput=$(echo "$all200/$duration" | bc)
		printf "%s,%d\n" "$var" "$throughput" >> "$results_directory/throughput_$t_id.csv"

		# Generate Latency Data
		min=$(echo "$(grep "Minimum latency:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13)*1000" | bc)
		p50=$(echo "$(grep 50% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)*1000" | bc)
		p90=$(echo "$(grep 90% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)*1000" | bc)
		p99=$(echo "$(grep 99% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)*1000" | bc)
		p100=$(echo "$(grep 100% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12 | tail -n 1)*1000" | bc)
		mean=$(echo "scale=1;$(grep "Mean latency:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13)*1000" | bc)

		printf "%s,%d,%d,%.1f,%d,%d,%d,%d\n" "$var" "$total" "$min" "$mean" "$p50" "$p90" "$p99" "$p100" >> "$results_directory/latency_$t_id.csv"
	done

	for t_id in "${TENANT_IDS[@]}"; do
		# Transform csvs to dat files for gnuplot
		csv_to_dat "$results_directory/success_$t_id.csv" "$results_directory/throughput_$t_id.csv" "$results_directory/latency_$t_id.csv"
		rm "$results_directory/success_$t_id.csv" "$results_directory/throughput_$t_id.csv" "$results_directory/latency_$t_id.csv"
	done

	# Generate gnuplots
	generate_gnuplots "$results_directory" "$__run_sh__base_path" || {
		printf "[ERR]\n"
		panic "failed to generate gnuplots"
	}

	printf "[OK]\n"

	return 0
@ -309,30 +270,34 @@ process_server_results() {
printf "Processing Server Results: \n" printf "Processing Server Results: \n"
num_of_lines=$(wc -l < "$results_directory/$SERVER_LOG_FILE") local -r num_of_lines=$(wc -l < "$results_directory/$SERVER_LOG_FILE")
if [ "$num_of_lines" == 1 ]; then if [ "$num_of_lines" == 1 ]; then
printf "No results to process! Exiting the script." printf "No results to process! Exiting the script."
return 1 return 1
fi fi
# Write headers to CSVs # Write headers to CSVs
printf "Workload,Scs%%,TOTAL,SrvScs,All200,AllFail,DenyAny,DenyG,MisDL_Glb,MisDL_Loc,Shed_Glb,Shed_Loc,QueFull\n" >> "$results_directory/success.csv" for t_id in "${TENANT_IDS[@]}"; do
printf "Workload,Throughput\n" >> "$results_directory/throughput.csv" printf "Workload,Scs%%,TOTAL,SrvScs,All200,AllFail,DenyBE,DenyG,xDenyBE,xDenyG,MisD_Glb,MisD_Loc,MisD_WB,Shed_Glb,Shed_Loc,Shed_WB,Misc\n" >> "$results_directory/success_$t_id.csv"
percentiles_table_header "$results_directory/latency.csv" "Workload" printf "Workload,Throughput\n" >> "$results_directory/throughput_$t_id.csv"
percentiles_table_header "$results_directory/latency_$t_id.csv" "Workload"
# Write headers to CSVs # Write headers to CSVs
for metric in "${SANDBOX_METRICS[@]}"; do for metric in "${SANDBOX_METRICS[@]}"; do
percentiles_table_header "$results_directory/$metric.csv" "Workload" percentiles_table_header "$results_directory/${metric}_$t_id.csv" "Workload"
done
percentiles_table_header "$results_directory/running_user_200_$t_id.csv" "Workload"
# percentiles_table_header "$results_directory/running_user_nonzero_$t_id.csv" "Workload"
percentiles_table_header "$results_directory/total_200_$t_id.csv" "Workload"
# percentiles_table_header "$results_directory/memalloc_$t_id.csv" "Workload"
done done
percentiles_table_header "$results_directory/running_user_200.csv" "Workload"
percentiles_table_header "$results_directory/running_user_nonzero.csv" "Workload"
percentiles_table_header "$results_directory/total_200.csv" "Workload"
# percentiles_table_header "$results_directory/memalloc.csv" "module"
for workload in "${WORKLOADS[@]}"; do for workload in "${workloads[@]}"; do
mkdir "$results_directory/$workload" mkdir -p "$results_directory/$workload"
local -i deadline=${DEADLINES_us[$workload]} local t_id=${workload_tids[$workload]}
local deadline=${workload_deadlines[$workload]}
local var=${workload_vars[$workload]}
for metric in "${SANDBOX_METRICS[@]}"; do for metric in "${SANDBOX_METRICS[@]}"; do
awk -F, ' awk -F, '
@ -340,24 +305,23 @@ process_server_results() {
				workload == "'"$workload"'" {printf("%d,%d\n", $'"${SANDBOX_METRICS_FIELDS[$metric]}"' / $'"$SANDBOX_CPU_FREQ_FIELD"', $'"$SANDBOX_RESPONSE_CODE_FIELD"')}
			' < "$results_directory/$SERVER_LOG_FILE" | sort -g > "$results_directory/$workload/${metric}_sorted.csv"

			percentiles_table_row "$results_directory/$workload/${metric}_sorted.csv" "$results_directory/${metric}_$t_id.csv" "$workload"

			# Delete scratch file used for sorting/counting
			# rm "$results_directory/$workload/${metric}_sorted.csv"
		done

		awk -F, '$2 == 200 {printf("%d,%d\n", $1, $2)}' < "$results_directory/$workload/running_user_sorted.csv" > "$results_directory/$workload/running_user_200_sorted.csv"
		percentiles_table_row "$results_directory/$workload/running_user_200_sorted.csv" "$results_directory/running_user_200_$t_id.csv" "$workload"

		# awk -F, '$1 > 0 {printf("%d,%d\n", $1, $2)}' < "$results_directory/$workload/running_user_sorted.csv" > "$results_directory/$workload/running_user_nonzero_sorted.csv"
		# percentiles_table_row "$results_directory/$workload/running_user_nonzero_sorted.csv" "$results_directory/running_user_nonzero_$t_id.csv" "$workload"

		awk -F, '$2 == 200 {printf("%d,%d\n", $1, $2)}' < "$results_directory/$workload/total_sorted.csv" > "$results_directory/$workload/total_200_sorted.csv"
		percentiles_table_row "$results_directory/$workload/total_200_sorted.csv" "$results_directory/total_200_$t_id.csv" "$workload"

		# Memory Allocation
		# awk -F, '$2 == "'"$workload"'" {printf("%.0f\n", $MEMORY_FIELD)}' < "$results_directory/$SERVER_LOG_FILE" | sort -g > "$results_directory/$workload/memalloc_sorted.csv"
		# percentiles_table_row "$results_directory/$workload/memalloc_sorted.csv" "$results_directory/memalloc_$t_id.csv" "$workload" "%1.0f"

		# Calculate Success Rate for csv (percent of requests that complete); $1 and deadline are both in us, so no conversion needed
		awk -F, '
@ -366,40 +330,52 @@ process_server_results() {
			$2 != 200 {total_failed++}
			$2 == 4290 {denied_any++}
			$2 == 4291 {denied_gtd++}
			$2 == 4295 {x_denied_any++}
			$2 == 4296 {x_denied_gtd++}
			$2 == 4080 {mis_dl_glob++}
			$2 == 4081 {mis_dl_local++}
			$2 == 4082 {mis_dl_wb++}
			$2 == 4090 {shed_glob++}
			$2 == 4091 {shed_local++}
			$2 == 4092 {shed_wb++}
			$2 == 4093 {misc++}
			END{printf "'"$var"',%3.1f,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", (all200*100/NR), NR, ok, all200, total_failed, denied_any, denied_gtd, x_denied_any, x_denied_gtd, mis_dl_glob, mis_dl_local, mis_dl_wb, shed_glob, shed_local, shed_wb, misc}
		' < "$results_directory/$workload/total_sorted.csv" >> "$results_directory/success_$t_id.csv"

		# Throughput is calculated on the client side, so ignore the below line
		printf "%s,%d\n" "$var" "1" >> "$results_directory/throughput_$t_id.csv"

		# Generate Latency Data for csv
		percentiles_table_row "$results_directory/$workload/total_sorted.csv" "$results_directory/latency_$t_id.csv" "$var"

		# Delete scratch file used for sorting/counting
		# rm "$results_directory/$workload/memalloc_sorted.csv"

		# Delete directory
		# rm -rf "${results_directory:?}/${workload:?}"
	done

	for t_id in "${TENANT_IDS[@]}"; do
		for metric in "${SANDBOX_METRICS[@]}"; do
			csv_to_dat "$results_directory/${metric}_$t_id.csv"
			rm "$results_directory/${metric}_$t_id.csv"
		done
		csv_to_dat "$results_directory/running_user_200_$t_id.csv" "$results_directory/total_200_$t_id.csv" # "$results_directory/running_user_nonzero_$t_id.csv"
		rm "$results_directory/running_user_200_$t_id.csv" "$results_directory/total_200_$t_id.csv" # "$results_directory/running_user_nonzero_$t_id.csv"

		# Transform csvs to dat files for gnuplot
		# csv_to_dat "$results_directory/memalloc_$t_id.csv"
		csv_to_dat "$results_directory/success_$t_id.csv" "$results_directory/throughput_$t_id.csv" "$results_directory/latency_$t_id.csv"
		# rm "$results_directory/memalloc_$t_id.csv"
		rm "$results_directory/success_$t_id.csv" "$results_directory/throughput_$t_id.csv" "$results_directory/latency_$t_id.csv"
	done

	# Generate gnuplots
	generate_gnuplots "$results_directory" "$__run_sh__base_path" || {
		printf "[ERR]\n"
		panic "failed to generate gnuplots"
	}

	printf "[OK]\n"

	return 0
@ -412,7 +388,7 @@ process_server_http_results() {
printf "Processing Server HTTP Results: \n" printf "Processing Server HTTP Results: \n"
num_of_lines=$(wc -l < "$results_directory/$SERVER_HTTP_LOG_FILE") local -r num_of_lines=$(wc -l < "$results_directory/$SERVER_HTTP_LOG_FILE")
if [ "$num_of_lines" == 1 ]; then if [ "$num_of_lines" == 1 ]; then
printf "No results to process! Exiting the script." printf "No results to process! Exiting the script."
return 1 return 1
@ -422,8 +398,9 @@ process_server_http_results() {
		percentiles_table_header "$results_directory/$metric.csv" "workload"
	done

	for workload in "${workloads[@]}"; do
		mkdir -p "$results_directory/$workload"
		local var=${workload_vars[$workload]}

		for metric in "${HTTP_METRICS[@]}"; do
			awk -F, '
@ -431,7 +408,7 @@ process_server_http_results() {
				workload == "'"$workload"'" {printf("%.1f\n", $'"${HTTP_METRICS_FIELDS[$metric]}"' / $'"$HTTP_CPU_FREQ_FIELD"')}
			' < "$results_directory/$SERVER_HTTP_LOG_FILE" | sort -g > "$results_directory/$workload/${metric}_sorted.csv"

			percentiles_table_row "$results_directory/$workload/${metric}_sorted.csv" "$results_directory/${metric}.csv" "$var"

			# Delete directory
			# rm -rf "${results_directory:?}/${workload:?}"
@ -499,6 +476,3 @@ experiment_client() {
	return 0
}
generate_spec_json
framework_init "$@"

@ -1 +0,0 @@
out.png

Binary file not shown (deleted image, 606 KiB).

Binary file not shown (deleted image, 2.9 MiB).

@ -1,30 +0,0 @@
SLEDGE_BINARY_DIR=../../runtime/bin
HOSTNAME=localhost
PORT=10000
default: run
clean:
rm -rf res/*
run:
SLEDGE_SPINLOOP_PAUSE_ENABLED=true SLEDGE_SANDBOX_PERF_LOG=perf.log LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} ${SLEDGE_BINARY_DIR}/sledgert spec.json
debug:
SLEDGE_SPINLOOP_PAUSE_ENABLED=true SLEDGE_NWORKERS=1 LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} gdb ${SLEDGE_BINARY_DIR}/sledgert --eval-command="run spec.json"
valgrind:
SLEDGE_DISABLE_PREEMPTION=true SLEDGE_NWORKERS=1 LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} valgrind --leak-check=full --max-stackframe=11150456 --run-libc-freeres=no --run-cxx-freeres=no ${SLEDGE_BINARY_DIR}/sledgert spec.json
.PHONY: client
client:
cat ./0_depth.png | http "${HOSTNAME}:${PORT}/depth_to_xyz" > ./out.png
client-cloudlab:
cat ./0_depth.png | http "c220g2-011017.wisc.cloudlab.us:${PORT}/depth_to_xyz" > ./out.png
client-cmu:
cat ./0_depth.png | http "arena0.andrew.cmu.edu:${PORT}/depth_to_xyz" > ./out.png
multi:
hey -n 180 -c 180 -t 0 -o csv -m POST -D "./0_depth.png" "http://${HOSTNAME}:${PORT}/depth_to_xyz"

Binary file not shown (deleted image, 1.5 MiB).

@ -1,25 +0,0 @@
[
{
"name": "cmu",
"port": 10000,
"replenishment-period-us": 0,
"max-budget-us": 0,
"routes": [
{
"route": "/fib",
"path": "fibonacci.wasm.so",
"admissions-percentile": 50,
"expected-execution-us": 6000,
"relative-deadline-us": 20000,
"http-resp-content-type": "text/plain"
},
{
"route": "/depth_to_xyz",
"path": "depth_to_xyz.wasm.so",
"expected-execution-us": 5000,
"relative-deadline-us": 360000,
"http-resp-content-type": "img/png"
}
]
}
]

@ -1,20 +0,0 @@
import time
import requests
url = 'http://arena0.andrew.cmu.edu:10000/depth_to_xyz'
payload = open('0_depth.png', 'rb')
img = None
response = requests.post(url, data=payload)
img = response.content
time.sleep(1)
print("single request works!")
for i in range(100):
payload = open('0_depth.png', 'rb')
response = requests.post(url, data=payload)
img = response.content
time.sleep(1)
print(f"multi request #{i} works!")

@ -1,31 +0,0 @@
# import numpy as np
import requests
import threading
import time
from flask import Flask, Response
url = 'http://arena0.andrew.cmu.edu:10000/depth_to_xyz'
# app = Flask(__name__)
img = None
def get_img():
global img
while True:
print("start")
try:
payload = open('0_depth.png', 'rb')
response = requests.post(url, data=payload)
img = response.content
print("got img")
time.sleep(0.01)
except:
print("failed")
time.sleep(5)
thread = threading.Thread(target=get_img)
thread.daemon = True
thread.start()
thread.join()

@ -1,3 +0,0 @@
SLEDGE_SCHEDULER=MTDBF
SLEDGE_DISABLE_PREEMPTION=false
SLEDGE_SANDBOX_PERF_LOG=perf.log

@ -1,48 +0,0 @@
RUNTIME_DIR=../../../runtime/
SLEDGE_BINARY_DIR=${RUNTIME_DIR}/bin
SLEDGE_TESTS_DIR=${RUNTIME_DIR}/tests
HOSTNAME=localhost
DURATION_SEC=5
all: run
clean:
make -C ${RUNTIME_DIR} clean
make -C ${SLEDGE_TESTS_DIR} clean
rm -f ${SLEDGE_BINARY_DIR}/fibonacci.wasm.so
${SLEDGE_BINARY_DIR}/sledgert:
make -C ${RUNTIME_DIR} runtime
.PHONY: sledgert
sledgert: ${SLEDGE_BINARY_DIR}/sledgert
${SLEDGE_BINARY_DIR}/fibonacci.wasm.so:
make -C ../../../applications fibonacci.install
.PHONY: fibonacci
fibonacci: ${SLEDGE_BINARY_DIR}/fibonacci.wasm.so
run: sledgert fibonacci
LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} ${SLEDGE_BINARY_DIR}/sledgert spec.json
debug: sledgert fibonacci
SLEDGE_DISABLE_PREEMPTION=true SLEDGE_NWORKERS=1 \
LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} gdb ${SLEDGE_BINARY_DIR}/sledgert \
--eval-command="handle SIGUSR1 noprint nostop" \
--eval-command="handle SIGPIPE noprint nostop" \
--eval-command="set pagination off" \
--eval-command="set print pretty" \
--eval-command="run spec.json"
client-fib30-once:
echo 30 | http :10031/fib
client-fib40-once:
http :10030/fib?40
client-fib30-multi:
hey -z ${DURATION_SEC}s -cpus 2 -c 100 -t 0 -o csv -m POST -d "30\n" "http://${HOSTNAME}:10030/fib"
client-fib40-multi:
hey -z ${DURATION_SEC}s -cpus 2 -c 100 -t 0 -o csv -m POST -d "40\n" "http://${HOSTNAME}:10030/fib"

@ -1,15 +0,0 @@
# Unimodal Distribution
This experiment drives a unimodal distribution of two workloads from the same tenant, where one workload has some guarantees and the other does not.
First, the non-guaranteed workload is launched and acquires all of the CPU. After a few seconds (e.g. 3s), the guaranteed workload begins. Depending on the amount of the reservation, the guaranteed workload should typically have a better success rate than the other one.
## Independent Variable
The Scheduling Policy: MT-EDF versus just EDF
## Dependent Variables
Replenishment Period and Max Budget of Tenants
Latency of high priority workload

@ -1,34 +0,0 @@
[
{
"name": "GWU",
"port": 20030,
"replenishment-period-us": 16000,
"max-budget-us": 144000,
"routes": [
{
"route": "/fib",
"path": "fibonacci.wasm.so",
"admissions-percentile": 70,
"expected-execution-us": 4000,
"relative-deadline-us": 16000,
"http-resp-content-type": "text/plain"
}
]
},
{
"name": "NNN",
"port": 10030,
"replenishment-period-us": 0,
"max-budget-us": 0,
"routes": [
{
"route": "/fib",
"path": "fibonacci.wasm.so",
"admissions-percentile": 70,
"expected-execution-us": 4000,
"relative-deadline-us": 16000,
"http-resp-content-type": "text/plain"
}
]
}
]

@ -0,0 +1,65 @@
SLEDGE_BINARY_DIR=../../runtime/bin
HOSTNAME=10.10.1.1
PORT1=10050
PORT2=20050
HEY_OPTS=-disable-compression -disable-keepalive -disable-redirects
default: run
clean:
rm -rf res/*
run:
SLEDGE_SIGALRM_HANDLER=TRIAGED SLEDGE_SCHEDULER=MTDBF SLEDGE_SPINLOOP_PAUSE_ENABLED=true SLEDGE_HTTP_SESSION_PERF_LOG=http_perf.log SLEDGE_SANDBOX_PERF_LOG=perf.log LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} ${SLEDGE_BINARY_DIR}/sledgert spec.json
debug:
SLEDGE_SCHEDULER=MTDBF SLEDGE_SPINLOOP_PAUSE_ENABLED=false SLEDGE_NWORKERS=18 LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} gdb ${SLEDGE_BINARY_DIR}/sledgert \
--eval-command="handle SIGUSR1 noprint nostop" \
--eval-command="handle SIGPIPE noprint nostop" \
--eval-command="set pagination off" \
--eval-command="set print pretty" \
--eval-command="run spec.json"
valgrind:
SLEDGE_DISABLE_PREEMPTION=true SLEDGE_NWORKERS=1 LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} valgrind --leak-check=full --max-stackframe=11150456 --run-libc-freeres=no --run-cxx-freeres=no ${SLEDGE_BINARY_DIR}/sledgert spec.json
client-localhost-xyz-once:
cat ./0_depth.png | http "localhost:${PORT1}/depth_to_xyz" > ./out.png
client-localhost-fib-once:
http localhost:${PORT2}/fib?30
client-xyz-once:
cat ./0_depth.png | http "${HOSTNAME}:${PORT1}/depth_to_xyz" > ./out.png
client-xyz-hey:
hey ${HEY_OPTS} -n 90 -c 90 -t 0 -m POST -D "./0_depth.png" "http://${HOSTNAME}:${PORT1}/depth_to_xyz"
#90=18*5, 4500=1s*72/14400 *90%
client-xyz-loadtest:
loadtest -n 90 -c 90 -T "image/png" -m POST -b "./0_depth.png" "http://${HOSTNAME}:${PORT1}/depth_to_xyz"
client-xyz-wrk:
wrk -s post_binary.lua -t 1 -c 1 -d 1s -R 1 "http://${HOSTNAME}:${PORT1}/depth_to_xyz" -- "0_depth.png"
client-fib-once:
# echo 30 | http ${HOSTNAME}:${PORT2}/fib
http ${HOSTNAME}:${PORT2}/fib?30
client-fib-curl:
curl -i "http://${HOSTNAME}:${PORT2}/fib?30"
#72=18*4, 4500=1s*72/14400 *90%
client-fib-loadtest:
loadtest -t 10 -c 72 --rps 4500 -P 30 "http://${HOSTNAME}:${PORT2}/fib"
client-fib-hey:
hey ${HEY_OPTS} -z 10s -c 72 -t 0 -o csv -m POST -d "30\n" "http://${HOSTNAME}:${PORT2}/fib"
client-fib-wrk:
wrk -t 1 -c 1 -d 5s -R 1 "http://${HOSTNAME}:${PORT2}/fib?30"
client-admin:
echo 5 | http ${HOSTNAME}:55555/admin

@ -0,0 +1,78 @@
reset
set term jpeg size 1000,500
set output "latency.jpg"
#set xlabel "Reservation Utilization %"
#set ylabel "Latency (us)"
set key left top
set xrange [-5:]
set yrange [0:]
set style histogram columnstacked
set key horizontal
set macros
# Placement of the a,b,c,d labels in the graphs
POS = "at graph 0.05,1.03 font ',10'"
# x- and ytics for each row resp. column
NOXTICS = "unset xlabel"
XTICS = "set xlabel 'Reservation Utilization %'"
NOYTICS = "unset ylabel"
YTICS = "set ylabel 'Latency (us)'"
# Margins for each row resp. column
TMARGIN = "set tmargin at screen 0.90; set bmargin at screen 0.55"
BMARGIN = "set tmargin at screen 0.55; set bmargin at screen 0.20"
LMARGIN = "set lmargin at screen 0.15; set rmargin at screen 0.55"
RMARGIN = "set lmargin at screen 0.55; set rmargin at screen 0.95"
# plot \
# for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:7 title 'Tenant '.t_id.' p99' w lp, \
# for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:6 title 'Tenant '.t_id.' p90' w lp, \
# for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:5 title 'Tenant '.t_id.' p50' w lp, \
# for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:4 title 'Tenant '.t_id.' mean' w lp, \
# for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:3 title 'Tenant '.t_id.' min' w lp
### Start multiplot (2x2 layout)
set multiplot layout 2,2 rowsfirst
# --- GRAPH a
set label 1 'p99' @POS
@NOXTICS; @YTICS
#@TMARGIN; @LMARGIN
plot for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:7 title 'Tenant '.t_id w lp
# --- GRAPH b
set label 1 'p90' @POS
@NOXTICS; @NOYTICS
#@TMARGIN; @RMARGIN
plot for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:6 notitle w lp
# --- GRAPH c
set label 1 'p50' @POS
@XTICS; @YTICS
#@BMARGIN; @LMARGIN
plot for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:5 notitle w lp
# --- GRAPH d
set label 1 'mean' @POS
@XTICS; @NOYTICS
#@BMARGIN; @RMARGIN
plot for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:4 notitle w lp
unset multiplot
### End multiplot
# plot \
# 'latency_A.dat' using 1:7 title 'A p99' lt 1 lc 1 w lp, \
# 'latency_A.dat' using 1:6 title 'A p90' lt 2 lc 1 w lp, \
# 'latency_A.dat' using 1:5 title 'A p50' lt 3 lc 1 w lp, \
# 'latency_A.dat' using 1:4 title 'A mean' lt 4 lc 1 w lp, \
# 'latency_A.dat' using 1:3 title 'A min' lt 5 lc 1 w lp,\
# 'latency_B.dat' using 1:7 title 'B p99' lt 1 lc 2 w lp, \
# 'latency_B.dat' using 1:6 title 'B p90' lt 2 lc 2 w lp, \
# 'latency_B.dat' using 1:5 title 'B p50' lt 3 lc 2 w lp, \
# 'latency_B.dat' using 1:4 title 'B mean' lt 4 lc 2 w lp, \
# 'latency_B.dat' using 1:3 title 'B min' lt 5 lc 2 w lp
# 'latency_A.dat' using 1:8 title 'A p100' linetype 0 linecolor 1 with linespoints, \
# 'latency_B.dat' using 1:8 title 'B p100' linetype 0 linecolor 2 with linespoints, \

@ -0,0 +1,25 @@
reset
set term jpeg
set output "latency.jpg"
set xlabel "Rerervation Utilization %"
set ylabel "Latency (us)"
set key left top
set xrange [-5:]
set yrange [0:]
set style histogram columnstacked
set key horizontal
plot \
for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:7 title 'latency_'.t_id.'.dat' lt 2 lc 2 w lp, \
for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:6 title 'latency_'.t_id.'.dat' lt 2 lc 2 w lp, \
for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:5 title 'latency_'.t_id.'.dat' lt 2 lc 2 w lp, \
for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:4 title 'latency_'.t_id.'.dat' lt 2 lc 2 w lp, \
for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:3 title 'latency_'.t_id.'.dat' lt 2 lc 2 w lp
# 'latency_A.dat' using 1:8 title 'A p100' linetype 0 linecolor 1 with linespoints, \
# 'latency_B.dat' using 1:8 title 'B p100' linetype 0 linecolor 2 with linespoints, \

@ -0,0 +1,53 @@
#!/bin/bash
# shellcheck disable=SC1091,SC2034,SC2155
source ../bash_libraries/multi_tenancy_base.sh || exit 1
# To reduce post processing time, provide local-only meaningful metrics:
# Comment the following line in order to use ALL the metrics!
declare -a SANDBOX_METRICS=(total running_sys running_user)
# declare -r APP_WASM="sample_app.wasm.so"
declare -r FIBONACCI_WASM="fibonacci.wasm.so"
# The global configs for the scripts
declare -r CLIENT_TERMINATE_SERVER=true
declare -r DURATION_sec=30
declare -r ESTIMATIONS_PERCENTILE=60
declare -r NWORKERS=$(($(nproc)-2)) # all cores - 2
# Tenant configs:
declare -ar TENANT_IDS=("long" "short")
declare -ar INIT_PORTS=(10000 20000)
# declare -ar ROUTES=("sample_app_route1 sample_app_route2" "fib")
declare -ar ROUTES=("fib1 fib2" "fib")
declare -ar MTDS_REPL_PERIODS_us=(0 0)
declare -ar MTDS_MAX_BUDGETS_us=(0 0)
# Per route configs:
declare -ar WASM_PATHS=("$FIBONACCI_WASM $FIBONACCI_WASM" "$FIBONACCI_WASM")
declare -ar RESP_CONTENT_TYPES=("text/plain text/plain" "text/plain") # image data: "image/png"
declare -ar EXPECTED_EXEC_TIMES_us=("64500 3600" "3600")
declare -ar DEADLINES_us=("322500 18000" "18000")
# For image data:
# declare -ar ARG_OPTS_HEY=("-D" "-d")
# declare -ar ARG_OPTS_LT=("-b" "-P")
# declare -ar ARGS=("./0_depth.png" "30")
declare -ar ARG_OPTS_HEY=("-d -d" "-d")
declare -ar ARG_OPTS_LT=("-P -P" "-P")
declare -ar ARGS=("36 30" "30")
# 100=FULL, 50=HALF etc.
declare -ar LOADS=("50 70" "100")
# To vary a parameter, set exactly ONE of the above values to ? (question mark)
# For example, to vary the load, try: declare -ar LOADS=("50 ?" "100"); see the sketch below
declare -ar VARYING=(0) # no variation, single experiment
# declare -ar VARYING=(5 50 100)
run_init
generate_spec_json
# framework_init "$@"
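A hedged sketch of how the varying mechanism composes with the config above (hypothetical sweep):

# declare -ar LOADS=("50 ?" "100")   # vary the load of tenant "long"'s second route
# declare -ar VARYING=(25 50 100)
# run_init then generates tenants long-025/short-025, long-050/short-050, long-100/short-100,
# each on its own port (INIT_PORT + var), and generate_spec_json emits one spec per instance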

@ -0,0 +1,72 @@
[
{
"name": "Admin",
"port": 55555,
"replenishment-period-us": 0,
"max-budget-us": 0,
"reservation-percentile": 0,
"routes": [
{
"route": "/admin",
"path": "fibonacci.wasm.so",
"admissions-percentile": 50,
"expected-execution-us": 1000,
"relative-deadline-us": 10000,
"http-resp-content-type": "text/plain"
},
{
"route": "/terminator",
"path": "fibonacci.wasm.so",
"admissions-percentile": 50,
"expected-execution-us": 1000,
"relative-deadline-us": 10000,
"http-resp-content-type": "text/plain"
}
],
"extra-exec-percentile": 0
},
{
"name": "long-000",
"port": 10000,
"replenishment-period-us": 0,
"max-budget-us": 0,
"reservation-percentile": 0,
"routes": [
{
"route": "/fib1",
"path": "fibonacci.wasm.so",
"admissions-percentile": 60,
"expected-execution-us": 64500,
"relative-deadline-us": 322500,
"http-resp-content-type": "text/plain"
},
{
"route": "/fib2",
"path": "fibonacci.wasm.so",
"admissions-percentile": 60,
"expected-execution-us": 3600,
"relative-deadline-us": 18000,
"http-resp-content-type": "text/plain"
}
],
"extra-exec-percentile": 0
},
{
"name": "short-000",
"port": 20000,
"replenishment-period-us": 0,
"max-budget-us": 0,
"reservation-percentile": 0,
"routes": [
{
"route": "/fib",
"path": "fibonacci.wasm.so",
"admissions-percentile": 60,
"expected-execution-us": 3600,
"relative-deadline-us": 18000,
"http-resp-content-type": "text/plain"
}
],
"extra-exec-percentile": 0
}
]

@ -0,0 +1,15 @@
reset
set term jpeg
set output "success.jpg"
set xlabel "Reservation Utilization %"
set ylabel "Deadline success rate %"
set xrange [-5:]
set yrange [0:110]
plot for [t_id in tenant_ids] 'success_'.t_id.'.dat' using 1:2 title t_id w lp
#plot 'success_A.dat' using 1:2 title 'Tenant A success rate' linetype 1 linecolor 1 with linespoints,\
# 'success_B.dat' using 1:2 title 'Tenant B success rate' lt 2 lc 2 w lp

@ -3,6 +3,7 @@
"port": 0, "port": 0,
"replenishment-period-us": 0, "replenishment-period-us": 0,
"max-budget-us": 0, "max-budget-us": 0,
"reservation-percentile": 0,
"routes": [ "routes": [
{ {
"route": "/route", "route": "/route",

@ -0,0 +1,15 @@
reset
set term jpeg
set output "throughput.jpg"
set xlabel "Reservation Utilization %"
set ylabel "Requests/sec"
set xrange [-5:]
set yrange [0:]
plot for [t_id in tenant_ids] 'throughput_'.t_id.'.dat' using 1:2 title 'Tenant '.t_id w lp
#plot 'throughput_A.dat' using 1:2 title 'Tenant A Throughput' linetype 1 linecolor 1 with linespoints,\
# 'throughput_B.dat' using 1:2 title 'Tenant B Throughput' linetype 2 linecolor 2 with linespoints