diff --git a/runtime/src/libc/wasi_impl_serverless.c b/runtime/src/libc/wasi_impl_serverless.c index d53af20..c7c6234 100644 --- a/runtime/src/libc/wasi_impl_serverless.c +++ b/runtime/src/libc/wasi_impl_serverless.c @@ -112,8 +112,10 @@ wasi_context_init(wasi_options_t *options) } } - /* Seed Random */ - srandom(time(NULL)); + /* Seed Random + * Commented out as a temporary fix for the mutex blocking delay srandom causes in libc. + */ + // srandom(time(NULL)); /* TODO: Preopens */ diff --git a/tests/bash_libraries/experiment_globals.sh b/tests/bash_libraries/experiment_globals.sh index 6beb561..7296a77 100644 --- a/tests/bash_libraries/experiment_globals.sh +++ b/tests/bash_libraries/experiment_globals.sh @@ -1,12 +1,30 @@ # shellcheck shell=bash -# shellcheck disable=SC2034 +# shellcheck disable=SC2034,SC2153,SC2154,SC2155 if [ -n "$__experiment_server_globals_sh__" ]; then return; fi __experiment_server_globals_sh__=$(date) # The global configs for the scripts declare -gr SERVER_LOG_FILE="perf.log" declare -gr SERVER_HTTP_LOG_FILE="http_perf.log" -declare -gr NWORKERS=$(($(nproc)-2)) +declare -gr HEY_OPTS="-disable-compression -disable-keepalive -disable-redirects" + +# Globals to fill during run_init in run.sh, to use in base and generate_spec +declare -A ports=() +declare -A repl_periods=() +declare -A max_budgets=() +declare -A wasm_paths=() +declare -A expected_execs=() +declare -A deadlines=() +declare -A resp_content_types=() +declare -A arg_opts_hey=() +declare -A arg_opts_lt=() +declare -A args=() +declare -A concurrencies=() +declare -A rpss=() +declare -a workloads=() +declare -A workload_tids=() +declare -A workload_deadlines=() +declare -A workload_vars=() # Sandbox Perf Log Globals: declare -ga SANDBOX_METRICS=(total queued uninitialized allocated initialized runnable interrupted preempted running_sys running_user asleep returned complete error) @@ -77,3 +95,74 @@ assert_process_server_results_args() { return 1 fi } + +load_value() { + local 
result=$1 + if [ "$result" = "?" ]; then + result=$2 + fi + echo "$result" +} + +run_init() { + for var in "${VARYING[@]}"; do + for t_idx in "${!TENANT_IDS[@]}"; do + local tenant_id=${TENANT_IDS[$t_idx]} + local tenant=$(printf "%s-%03d" "$tenant_id" "$var") + local port=$((INIT_PORTS[t_idx]+var)) + local repl_period=$(load_value ${MTDS_REPL_PERIODS_us[$t_idx]} $var) + local budget=$(load_value ${MTDS_MAX_BUDGETS_us[$t_idx]} $var) + + # TENANTS+=("$tenant") + ports+=([$tenant]=$port) + repl_periods+=([$tenant]=$repl_period) + max_budgets+=([$tenant]=$budget) + + local t_routes r_expected_execs r_deadlines r_arg_opts_hey r_arg_opts_lt r_args r_loads + + IFS=' ' read -r -a t_routes <<< "${ROUTES[$t_idx]}" + IFS=' ' read -r -a r_wasm_paths <<< "${WASM_PATHS[$t_idx]}" + IFS=' ' read -r -a r_expected_execs <<< "${EXPECTED_EXEC_TIMES_us[$t_idx]}" + IFS=' ' read -r -a r_deadlines <<< "${DEADLINES_us[$t_idx]}" + IFS=' ' read -r -a r_resp_content_types <<< "${RESP_CONTENT_TYPES[$t_idx]}" + + IFS=' ' read -r -a r_arg_opts_hey <<< "${ARG_OPTS_HEY[$t_idx]}" + IFS=' ' read -r -a r_arg_opts_lt <<< "${ARG_OPTS_LT[$t_idx]}" + IFS=' ' read -r -a r_args <<< "${ARGS[$t_idx]}" + IFS=' ' read -r -a r_loads <<< "${LOADS[$t_idx]}" + + for r_idx in "${!t_routes[@]}"; do + local route=${t_routes[$r_idx]} + local wasm_path=${r_wasm_paths[$r_idx]} + local expected=${r_expected_execs[$r_idx]} + local deadline=${r_deadlines[$r_idx]} + local resp_content_type=${r_resp_content_types[$r_idx]} + local arg_opt_hey=${r_arg_opts_hey[$r_idx]} + local arg_opt_lt=${r_arg_opts_lt[$r_idx]} + local arg=${r_args[$r_idx]} + local load=$(load_value ${r_loads[$r_idx]} $var) + + local workload="$tenant-$route" + + # Divide as float, cast the result to int (Loadtest is okay floats, HEY is not) + local con=$(echo "x = $NWORKERS * $deadline / $expected * $load / 100; x/1" | bc) + local rps=$((1000000 * con / deadline)) + # local rps=$(echo "x = 1000000 * $con / $deadline; x/1" | bc) + + 
wasm_paths+=([$workload]=$wasm_path) + expected_execs+=([$workload]=$expected) + deadlines+=([$workload]=$deadline) + resp_content_types+=([$workload]=$resp_content_type) + arg_opts_hey+=([$workload]=$arg_opt_hey) + arg_opts_lt+=([$workload]=$arg_opt_lt) + args+=([$workload]=$arg) + concurrencies+=([$workload]=$con) + rpss+=([$workload]=$rps) + workloads+=("$workload") + workload_tids+=([$workload]=$tenant_id) + workload_deadlines+=([$workload]=$deadline) + workload_vars+=([$workload]=$var) + done + done + done +} diff --git a/tests/bash_libraries/generate_gnuplots.sh b/tests/bash_libraries/generate_gnuplots.sh index 45826ce..b74e69d 100644 --- a/tests/bash_libraries/generate_gnuplots.sh +++ b/tests/bash_libraries/generate_gnuplots.sh @@ -30,7 +30,11 @@ generate_gnuplots() { shopt -s nullglob for gnuplot_file in "$experiment_directory"/*.gnuplot; do - gnuplot "$gnuplot_file" + if [ -z "$TENANT_IDS" ]; then + gnuplot "$gnuplot_file" + else + gnuplot -e "tenant_ids='${TENANT_IDS[*]}'" "$gnuplot_file" + fi done cd "$experiment_directory" || exit } diff --git a/tests/bash_libraries/generate_spec_json.sh b/tests/bash_libraries/generate_spec_json.sh index 490eacc..9ce25c1 100644 --- a/tests/bash_libraries/generate_spec_json.sh +++ b/tests/bash_libraries/generate_spec_json.sh @@ -3,61 +3,54 @@ if [ -n "$__generate_spec_json_sh__" ]; then return; fi __generate_spec_json_sh__=$(date) - generate_spec_json() { printf "Generating 'spec.json'\n" - for tenant in "${TENANTS[@]}"; do - port=${PORTS[$tenant]} - repl_period=${MTDS_REPLENISH_PERIODS_us[$tenant]} - budget=${MTDS_MAX_BUDGETS_us[$tenant]} - # reservation=${MTDBF_RESERVATIONS_percen[${tenant}]} - route=${ROUTES[$tenant]} - workload="$tenant-$route" - deadline=${DEADLINES_us[$workload]} - expected=${EXPECTED_EXECUTIONS_us[$workload]} + for var in "${VARYING[@]}"; do + for t_idx in "${!TENANT_IDS[@]}"; do + local jq_str + local tenant=$(printf "%s-%03d" "${TENANT_IDS[$t_idx]}" "$var") + local port=${ports[$tenant]} + local 
repl_period=${repl_periods[$tenant]} + local budget=${max_budgets[$tenant]} + + jq_str=". + { + \"name\": \"$tenant\",\ + \"port\": $port,\ + \"replenishment-period-us\": $repl_period,\ + \"max-budget-us\": $budget,\ + \"routes\": [" + + local t_routes + IFS=' ' read -r -a t_routes <<< ${ROUTES[$t_idx]} + + for index in "${!t_routes[@]}"; do + local route=${t_routes[$index]} + local workload="$tenant-$route" + local wasm_path=${wasm_paths[$workload]} + local resp_content_type=${resp_content_types[$workload]} + local expected=${expected_execs[$workload]} + local deadline=${deadlines[$workload]} + + jq_str+=".routes[] + {\ + \"route\": \"/$route\",\ + \"path\": \"$wasm_path\",\ + \"admissions-percentile\": $ESTIMATIONS_PERCENTILE,\ + \"expected-execution-us\": $expected,\ + \"relative-deadline-us\": $deadline,\ + \"http-resp-content-type\": \"$resp_content_type\"}" + + if [ "$index" != $((${#t_routes[@]}-1)) ]; then + jq_str+="," + fi + done + jq_str+="]}" - # Generates unique module specs on different ports using the given 'ru's - jq ". + { \ - \"name\": \"$tenant\",\ - \"port\": $port,\ - \"replenishment-period-us\": $repl_period, \ - \"max-budget-us\": $budget} | \ - (.routes[] = \ - .routes[] + { \ - \"route\": \"/$route\",\ - \"admissions-percentile\": $ESTIMATIONS_PERCENTILE,\ - \"expected-execution-us\": $expected,\ - \"relative-deadline-us\": $deadline - }) \ - " \ - < "./template.json" \ - > "./result_${tenant}.json" - # \"reservation-percentile\": $reservation, \ + jq "$jq_str" < "./template.json" > "./result_${tenant}.json" + done done - if [ "$CLIENT_TERMINATE_SERVER" == true ]; then - jq ". 
+ { \ - \"name\": \"Admin\",\ - \"port\": 55555,\ - \"replenishment-period-us\": 0, \ - \"max-budget-us\": 0} | \ - (.routes = [\ - .routes[] + { \ - \"route\": \"/main\",\ - \"admissions-percentile\": 70,\ - \"expected-execution-us\": 1000,\ - \"relative-deadline-us\": 10000}, \ - .routes[] + { \ - \"route\": \"/terminator\",\ - \"admissions-percentile\": 70,\ - \"expected-execution-us\": 1000,\ - \"relative-deadline-us\": 10000 }\ - ]) \ - " \ - < "./template.json" \ - > "./result_admin.json" - fi + jq_admin_spec # Merges all of the multiple specs for a single module jq -s '. | sort_by(.name)' ./result_*.json > "./spec.json" diff --git a/tests/bash_libraries/install_tools_client.sh b/tests/bash_libraries/install_tools_client.sh new file mode 100755 index 0000000..8a02960 --- /dev/null +++ b/tests/bash_libraries/install_tools_client.sh @@ -0,0 +1,74 @@ +#!/bin/bash + +if ! command -v http > /dev/null; then + if [[ $(whoami) == "root" ]]; then + apt update + apt install -y httpie + else + sudo apt update + sudo apt install -y httpie + fi +fi + +if ! command -v hey > /dev/null; then + HEY_URL=https://hey-release.s3.us-east-2.amazonaws.com/hey_linux_amd64 + wget $HEY_URL -O hey + chmod +x hey + + if [[ $(whoami) == "root" ]]; then + mv hey /usr/bin/hey + else + sudo mv hey /usr/bin/hey + fi +fi + +if ! command -v loadtest > /dev/null; then + if ! command -v npm > /dev/null; then + if [[ $(whoami) == "root" ]]; then + apt update + apt install -y npm + else + sudo apt update + sudo apt install -y npm + fi + fi + + # Try pulling Emil's version of loadtest to support post binary files + # if [[ $(whoami) == "root" ]]; then + # npm install -y -g loadtest + # else + # sudo npm install -y -g loadtest + # fi + + pushd ~ + git clone https://github.com/emil916/loadtest.git + pushd loadtest + if [[ $(whoami) == "root" ]]; then + npm install -g + else + sudo npm install -g + fi + popd + popd +fi + +if ! 
command -v gnuplot > /dev/null; then + if [[ $(whoami) == "root" ]]; then + apt-get update + apt-get install -y gnuplot + else + sudo apt-get update + sudo apt-get install -y gnuplot + fi +fi + + +if ! command -v jq > /dev/null; then + if [[ $(whoami) == "root" ]]; then + apt update + apt install -y jq + else + sudo apt update + sudo apt install -y jq + fi +fi diff --git a/tests/bash_libraries/install_tools_server.sh b/tests/bash_libraries/install_tools_server.sh new file mode 100755 index 0000000..98eb5e9 --- /dev/null +++ b/tests/bash_libraries/install_tools_server.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +if ! command -v gnuplot > /dev/null; then + if [[ $(whoami) == "root" ]]; then + apt-get update + apt-get install -y gnuplot + else + sudo apt-get update + sudo apt-get install -y gnuplot + fi +fi + + +if ! command -v jq > /dev/null; then + if [[ $(whoami) == "root" ]]; then + apt update + apt install -y jq + else + sudo apt update + sudo apt install -y jq + fi +fi + +if ! command -v htop > /dev/null; then + if [[ $(whoami) == "root" ]]; then + apt update + apt install -y htop + else + sudo apt update + sudo apt install -y htop + fi +fi + +# For SOD: +# if ! 
command -v imagemagick > /dev/null; then +# if [ "$(whoami)" == "root" ]; then +# apt-get install -y imagemagick +# else +# sudo apt-get install -y imagemagick +# fi +# fi + +# For GOCR, too many to check one-by-one, so uncomment below to install: +# if [[ "$(whoami)" == "root" ]]; then +# apt-get install -y netpbm pango1.0-tools wamerican fonts-roboto fonts-cascadia-code fonts-dejavu +# else +# sudo apt-get install -y netpbm pango1.0-tools wamerican fonts-roboto fonts-cascadia-code fonts-dejavu +# fi diff --git a/tests/bash_libraries/install_tools.sh b/tests/bash_libraries/install_tools_single.sh similarity index 65% rename from tests/bash_libraries/install_tools.sh rename to tests/bash_libraries/install_tools_single.sh index 4d9df7a..366d979 100755 --- a/tests/bash_libraries/install_tools.sh +++ b/tests/bash_libraries/install_tools_single.sh @@ -33,11 +33,23 @@ if ! command -v loadtest > /dev/null; then fi fi + # Try pulling Emil's version of loadtest to support post binary files + # if [[ $(whoami) == "root" ]]; then + # npm install -y -g loadtest + # else + # sudo npm install -y -g loadtest + # fi + + pushd ~ + git clone https://github.com/emil916/loadtest.git + pushd loadtest if [[ $(whoami) == "root" ]]; then - npm install -y -g loadtest + npm install -g else - sudo npm install -y -g loadtest + sudo npm install -g fi + popd + popd fi if ! command -v gnuplot > /dev/null; then @@ -61,18 +73,12 @@ if ! command -v jq > /dev/null; then fi fi -# For SOD: -if ! command -v imagemagick > /dev/null; then - if [ "$(whoami)" == "root" ]; then - apt-get install -y imagemagick +if ! 
command -v htop > /dev/null; then + if [[ $(whoami) == "root" ]]; then + apt update + apt install -y htop else - sudo apt-get install -y imagemagick + sudo apt update + sudo apt install -y htop fi -fi - -# For GOCR, too many to check one-by-one, so uncomment below to install: -# if [[ "$(whoami)" == "root" ]]; then -# apt-get install -y netpbm pango1.0-tools wamerican fonts-roboto fonts-cascadia-code fonts-dejavu -# else -# sudo apt-get install -y netpbm pango1.0-tools wamerican fonts-roboto fonts-cascadia-code fonts-dejavu -# fi +fi \ No newline at end of file diff --git a/tests/mt_unimodal/run.sh b/tests/bash_libraries/multi_tenancy_base.sh similarity index 52% rename from tests/mt_unimodal/run.sh rename to tests/bash_libraries/multi_tenancy_base.sh index 9bd8877..6d082f2 100755 --- a/tests/mt_unimodal/run.sh +++ b/tests/bash_libraries/multi_tenancy_base.sh @@ -1,6 +1,6 @@ #!/bin/bash -# shellcheck disable=SC1091,SC2034 +# shellcheck disable=SC1091,SC2034,SC2153,SC2154 # This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success rate # Success - The percentage of requests that complete by their deadlines @@ -8,9 +8,10 @@ # Latency - the rount-trip resonse time (us) of successful requests at the p50, p90, p99, and p100 percentiles # Add bash_libraries directory to path -__run_sh__base_path="$(dirname "$(realpath --logical "${BASH_SOURCE[0]}")")" -__run_sh__bash_libraries_relative_path="../bash_libraries" -__run_sh__bash_libraries_absolute_path=$(cd "$__run_sh__base_path" && cd "$__run_sh__bash_libraries_relative_path" && pwd) +__run_sh__base_path="$(dirname "$(realpath --logical "$0")")" +# __run_sh__base_path="$(dirname "$(realpath --logical "${BASH_SOURCE[0]}")")" +# __run_sh__bash_libraries_relative_path="../bash_libraries" +__run_sh__bash_libraries_absolute_path="$(dirname "$(realpath --logical "${BASH_SOURCE[0]}")")" export PATH="$__run_sh__bash_libraries_absolute_path:$PATH" source 
csv_to_dat.sh || exit 1 @@ -21,66 +22,9 @@ source panic.sh || exit 1 source path_join.sh || exit 1 source percentiles_table.sh || exit 1 source experiment_globals.sh || exit 1 +source generate_spec_json.sh || exit 1 -validate_dependencies hey loadtest jq - -# The global configs for the scripts -declare -r CLIENT_TERMINATE_SERVER=false -declare -r ITERATIONS=10000 -declare -r DURATION_sec=1 -declare -r ESTIMATIONS_PERCENTILE=70 - -declare -r TENANT_NG="NNN" -declare -r TENANT_GR="GWU" -declare -ar TENANTS=("$TENANT_NG" "$TENANT_GR") - -declare -Ar ARGS=( - [$TENANT_NG]="30" - [$TENANT_GR]="30" -) - -# Make sure not to use 55555 (Reserved for Admin) -declare -Ar PORTS=( - [$TENANT_NG]=10030 - [$TENANT_GR]=20030 -) - -# No need for slashes -declare -Ar ROUTES=( - [$TENANT_NG]="fib" - [$TENANT_GR]="fib" -) - -declare -Ar MTDS_REPLENISH_PERIODS_us=( - [$TENANT_NG]=0 - [$TENANT_GR]=16000 -) - -declare -Ar MTDS_MAX_BUDGETS_us=( - [$TENANT_NG]=0 - [$TENANT_GR]=144000 -) - -declare -Ar MTDBF_RESERVATIONS_percen=( - [$TENANT_NG]=0 - [$TENANT_GR]=0 -) - -declare -ar WORKLOADS=("${TENANT_NG}-${ROUTES[$TENANT_NG]}" "${TENANT_GR}-${ROUTES[$TENANT_GR]}") - -declare -Ar EXPECTED_EXECUTIONS_us=( - [${WORKLOADS[0]}]=4000 - [${WORKLOADS[1]}]=4000 -) - -declare -Ar DEADLINES_us=( - [${WORKLOADS[0]}]=16000 - [${WORKLOADS[1]}]=16000 -) - - -# Generate the spec.json file from the given arguments above -. 
"$__run_sh__bash_libraries_absolute_path/generate_spec_json.sh" +validate_dependencies hey loadtest gnuplot jq # Execute the experiments concurrently run_experiments() { @@ -90,81 +34,68 @@ run_experiments() { local -r results_directory="$2" local -r loadgen="$3" - # The duration in seconds that the low priority task should run before the high priority task starts - local -ir OFFSET=1 - printf "Running Experiments with %s\n" "$loadgen" - # Run concurrently - local app_gr_PID - local app_ng_PID - - local -r port_ng=${PORTS[$TENANT_NG]} - local -r port_gr=${PORTS[$TENANT_GR]} - - local -r route_ng=${ROUTES[$TENANT_NG]} - local -r route_gr=${ROUTES[$TENANT_GR]} - - local -r workload_ng=${WORKLOADS[0]} - local -r workload_gr=${WORKLOADS[1]} - - local -r deadline_ng=${DEADLINES_us[$workload_ng]} - local -r deadline_gr=${DEADLINES_us[$workload_gr]} - - local -r con_ng=$((NWORKERS*10)) - local -r con_gr=$((NWORKERS*2)) - - local -r rps_ng=$((1000000*con_ng/deadline_ng)) - local -r rps_gr=$((1000000*con_gr/deadline_gr)) - - local -r arg_ng=${ARGS[$TENANT_NG]} - local -r arg_gr=${ARGS[$TENANT_GR]} - - if [ "$loadgen" = "hey" ]; then - hey -disable-compression -disable-keepalive -disable-redirects -z $((DURATION_sec+OFFSET))s -n "$ITERATIONS" -c $con_ng -t 0 -o csv -m POST -d "$arg_ng" "http://${hostname}:$port_ng/$route_ng" > "$results_directory/$workload_ng.csv" 2> "$results_directory/$workload_ng-err.dat" & - app_ng_PID="$!" - - sleep "$OFFSET"s - - hey -disable-compression -disable-keepalive -disable-redirects -z "$DURATION_sec"s -n "$ITERATIONS" -c $con_gr -t 0 -o csv -m POST -d "$arg_gr" "http://${hostname}:$port_gr/$route_gr" > "$results_directory/$workload_gr.csv" 2> "$results_directory/$workload_gr-err.dat" & - app_gr_PID="$!" 
- elif [ "$loadgen" = "loadtest" ]; then - loadtest -t $((DURATION_sec+OFFSET)) -c $con_ng --rps $rps_ng -P "$arg_ng" "http://${hostname}:$port_ng/$route_ng" > "$results_directory/$workload_ng.dat" 2> "$results_directory/$workload_ng-err.dat" & - app_ng_PID="$!" - - sleep "$OFFSET"s - - loadtest -t "$DURATION_sec" -c $con_gr --rps $rps_gr -P "$arg_gr" "http://${hostname}:$port_gr/$route_gr" > "$results_directory/$workload_gr.dat" 2> "$results_directory/$workload_gr-err.dat" & - app_gr_PID="$!" - fi + for var in "${VARYING[@]}"; do + for t_idx in "${!TENANT_IDS[@]}"; do + local tenant_id=${TENANT_IDS[$t_idx]} + local tenant=$(printf "%s-%03d" "$tenant_id" "$var") + local port=${ports[$tenant]} + + local t_routes + IFS=' ' read -r -a t_routes <<< "${ROUTES[$t_idx]}" + + for index in "${!t_routes[@]}"; do + local route=${t_routes[$index]} + local workload="$tenant-$route" + local expected=${expected_execs[$workload]} + local deadline=${deadlines[$workload]} + local arg=${args[$workload]} + local con=${concurrencies[$workload]} + local rps=${rpss[$workload]} + + echo "CON for $workload" : "$con" + echo "RPS for $workload" : "$rps" + + local pid + local -a pids # Run concurrently + local -A pid_workloads # Run concurrently + + if [ "$loadgen" = "hey" ]; then + local arg_opt_hey=${arg_opts_hey[$workload]} + hey $HEY_OPTS -z "$DURATION_sec"s -c "$con" -t 0 -o csv -m POST "$arg_opt_hey" "$arg" "http://${hostname}:$port/$route" > "$results_directory/$workload.csv" 2> "$results_directory/$workload-err.dat" & + elif [ "$loadgen" = "loadtest" ]; then + local arg_opt_lt=${arg_opts_lt[$workload]} + loadtest -t "$DURATION_sec" -c "$con" --rps "$rps" "$arg_opt_lt" "$arg" "http://${hostname}:${port}/$route" > "$results_directory/$workload.dat" 2> "$results_directory/$workload-err.dat" & + fi + pid="$!" 
+ pids+=("$pid") + pid_workloads+=([$pid]=$workload) + done + done - wait -f "$app_gr_PID" || { - printf "\t%s: [ERR]\n" "$workload_gr" - panic "failed to wait -f ${app_gr_PID}" - return 1 - } - [ "$loadgen" = "hey" ] && (get_result_count "$results_directory/$workload_gr.csv" || { - printf "\t%s: [ERR]\n" "$workload_gr" - panic "$workload_gr has zero requests." - return 1 - }) - printf "\t%s: [OK]\n" "$workload_gr" + for ((i=${#pids[@]}-1; i>=0; i--)); do + local pid=${pids[$i]} + local pid_workload=${pid_workloads[$pid]} + wait -f "$pid" || { + printf "\t%s: [ERR]\n" "$pid_workload" + panic "failed to wait -f $pid" + return 1 + } + [ "$loadgen" = "hey" ] && (get_result_count "$results_directory/$pid_workload.csv" || { + printf "\t%s: [ERR]\n" "$pid_workload" + panic "$pid_workload has zero requests." + return 1 + }) + printf "\t%s: [OK]\n" "$pid_workload" + done - wait -f "$app_ng_PID" || { - printf "\t%s: [ERR]\n" "$workload_ng" - panic "failed to wait -f ${app_ng_PID}" - return 1 - } - [ "$loadgen" = "hey" ] && (get_result_count "$results_directory/$workload_ng.csv" || { - printf "\t%s: [ERR]\n" "$workload_ng" - panic "$workload_ng has zero requests." 
- return 1 - }) - printf "\t%s: [OK]\n" "$workload_ng" + unset pids pid_workloads + done if [ "$CLIENT_TERMINATE_SERVER" == true ]; then printf "Sent a Terminator to the server\n" - echo "55" | http "$hostname":55555/terminator &> /dev/null + echo "5" | http "$hostname":55555/terminator &> /dev/null fi return 0 @@ -179,18 +110,25 @@ process_client_results_hey() { printf "Processing HEY Results: " # Write headers to CSVs - printf "Workload,Scs%%,TOTAL,ClientScs,All200,AllFail,Deny,MisDL,Shed,MiscErr\n" >> "$results_directory/success.csv" - printf "Workload,Throughput\n" >> "$results_directory/throughput.csv" - percentiles_table_header "$results_directory/latency.csv" "Workload" - # percentiles_table_header "$results_directory/latency-200.csv" "Workload" - - for workload in "${WORKLOADS[@]}"; do - - local -i deadline=${DEADLINES_us[$workload]} + for t_id in "${TENANT_IDS[@]}"; do + printf "Workload,Scs%%,TOTAL,ClientScs,All200,AllFail,Deny,MisDL,Shed,MiscErr\n" >> "$results_directory/success_$t_id.csv" + printf "Workload,Throughput\n" >> "$results_directory/throughput_$t_id.csv" + percentiles_table_header "$results_directory/latency_$t_id.csv" "Workload" + done + for workload in "${workloads[@]}"; do + local t_id=${workload_tids[$workload]} + local deadline=${workload_deadlines[$workload]} + local var=${workload_vars[$workload]} + # Some requests come back with an "Unsolicited response ..." See issue #185 misc_err=$(wc -l < "$results_directory/$workload-err.dat") + if [ ! -s "$results_directory/$workload-err.dat" ]; then + # The error file is empty. So remove it. 
+ rm "$results_directory/$workload-err.dat" + fi + # Calculate Success Rate for csv (percent of requests that return 200 within deadline) awk -v misc_err="$misc_err" -F, ' $7 == 200 && ($1 * 1000000) <= '"$deadline"' {ok++} @@ -199,8 +137,8 @@ process_client_results_hey() { $7 == 429 {denied++} $7 == 408 {missed_dl++} $7 == 409 {killed++} - END{printf "'"$workload"',%3.1f,%d,%d,%d,%d,%d,%d,%d,%d\n", (ok/(NR-1+misc_err)*100), (NR-1+misc_err), ok, all200, (total_failed-1+misc_err), denied, missed_dl, killed, misc_err} - ' < "$results_directory/$workload.csv" >> "$results_directory/success.csv" + END{printf "'"$var"',%3.1f,%d,%d,%d,%d,%d,%d,%d,%d\n", (all200*100/(NR-1+misc_err)), (NR-1+misc_err), ok, all200, (total_failed-1+misc_err), denied, missed_dl, killed, misc_err} + ' < "$results_directory/$workload.csv" >> "$results_directory/success_$t_id.csv" # Convert from s to us, and sort awk -F, 'NR > 1 {print ($1 * 1000000)}' < "$results_directory/$workload.csv" \ @@ -224,18 +162,25 @@ process_client_results_hey() { # Throughput is calculated as the mean number of successful requests per second throughput=$(echo "$oks/$duration" | bc) - printf "%s,%d\n" "$workload" "$throughput" >> "$results_directory/throughput.csv" + printf "%s,%d\n" "$var" "$throughput" >> "$results_directory/throughput_$t_id.csv" # Generate Latency Data for csv - percentiles_table_row "$results_directory/$workload-response.csv" "$results_directory/latency.csv" "$workload" - # percentiles_table_row "$results_directory/$workload-response-200.csv" "$results_directory/latency-200.csv" "$workload" + percentiles_table_row "$results_directory/$workload-response.csv" "$results_directory/latency_$t_id.csv" "$var" # Delete scratch file used for sorting/counting rm "$results_directory/$workload-response.csv" "$results_directory/$workload-response-200.csv" done - # Transform csvs to dat files for gnuplot - csv_to_dat "$results_directory/success.csv" "$results_directory/throughput.csv" 
"$results_directory/latency.csv" - rm "$results_directory/success.csv" "$results_directory/throughput.csv" "$results_directory/latency.csv" + for t_id in "${TENANT_IDS[@]}"; do + # Transform csvs to dat files for gnuplot + csv_to_dat "$results_directory/success_$t_id.csv" "$results_directory/throughput_$t_id.csv" "$results_directory/latency_$t_id.csv" + rm "$results_directory/success_$t_id.csv" "$results_directory/throughput_$t_id.csv" "$results_directory/latency_$t_id.csv" + done + + # Generate gnuplots + generate_gnuplots "$results_directory" "$__run_sh__base_path" || { + printf "[ERR]\n" + panic "failed to generate gnuplots" + } printf "[OK]\n" return 0 @@ -250,11 +195,20 @@ process_client_results_loadtest() { printf "Processing Loadtest Results: " # Write headers to CSVs - printf "Workload,Scs%%,TOTAL,All200,AllFail,Deny,MisDL,Shed,MiscErr\n" >> "$results_directory/success.csv" - printf "Workload,Throughput\n" >> "$results_directory/throughput.csv" - percentiles_table_header "$results_directory/latency.csv" "Workload" + for t_id in "${TENANT_IDS[@]}"; do + printf "Workload,Scs%%,TOTAL,All200,AllFail,Deny,MisDL,Shed,MiscErr\n" >> "$results_directory/success_$t_id.csv" + printf "Workload,Throughput\n" >> "$results_directory/throughput_$t_id.csv" + percentiles_table_header "$results_directory/latency_$t_id.csv" "Workload" + done - for workload in "${WORKLOADS[@]}"; do + for workload in "${workloads[@]}"; do + local t_id=${workload_tids[$workload]} + local var=${workload_vars[$workload]} + + if [ ! -s "$results_directory/$workload-err.dat" ]; then + # The error file is empty. So remove it. + rm "$results_directory/$workload-err.dat" + fi if [[ ! -f "$results_directory/$workload.dat" ]]; then printf "[ERR]\n" @@ -263,40 +217,47 @@ process_client_results_loadtest() { fi # Get Number of 200s and then calculate Success Rate (percent of requests that return 200) - # If using loadtest -n option (not -t), then use all200/iterations instead of all200/total. 
total=$(grep "Completed requests:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13) total_failed=$(grep "Total errors:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13) denied=$(grep "429:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12) missed_dl=$(grep "408:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12) killed=$(grep "409:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12) misc_err=$(grep "\-1:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12) - all200=$((total-total_failed)) + all200=$((total - total_failed)) # ((all200 == 0)) && continue # If all errors, skip line - success_rate=$(echo "scale=2; $all200/$total*100"|bc) - printf "%s,%3.1f,%d,%d,%d,%d,%d,%d,%d\n" "$workload" "$success_rate" "$total" "$all200" "$total_failed" "$denied" "$missed_dl" "$killed" "$misc_err" >> "$results_directory/success.csv" + success_rate=$(echo "scale=2; $all200*100/$total" | bc) + printf "%s,%3.1f,%d,%d,%d,%d,%d,%d,%d\n" "$var" "$success_rate" "$total" "$all200" "$total_failed" "$denied" "$missed_dl" "$killed" "$misc_err" >> "$results_directory/success_$t_id.csv" # Throughput is calculated as the mean number of successful requests per second duration=$(grep "Total time:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13) printf -v duration %.0f "$duration" # throughput=$(grep "Requests per second" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 14 | tail -n 1) # throughput of ALL throughput=$(echo "$all200/$duration" | bc) - printf "%s,%d\n" "$workload" "$throughput" >> "$results_directory/throughput.csv" + printf "%s,%d\n" "$var" "$throughput" >> "$results_directory/throughput_$t_id.csv" # Generate Latency Data - min=0 # not provided by loadtest - p50=$(echo "$(grep 50% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)*1000" |bc) - p90=$(echo "$(grep 90% "$results_directory/${workload}.dat" | tr -s ' ' | 
cut -d ' ' -f 12)*1000" |bc) - p99=$(echo "$(grep 99% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)*1000" |bc) - p100=$(echo "$(grep 100% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12 | tail -n 1)*1000" |bc) - mean=$(echo "scale=1;$(grep "Mean latency:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13)*1000" |bc) - - printf "%s,%d,%d,%.1f,%d,%d,%d,%d\n" "$workload" "$total" "$min" "$mean" "$p50" "$p90" "$p99" "$p100" >> "$results_directory/latency.csv" + min=$(echo "$(grep "Minimum latency:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13)*1000" | bc) + p50=$(echo "$(grep 50% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)*1000" | bc) + p90=$(echo "$(grep 90% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)*1000" | bc) + p99=$(echo "$(grep 99% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12)*1000" | bc) + p100=$(echo "$(grep 100% "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 12 | tail -n 1)*1000" | bc) + mean=$(echo "scale=1;$(grep "Mean latency:" "$results_directory/${workload}.dat" | tr -s ' ' | cut -d ' ' -f 13)*1000" | bc) + + printf "%s,%d,%d,%.1f,%d,%d,%d,%d\n" "$var" "$total" "$min" "$mean" "$p50" "$p90" "$p99" "$p100" >> "$results_directory/latency_$t_id.csv" done - # Transform csvs to dat files for gnuplot - csv_to_dat "$results_directory/success.csv" "$results_directory/throughput.csv" "$results_directory/latency.csv" - rm "$results_directory/success.csv" "$results_directory/throughput.csv" "$results_directory/latency.csv" + for t_id in "${TENANT_IDS[@]}"; do + # Transform csvs to dat files for gnuplot + csv_to_dat "$results_directory/success_$t_id.csv" "$results_directory/throughput_$t_id.csv" "$results_directory/latency_$t_id.csv" + rm "$results_directory/success_$t_id.csv" "$results_directory/throughput_$t_id.csv" "$results_directory/latency_$t_id.csv" + done + + # Generate gnuplots + 
generate_gnuplots "$results_directory" "$__run_sh__base_path" || { + printf "[ERR]\n" + panic "failed to generate gnuplots" + } printf "[OK]\n" return 0 @@ -309,55 +270,58 @@ process_server_results() { printf "Processing Server Results: \n" - num_of_lines=$(wc -l < "$results_directory/$SERVER_LOG_FILE") + local -r num_of_lines=$(wc -l < "$results_directory/$SERVER_LOG_FILE") if [ "$num_of_lines" == 1 ]; then printf "No results to process! Exiting the script." return 1 fi # Write headers to CSVs - printf "Workload,Scs%%,TOTAL,SrvScs,All200,AllFail,DenyAny,DenyG,MisDL_Glb,MisDL_Loc,Shed_Glb,Shed_Loc,QueFull\n" >> "$results_directory/success.csv" - printf "Workload,Throughput\n" >> "$results_directory/throughput.csv" - percentiles_table_header "$results_directory/latency.csv" "Workload" + for t_id in "${TENANT_IDS[@]}"; do + printf "Workload,Scs%%,TOTAL,SrvScs,All200,AllFail,DenyBE,DenyG,xDenyBE,xDenyG,MisD_Glb,MisD_Loc,MisD_WB,Shed_Glb,Shed_Loc,Shed_WB,Misc\n" >> "$results_directory/success_$t_id.csv" + printf "Workload,Throughput\n" >> "$results_directory/throughput_$t_id.csv" + percentiles_table_header "$results_directory/latency_$t_id.csv" "Workload" - # Write headers to CSVs - for metric in "${SANDBOX_METRICS[@]}"; do - percentiles_table_header "$results_directory/$metric.csv" "Workload" + # Write headers to CSVs + for metric in "${SANDBOX_METRICS[@]}"; do + percentiles_table_header "$results_directory/${metric}_$t_id.csv" "Workload" + done + percentiles_table_header "$results_directory/running_user_200_$t_id.csv" "Workload" + # percentiles_table_header "$results_directory/running_user_nonzero_$t_id.csv" "Workload" + percentiles_table_header "$results_directory/total_200_$t_id.csv" "Workload" + # percentiles_table_header "$results_directory/memalloc_$t_id.csv" "Workload" done - percentiles_table_header "$results_directory/running_user_200.csv" "Workload" - percentiles_table_header "$results_directory/running_user_nonzero.csv" "Workload" - percentiles_table_header 
"$results_directory/total_200.csv" "Workload" - # percentiles_table_header "$results_directory/memalloc.csv" "module" - for workload in "${WORKLOADS[@]}"; do - mkdir "$results_directory/$workload" - - local -i deadline=${DEADLINES_us[$workload]} + for workload in "${workloads[@]}"; do + mkdir -p "$results_directory/$workload" + local t_id=${workload_tids[$workload]} + local deadline=${workload_deadlines[$workload]} + local var=${workload_vars[$workload]} + for metric in "${SANDBOX_METRICS[@]}"; do awk -F, ' {workload = sprintf("%s-%s", $'"$SANDBOX_TENANT_NAME_FIELD"', substr($'"$SANDBOX_ROUTE_FIELD"',2))} workload == "'"$workload"'" {printf("%d,%d\n", $'"${SANDBOX_METRICS_FIELDS[$metric]}"' / $'"$SANDBOX_CPU_FREQ_FIELD"', $'"$SANDBOX_RESPONSE_CODE_FIELD"')} ' < "$results_directory/$SERVER_LOG_FILE" | sort -g > "$results_directory/$workload/${metric}_sorted.csv" - percentiles_table_row "$results_directory/$workload/${metric}_sorted.csv" "$results_directory/${metric}.csv" "$workload" + percentiles_table_row "$results_directory/$workload/${metric}_sorted.csv" "$results_directory/${metric}_$t_id.csv" "$workload" # Delete scratch file used for sorting/counting # rm "$results_directory/$workload/${metric}_sorted.csv" done awk -F, '$2 == 200 {printf("%d,%d\n", $1, $2)}' < "$results_directory/$workload/running_user_sorted.csv" > "$results_directory/$workload/running_user_200_sorted.csv" - percentiles_table_row "$results_directory/$workload/running_user_200_sorted.csv" "$results_directory/running_user_200.csv" "$workload" - awk -F, '$1 > 0 {printf("%d,%d\n", $1, $2)}' < "$results_directory/$workload/running_user_sorted.csv" > "$results_directory/$workload/running_user_nonzero_sorted.csv" - percentiles_table_row "$results_directory/$workload/running_user_nonzero_sorted.csv" "$results_directory/running_user_nonzero.csv" "$workload" + percentiles_table_row "$results_directory/$workload/running_user_200_sorted.csv" "$results_directory/running_user_200_$t_id.csv" "$workload" + # 
awk -F, '$1 > 0 {printf("%d,%d\n", $1, $2)}' < "$results_directory/$workload/running_user_sorted.csv" > "$results_directory/$workload/running_user_nonzero_sorted.csv" + # percentiles_table_row "$results_directory/$workload/running_user_nonzero_sorted.csv" "$results_directory/running_user_nonzero_$t_id.csv" "$workload" awk -F, '$2 == 200 {printf("%d,%d\n", $1, $2)}' < "$results_directory/$workload/total_sorted.csv" > "$results_directory/$workload/total_200_sorted.csv" - percentiles_table_row "$results_directory/$workload/total_200_sorted.csv" "$results_directory/total_200.csv" "$workload" - + percentiles_table_row "$results_directory/$workload/total_200_sorted.csv" "$results_directory/total_200_$t_id.csv" "$workload" # Memory Allocation # awk -F, '$2 == "'"$workload"'" {printf("%.0f\n", $MEMORY_FIELD)}' < "$results_directory/$SERVER_LOG_FILE" | sort -g > "$results_directory/$workload/memalloc_sorted.csv" - # percentiles_table_row "$results_directory/$workload/memalloc_sorted.csv" "$results_directory/memalloc.csv" "$workload" "%1.0f" + # percentiles_table_row "$results_directory/$workload/memalloc_sorted.csv" "$results_directory/memalloc_$t_id.csv.csv" "$workload" "%1.0f" # Calculate Success Rate for csv (percent of requests that complete), $1 and deadline are both in us, so not converting awk -F, ' @@ -366,40 +330,52 @@ process_server_results() { $2 != 200 {total_failed++} $2 == 4290 {denied_any++} $2 == 4291 {denied_gtd++} + $2 == 4295 {x_denied_any++} + $2 == 4296 {x_denied_gtd++} $2 == 4080 {mis_dl_glob++} $2 == 4081 {mis_dl_local++} + $2 == 4082 {mis_dl_wb++} $2 == 4090 {shed_glob++} $2 == 4091 {shed_local++} - $2 == 999 {global_full++} - END{printf "'"$workload"',%3.1f,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", (ok/NR*100), NR, ok, all200, total_failed, denied_any, denied_gtd, mis_dl_glob, mis_dl_local, shed_glob, shed_local, global_full} - ' < "$results_directory/$workload/total_sorted.csv" >> "$results_directory/success.csv" + $2 == 4092 {shed_wb++} + $2 == 4093 
{misc++} + END{printf "'"$var"',%3.1f,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", (all200*100/NR), NR, ok, all200, total_failed, denied_any, denied_gtd, x_denied_any, x_denied_gtd, mis_dl_glob, mis_dl_local, mis_dl_wb, shed_glob, shed_local, shed_wb, misc} + ' < "$results_directory/$workload/total_sorted.csv" >> "$results_directory/success_$t_id.csv" # Throughput is calculated on the client side, so ignore the below line - printf "%s,%d\n" "$workload" "1" >> "$results_directory/throughput.csv" + printf "%s,%d\n" "$var" "1" >> "$results_directory/throughput_$t_id.csv" # Generate Latency Data for csv - percentiles_table_row "$results_directory/$workload/total_sorted.csv" "$results_directory/latency.csv" "$workload" + percentiles_table_row "$results_directory/$workload/total_sorted.csv" "$results_directory/latency_$t_id.csv" "$var" # Delete scratch file used for sorting/counting # rm "$results_directory/$workload/memalloc_sorted.csv" # Delete directory # rm -rf "${results_directory:?}/${workload:?}" - done - # Transform csvs to dat files for gnuplot - for metric in "${SANDBOX_METRICS[@]}"; do - csv_to_dat "$results_directory/$metric.csv" - rm "$results_directory/$metric.csv" + for t_id in "${TENANT_IDS[@]}"; do + for metric in "${SANDBOX_METRICS[@]}"; do + csv_to_dat "$results_directory/${metric}_$t_id.csv" + rm "$results_directory/${metric}_$t_id.csv" + done + + csv_to_dat "$results_directory/running_user_200_$t_id.csv" "$results_directory/total_200_$t_id.csv" # "$results_directory/running_user_nonzero_$t_id.csv" + rm "$results_directory/running_user_200_$t_id.csv" "$results_directory/total_200_$t_id.csv" # "$results_directory/running_user_nonzero_$t_id.csv" + + # Transform csvs to dat files for gnuplot + # csv_to_dat "$results_directory/memalloc$t_id.csv" + csv_to_dat "$results_directory/success_$t_id.csv" "$results_directory/throughput_$t_id.csv" "$results_directory/latency_$t_id.csv" + # rm "$results_directory/memalloc$t_id.csv" + rm 
"$results_directory/success_$t_id.csv" "$results_directory/throughput_$t_id.csv" "$results_directory/latency_$t_id.csv" done - csv_to_dat "$results_directory/running_user_200.csv" "$results_directory/running_user_nonzero.csv" "$results_directory/total_200.csv" - rm "$results_directory/running_user_200.csv" "$results_directory/running_user_nonzero.csv" "$results_directory/total_200.csv" - # csv_to_dat "$results_directory/memalloc.csv" - csv_to_dat "$results_directory/success.csv" "$results_directory/throughput.csv" "$results_directory/latency.csv" - # rm "$results_directory/memalloc.csv" - rm "$results_directory/success.csv" "$results_directory/throughput.csv" "$results_directory/latency.csv" + # Generate gnuplots + generate_gnuplots "$results_directory" "$__run_sh__base_path" || { + printf "[ERR]\n" + panic "failed to generate gnuplots" + } printf "[OK]\n" return 0 @@ -412,7 +388,7 @@ process_server_http_results() { printf "Processing Server HTTP Results: \n" - num_of_lines=$(wc -l < "$results_directory/$SERVER_HTTP_LOG_FILE") + local -r num_of_lines=$(wc -l < "$results_directory/$SERVER_HTTP_LOG_FILE") if [ "$num_of_lines" == 1 ]; then printf "No results to process! Exiting the script." 
return 1 @@ -422,8 +398,9 @@ process_server_http_results() { percentiles_table_header "$results_directory/$metric.csv" "workload" done - for workload in "${WORKLOADS[@]}"; do + for workload in "${workloads[@]}"; do mkdir -p "$results_directory/$workload" + local var=${workload_vars[$workload]} for metric in "${HTTP_METRICS[@]}"; do awk -F, ' @@ -431,7 +408,7 @@ process_server_http_results() { workload == "'"$workload"'" {printf("%.1f\n", $'"${HTTP_METRICS_FIELDS[$metric]}"' / $'"$HTTP_CPU_FREQ_FIELD"')} ' < "$results_directory/$SERVER_HTTP_LOG_FILE" | sort -g > "$results_directory/$workload/${metric}_sorted.csv" - percentiles_table_row "$results_directory/$workload/${metric}_sorted.csv" "$results_directory/${metric}.csv" "$workload" + percentiles_table_row "$results_directory/$workload/${metric}_sorted.csv" "$results_directory/${metric}.csv" "$var" # Delete directory # rm -rf "${results_directory:?}/${workload:?}" @@ -499,6 +476,3 @@ experiment_client() { return 0 } - -generate_spec_json -framework_init "$@" diff --git a/tests/cmu-sod/.gitignore b/tests/cmu-sod/.gitignore deleted file mode 100644 index 019d491..0000000 --- a/tests/cmu-sod/.gitignore +++ /dev/null @@ -1 +0,0 @@ -out.png diff --git a/tests/cmu-sod/0_depth.png b/tests/cmu-sod/0_depth.png deleted file mode 100644 index 242a89b..0000000 Binary files a/tests/cmu-sod/0_depth.png and /dev/null differ diff --git a/tests/cmu-sod/0_rgb.png b/tests/cmu-sod/0_rgb.png deleted file mode 100644 index 84da58a..0000000 Binary files a/tests/cmu-sod/0_rgb.png and /dev/null differ diff --git a/tests/cmu-sod/Makefile b/tests/cmu-sod/Makefile deleted file mode 100644 index c8d9b1a..0000000 --- a/tests/cmu-sod/Makefile +++ /dev/null @@ -1,30 +0,0 @@ -SLEDGE_BINARY_DIR=../../runtime/bin -HOSTNAME=localhost -PORT=10000 - -default: run - -clean: - rm -rf res/* - -run: - SLEDGE_SPINLOOP_PAUSE_ENABLED=true SLEDGE_SANDBOX_PERF_LOG=perf.log LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} ${SLEDGE_BINARY_DIR}/sledgert spec.json - -debug: - 
SLEDGE_SPINLOOP_PAUSE_ENABLED=true SLEDGE_NWORKERS=1 LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} gdb ${SLEDGE_BINARY_DIR}/sledgert --eval-command="run spec.json" - -valgrind: - SLEDGE_DISABLE_PREEMPTION=true SLEDGE_NWORKERS=1 LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} valgrind --leak-check=full --max-stackframe=11150456 --run-libc-freeres=no --run-cxx-freeres=no ${SLEDGE_BINARY_DIR}/sledgert spec.json - -.PHONY: client -client: - cat ./0_depth.png | http "${HOSTNAME}:${PORT}/depth_to_xyz" > ./out.png - -client-cloudlab: - cat ./0_depth.png | http "c220g2-011017.wisc.cloudlab.us:${PORT}/depth_to_xyz" > ./out.png - -client-cmu: - cat ./0_depth.png | http "arena0.andrew.cmu.edu:${PORT}/depth_to_xyz" > ./out.png - -multi: - hey -n 180 -c 180 -t 0 -o csv -m POST -D "./0_depth.png" "http://${HOSTNAME}:${PORT}/depth_to_xyz" diff --git a/tests/cmu-sod/expected_result.png b/tests/cmu-sod/expected_result.png deleted file mode 100644 index 48198ea..0000000 Binary files a/tests/cmu-sod/expected_result.png and /dev/null differ diff --git a/tests/cmu-sod/spec.json b/tests/cmu-sod/spec.json deleted file mode 100644 index dfa5432..0000000 --- a/tests/cmu-sod/spec.json +++ /dev/null @@ -1,25 +0,0 @@ -[ - { - "name": "cmu", - "port": 10000, - "replenishment-period-us": 0, - "max-budget-us": 0, - "routes": [ - { - "route": "/fib", - "path": "fibonacci.wasm.so", - "admissions-percentile": 50, - "expected-execution-us": 6000, - "relative-deadline-us": 20000, - "http-resp-content-type": "text/plain" - }, - { - "route": "/depth_to_xyz", - "path": "depth_to_xyz.wasm.so", - "expected-execution-us": 5000, - "relative-deadline-us": 360000, - "http-resp-content-type": "img/png" - } - ] - } -] diff --git a/tests/cmu-sod/test_requests.py b/tests/cmu-sod/test_requests.py deleted file mode 100644 index c74b328..0000000 --- a/tests/cmu-sod/test_requests.py +++ /dev/null @@ -1,20 +0,0 @@ -import time -import requests - -url = 'http://arena0.andrew.cmu.edu:10000/depth_to_xyz' - -payload = open('0_depth.png', 
'rb') - -img = None - -response = requests.post(url, data=payload) -img = response.content -time.sleep(1) -print("single request works!") - -for i in range(100): - payload = open('0_depth.png', 'rb') - response = requests.post(url, data=payload) - img = response.content - time.sleep(1) - print(f"multi request #{i} works!") diff --git a/tests/cmu-sod/test_threads.py b/tests/cmu-sod/test_threads.py deleted file mode 100644 index 3c7f5a3..0000000 --- a/tests/cmu-sod/test_threads.py +++ /dev/null @@ -1,31 +0,0 @@ -# import numpy as np -import requests -import threading -import time - -from flask import Flask, Response - -url = 'http://arena0.andrew.cmu.edu:10000/depth_to_xyz' - -# app = Flask(__name__) - -img = None - -def get_img(): - global img - while True: - print("start") - try: - payload = open('0_depth.png', 'rb') - response = requests.post(url, data=payload) - img = response.content - print("got img") - time.sleep(0.01) - except: - print("failed") - time.sleep(5) - -thread = threading.Thread(target=get_img) -thread.daemon = True -thread.start() -thread.join() diff --git a/tests/common/mt/mtdbf_preemption.env b/tests/common/mt/mtdbf_preemption.env deleted file mode 100644 index d415269..0000000 --- a/tests/common/mt/mtdbf_preemption.env +++ /dev/null @@ -1,3 +0,0 @@ -SLEDGE_SCHEDULER=MTDBF -SLEDGE_DISABLE_PREEMPTION=false -SLEDGE_SANDBOX_PERF_LOG=perf.log diff --git a/tests/mt_unimodal/Makefile b/tests/mt_unimodal/Makefile deleted file mode 100644 index e99c18c..0000000 --- a/tests/mt_unimodal/Makefile +++ /dev/null @@ -1,48 +0,0 @@ -RUNTIME_DIR=../../../runtime/ -SLEDGE_BINARY_DIR=${RUNTIME_DIR}/bin -SLEDGE_TESTS_DIR=${RUNTIME_DIR}/tests -HOSTNAME=localhost -DURATION_SEC=5 - -all: run - -clean: - make -C ${RUNTIME_DIR} clean - make -C ${SLEDGE_TESTS_DIR} clean - rm -f ${SLEDGE_BINARY_DIR}/fibonacci.wasm.so - -${SLEDGE_BINARY_DIR}/sledgert: - make -C ${RUNTIME_DIR} runtime - -.PHONY: sledgert -sledgert: ${SLEDGE_BINARY_DIR}/sledgert - 
-${SLEDGE_BINARY_DIR}/fibonacci.wasm.so: - make -C ../../../applications fibonacci.install - -.PHONY: fibonacci -fibonacci: ${SLEDGE_BINARY_DIR}/fibonacci.wasm.so - -run: sledgert fibonacci - LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} ${SLEDGE_BINARY_DIR}/sledgert spec.json - -debug: sledgert fibonacci - SLEDGE_DISABLE_PREEMPTION=true SLEDGE_NWORKERS=1 \ - LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} gdb ${SLEDGE_BINARY_DIR}/sledgert \ - --eval-command="handle SIGUSR1 noprint nostop" \ - --eval-command="handle SIGPIPE noprint nostop" \ - --eval-command="set pagination off" \ - --eval-command="set print pretty" \ - --eval-command="run spec.json" - -client-fib30-once: - echo 30 | http :10031/fib - -client-fib40-once: - http :10030/fib?40 - -client-fib30-multi: - hey -z ${DURATION_SEC}s -cpus 2 -c 100 -t 0 -o csv -m POST -d "30\n" "http://${HOSTNAME}:10030/fib" - -client-fib40-multi: - hey -z ${DURATION_SEC}s -cpus 2 -c 100 -t 0 -o csv -m POST -d "40\n" "http://${HOSTNAME}:10030/fib" diff --git a/tests/mt_unimodal/README.md b/tests/mt_unimodal/README.md deleted file mode 100644 index 24390b4..0000000 --- a/tests/mt_unimodal/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# Unimodal Distribution - -This experiment drives a unimodal distribution of two workloads from the same tenant, where one workload has some guarantees and the other does not. - - -First, the non-guaranteed workload is launched to acquire all the CPU. After a few second (e.g. 3s) the guaranteed workload begins. Depending on the amount of the reservation, the guaranteed workload should typically have a better success rate than the other one. 
- -## Independent Variable - -The Scheduling Policy: MT-EDF versus just EDF - -## Dependent Variables - -Replenishment Period and Max Budget of Tenants -Latency of high priority workload diff --git a/tests/mt_unimodal/spec.json b/tests/mt_unimodal/spec.json deleted file mode 100644 index 62f433c..0000000 --- a/tests/mt_unimodal/spec.json +++ /dev/null @@ -1,34 +0,0 @@ -[ - { - "name": "GWU", - "port": 20030, - "replenishment-period-us": 16000, - "max-budget-us": 144000, - "routes": [ - { - "route": "/fib", - "path": "fibonacci.wasm.so", - "admissions-percentile": 70, - "expected-execution-us": 4000, - "relative-deadline-us": 16000, - "http-resp-content-type": "text/plain" - } - ] - }, - { - "name": "NNN", - "port": 10030, - "replenishment-period-us": 0, - "max-budget-us": 0, - "routes": [ - { - "route": "/fib", - "path": "fibonacci.wasm.so", - "admissions-percentile": 70, - "expected-execution-us": 4000, - "relative-deadline-us": 16000, - "http-resp-content-type": "text/plain" - } - ] - } -] diff --git a/tests/multi-tenancy-sample/Makefile b/tests/multi-tenancy-sample/Makefile new file mode 100644 index 0000000..426ae0d --- /dev/null +++ b/tests/multi-tenancy-sample/Makefile @@ -0,0 +1,65 @@ +SLEDGE_BINARY_DIR=../../runtime/bin +HOSTNAME=10.10.1.1 +PORT1=10050 +PORT2=20050 +HEY_OPTS=-disable-compression -disable-keepalive -disable-redirects + +default: run + +clean: + rm -rf res/* + +run: + SLEDGE_SIGALRM_HANDLER=TRIAGED SLEDGE_SCHEDULER=MTDBF SLEDGE_SPINLOOP_PAUSE_ENABLED=true SLEDGE_HTTP_SESSION_PERF_LOG=http_perf.log SLEDGE_SANDBOX_PERF_LOG=perf.log LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} ${SLEDGE_BINARY_DIR}/sledgert spec.json + +debug: + SLEDGE_SCHEDULER=MTDBF SLEDGE_SPINLOOP_PAUSE_ENABLED=false SLEDGE_NWORKERS=18 LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} gdb ${SLEDGE_BINARY_DIR}/sledgert \ + --eval-command="handle SIGUSR1 noprint nostop" \ + --eval-command="handle SIGPIPE noprint nostop" \ + --eval-command="set pagination off" \ + --eval-command="set print pretty" \ 
+ --eval-command="run spec.json" + +valgrind: + SLEDGE_DISABLE_PREEMPTION=true SLEDGE_NWORKERS=1 LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} valgrind --leak-check=full --max-stackframe=11150456 --run-libc-freeres=no --run-cxx-freeres=no ${SLEDGE_BINARY_DIR}/sledgert spec.json + +client-localhost-xyz-once: + cat ./0_depth.png | http "localhost:${PORT1}/depth_to_xyz" > ./out.png + +client-localhost-fib-once: + http localhost:${PORT2}/fib?30 + + +client-xyz-once: + cat ./0_depth.png | http "${HOSTNAME}:${PORT1}/depth_to_xyz" > ./out.png + +client-xyz-hey: + hey ${HEY_OPTS} -n 90 -c 90 -t 0 -m POST -D "./0_depth.png" "http://${HOSTNAME}:${PORT1}/depth_to_xyz" + +#90=18*5, 4500=1s*72/14400 *90% +client-xyz-loadtest: + loadtest -n 90 -c 90 -T "image/png" -m POST -b "./0_depth.png" "http://${HOSTNAME}:${PORT1}/depth_to_xyz" + +client-xyz-wrk: + wrk -s post_binary.lua -t 1 -c 1 -d 1s -R 1 "http://${HOSTNAME}:${PORT1}/depth_to_xyz" -- "0_depth.png" + + +client-fib-once: + # echo 30 | http ${HOSTNAME}:${PORT2}/fib + http ${HOSTNAME}:${PORT2}/fib?30 + +client-fib-curl: + curl -i "http://${HOSTNAME}:${PORT2}/fib?30" + +#72=18*4, 4500=1s*72/14400 *90% +client-fib-loadtest: + loadtest -t 10 -c 72 --rps 4500 -P 30 "http://${HOSTNAME}:${PORT2}/fib" + +client-fib-hey: + hey ${HEY_OPTS} -z 10s -c 72 -t 0 -o csv -m POST -d "30\n" "http://${HOSTNAME}:${PORT2}/fib" + +client-fib-wrk: + wrk -t 1 -c 1 -d 5s -R 1 "http://${HOSTNAME}:${PORT2}/fib?30" + +client-admin: + echo 5 | http ${HOSTNAME}:55555/admin diff --git a/tests/multi-tenancy-sample/latency.gnuplot b/tests/multi-tenancy-sample/latency.gnuplot new file mode 100644 index 0000000..4bd60f2 --- /dev/null +++ b/tests/multi-tenancy-sample/latency.gnuplot @@ -0,0 +1,78 @@ +reset + +set term jpeg size 1000,500 +set output "latency.jpg" + +#set xlabel "Reservation Utilization %" +#set ylabel "Latency (us)" + +set key left top + +set xrange [-5:] +set yrange [0:] + +set style histogram columnstacked +set key horizontal + +set macros +# 
Placement of the a,b,c,d labels in the graphs +POS = "at graph 0.05,1.03 font ',10'" + +# x- and ytics for each row resp. column +NOXTICS = "unset xlabel" +XTICS = "set xlabel 'Reservation Utilization %'" +NOYTICS = "unset ylabel" +YTICS = "set ylabel 'Latency (us)'" + +# Margins for each row resp. column +TMARGIN = "set tmargin at screen 0.90; set bmargin at screen 0.55" +BMARGIN = "set tmargin at screen 0.55; set bmargin at screen 0.20" +LMARGIN = "set lmargin at screen 0.15; set rmargin at screen 0.55" +RMARGIN = "set lmargin at screen 0.55; set rmargin at screen 0.95" + +# plot \ +# for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:7 title 'Tenant '.t_id.' p99' w lp, \ +# for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:6 title 'Tenant '.t_id.' p90' w lp, \ +# for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:5 title 'Tenant '.t_id.' p50' w lp, \ +# for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:4 title 'Tenant '.t_id.' mean' w lp, \ +# for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:3 title 'Tenant '.t_id.' 
min' w lp + +### Start multiplot (2x2 layout) +set multiplot layout 2,2 rowsfirst +# --- GRAPH a +set label 1 'p99' @POS +@NOXTICS; @YTICS +#@TMARGIN; @LMARGIN +plot for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:7 title 'Tenant '.t_id w lp +# --- GRAPH b +set label 1 'p90' @POS +@NOXTICS; @NOYTICS +#@TMARGIN; @RMARGIN +plot for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:6 notitle w lp +# --- GRAPH c +set label 1 'p50' @POS +@XTICS; @YTICS +#@BMARGIN; @LMARGIN +plot for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:5 notitle w lp +# --- GRAPH d +set label 1 'mean' @POS +@XTICS; @NOYTICS +#@BMARGIN; @RMARGIN +plot for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:4 notitle w lp +unset multiplot +### End multiplot + +# plot \ +# 'latency_A.dat' using 1:7 title 'A p99' lt 1 lc 1 w lp, \ +# 'latency_A.dat' using 1:6 title 'A p90' lt 2 lc 1 w lp, \ +# 'latency_A.dat' using 1:5 title 'A p50' lt 3 lc 1 w lp, \ +# 'latency_A.dat' using 1:4 title 'A mean' lt 4 lc 1 w lp, \ +# 'latency_A.dat' using 1:3 title 'A min' lt 5 lc 1 w lp,\ +# 'latency_B.dat' using 1:7 title 'B p99' lt 1 lc 2 w lp, \ +# 'latency_B.dat' using 1:6 title 'B p90' lt 2 lc 2 w lp, \ +# 'latency_B.dat' using 1:5 title 'B p50' lt 3 lc 2 w lp, \ +# 'latency_B.dat' using 1:4 title 'B mean' lt 4 lc 2 w lp, \ +# 'latency_B.dat' using 1:3 title 'B min' lt 5 lc 2 w lp + +# 'latency_A.dat' using 1:8 title 'A p100' linetype 0 linecolor 1 with linespoints, \ +# 'latency_B.dat' using 1:8 title 'B p100' linetype 0 linecolor 2 with linespoints, \ diff --git a/tests/multi-tenancy-sample/latency.gnuplot.bak b/tests/multi-tenancy-sample/latency.gnuplot.bak new file mode 100644 index 0000000..6be2eed --- /dev/null +++ b/tests/multi-tenancy-sample/latency.gnuplot.bak @@ -0,0 +1,25 @@ +reset + +set term jpeg +set output "latency.jpg" + +set xlabel "Reservation Utilization %" +set ylabel "Latency (us)" + +set key left top + +set xrange [-5:] +set yrange [0:] + +set style histogram columnstacked
+set key horizontal + +plot \ + for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:7 title 'latency_'.t_id.'.dat' lt 2 lc 2 w lp, \ + for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:6 title 'latency_'.t_id.'.dat' lt 2 lc 2 w lp, \ + for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:5 title 'latency_'.t_id.'.dat' lt 2 lc 2 w lp, \ + for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:4 title 'latency_'.t_id.'.dat' lt 2 lc 2 w lp, \ + for [t_id in tenant_ids] 'latency_'.t_id.'.dat' using 1:3 title 'latency_'.t_id.'.dat' lt 2 lc 2 w lp + +# 'latency_A.dat' using 1:8 title 'A p100' linetype 0 linecolor 1 with linespoints, \ +# 'latency_B.dat' using 1:8 title 'B p100' linetype 0 linecolor 2 with linespoints, \ diff --git a/tests/multi-tenancy-sample/run.sh b/tests/multi-tenancy-sample/run.sh new file mode 100755 index 0000000..79e8e67 --- /dev/null +++ b/tests/multi-tenancy-sample/run.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +# shellcheck disable=SC1091,SC2034,SC2155 + +source ../bash_libraries/multi_tenancy_base.sh || exit 1 + +# To reduce post processing time, provide local-only meaningful metrics: +# Comment the following line in order to use ALL the metrics! 
+declare -a SANDBOX_METRICS=(total running_sys running_user) + +# declare -r APP_WASM="sample_app.wasm.so" +declare -r FIBONACCI_WASM="fibonacci.wasm.so" + +# The global configs for the scripts +declare -r CLIENT_TERMINATE_SERVER=true +declare -r DURATION_sec=30 +declare -r ESTIMATIONS_PERCENTILE=60 +declare -r NWORKERS=$(($(nproc)-2)) # all cores - 2 + +# Tenant configs: +declare -ar TENANT_IDS=("long" "short") +declare -ar INIT_PORTS=(10000 20000) +# declare -ar ROUTES=("sample_app_route1 sample_app_route2" "fib") +declare -ar ROUTES=("fib1 fib2" "fib") +declare -ar MTDS_REPL_PERIODS_us=(0 0) +declare -ar MTDS_MAX_BUDGETS_us=(0 0) + +# Per route configs: +declare -ar WASM_PATHS=("$FIBONACCI_WASM $FIBONACCI_WASM" "$FIBONACCI_WASM") +declare -ar RESP_CONTENT_TYPES=("text/plain text/plain" "text/plain") # image data: "image/png" +declare -ar EXPECTED_EXEC_TIMES_us=("64500 3600" "3600") +declare -ar DEADLINES_us=("322500 18000" "18000") + +# For image data: +# declare -ar ARG_OPTS_HEY=("-D" "-d") +# declare -ar ARG_OPTS_LT=("-b" "-P") +# declare -ar ARGS=("./0_depth.png" "30") + +declare -ar ARG_OPTS_HEY=("-d -d" "-d") +declare -ar ARG_OPTS_LT=("-P -P" "-P") +declare -ar ARGS=("36 30" "30") + +# 100=FULL, 50=HALF etc. +declare -ar LOADS=("50 70" "100") + +# When trying varying values, you must pick ONE value from the above params to ? (question mark) +# For example, for varying the reservations, try: declare -ar LOADS=("50 ?" 
"100") +declare -ar VARYING=(0) # no variation, single experiment +# declare -ar VARYING=(5 50 100) + +run_init +generate_spec_json +# framework_init "$@" diff --git a/tests/multi-tenancy-sample/spec.json b/tests/multi-tenancy-sample/spec.json new file mode 100644 index 0000000..ca2b3e0 --- /dev/null +++ b/tests/multi-tenancy-sample/spec.json @@ -0,0 +1,72 @@ +[ + { + "name": "Admin", + "port": 55555, + "replenishment-period-us": 0, + "max-budget-us": 0, + "reservation-percentile": 0, + "routes": [ + { + "route": "/admin", + "path": "fibonacci.wasm.so", + "admissions-percentile": 50, + "expected-execution-us": 1000, + "relative-deadline-us": 10000, + "http-resp-content-type": "text/plain" + }, + { + "route": "/terminator", + "path": "fibonacci.wasm.so", + "admissions-percentile": 50, + "expected-execution-us": 1000, + "relative-deadline-us": 10000, + "http-resp-content-type": "text/plain" + } + ], + "extra-exec-percentile": 0 + }, + { + "name": "long-000", + "port": 10000, + "replenishment-period-us": 0, + "max-budget-us": 0, + "reservation-percentile": 0, + "routes": [ + { + "route": "/fib1", + "path": "fibonacci.wasm.so", + "admissions-percentile": 60, + "expected-execution-us": 64500, + "relative-deadline-us": 322500, + "http-resp-content-type": "text/plain" + }, + { + "route": "/fib2", + "path": "fibonacci.wasm.so", + "admissions-percentile": 60, + "expected-execution-us": 3600, + "relative-deadline-us": 18000, + "http-resp-content-type": "text/plain" + } + ], + "extra-exec-percentile": 0 + }, + { + "name": "short-000", + "port": 20000, + "replenishment-period-us": 0, + "max-budget-us": 0, + "reservation-percentile": 0, + "routes": [ + { + "route": "/fib", + "path": "fibonacci.wasm.so", + "admissions-percentile": 60, + "expected-execution-us": 3600, + "relative-deadline-us": 18000, + "http-resp-content-type": "text/plain" + } + ], + "extra-exec-percentile": 0 + } +] diff --git a/tests/multi-tenancy-sample/success.gnuplot 
b/tests/multi-tenancy-sample/success.gnuplot new file mode 100644 index 0000000..ba32b9f --- /dev/null +++ b/tests/multi-tenancy-sample/success.gnuplot @@ -0,0 +1,15 @@ +reset + +set term jpeg +set output "success.jpg" + +set xlabel "Reservation Utilization %" +set ylabel "Deadline success rate %" + +set xrange [-5:] +set yrange [0:110] + +plot for [t_id in tenant_ids] 'success_'.t_id.'.dat' using 1:2 title t_id w lp + +#plot 'success_A.dat' using 1:2 title 'Tenant A success rate' linetype 1 linecolor 1 with linespoints,\ +# 'success_B.dat' using 1:2 title 'Tenant B success rate' lt 2 lc 2 w lp diff --git a/tests/mt_unimodal/template.json b/tests/multi-tenancy-sample/template.json similarity index 90% rename from tests/mt_unimodal/template.json rename to tests/multi-tenancy-sample/template.json index 696dbd2..721dca5 100644 --- a/tests/mt_unimodal/template.json +++ b/tests/multi-tenancy-sample/template.json @@ -3,6 +3,7 @@ "port": 0, "replenishment-period-us": 0, "max-budget-us": 0, + "reservation-percentile": 0, "routes": [ { "route": "/route", diff --git a/tests/multi-tenancy-sample/throughput.gnuplot b/tests/multi-tenancy-sample/throughput.gnuplot new file mode 100644 index 0000000..fdd536a --- /dev/null +++ b/tests/multi-tenancy-sample/throughput.gnuplot @@ -0,0 +1,15 @@ +reset + +set term jpeg +set output "throughput.jpg" + +set xlabel "Reservation Utilization %" +set ylabel "Requests/sec" + +set xrange [-5:] +set yrange [0:] + +plot for [t_id in tenant_ids] 'throughput_'.t_id.'.dat' using 1:2 title 'Tenant '.t_id w lp + +#plot 'throughput_A.dat' using 1:2 title 'Tenant A Throughput' linetype 1 linecolor 1 with linespoints,\ +# 'throughput_B.dat' using 1:2 title 'Tenant B Throughput' linetype 2 linecolor 2 with linespoints