From ad0657dfe499e8ad6a124c369eb587761219de4f Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Wed, 19 May 2021 20:28:22 +0000 Subject: [PATCH] feat: Initial pass of workload mix --- runtime/experiments/workload_mix/.gitignore | 3 + runtime/experiments/workload_mix/README.md | 1 + .../workload_mix/edf_nopreemption.env | 2 + .../workload_mix/edf_preemption.env | 3 + .../workload_mix/fifo_nopreemption.env | 2 + .../workload_mix/fifo_preemption.env | 2 + runtime/experiments/workload_mix/install.sh | 13 + runtime/experiments/workload_mix/mix.csv | 2 + runtime/experiments/workload_mix/run.sh | 237 ++++++++++++++++++ runtime/experiments/workload_mix/spec.json | 32 +++ 10 files changed, 297 insertions(+) create mode 100644 runtime/experiments/workload_mix/.gitignore create mode 100644 runtime/experiments/workload_mix/README.md create mode 100644 runtime/experiments/workload_mix/edf_nopreemption.env create mode 100644 runtime/experiments/workload_mix/edf_preemption.env create mode 100644 runtime/experiments/workload_mix/fifo_nopreemption.env create mode 100644 runtime/experiments/workload_mix/fifo_preemption.env create mode 100755 runtime/experiments/workload_mix/install.sh create mode 100644 runtime/experiments/workload_mix/mix.csv create mode 100755 runtime/experiments/workload_mix/run.sh create mode 100644 runtime/experiments/workload_mix/spec.json diff --git a/runtime/experiments/workload_mix/.gitignore b/runtime/experiments/workload_mix/.gitignore new file mode 100644 index 0000000..26cdcd9 --- /dev/null +++ b/runtime/experiments/workload_mix/.gitignore @@ -0,0 +1,3 @@ +res +perf.data +perf.data.old diff --git a/runtime/experiments/workload_mix/README.md b/runtime/experiments/workload_mix/README.md new file mode 100644 index 0000000..1c8dfe9 --- /dev/null +++ b/runtime/experiments/workload_mix/README.md @@ -0,0 +1 @@ +# Workload Distribution diff --git a/runtime/experiments/workload_mix/edf_nopreemption.env b/runtime/experiments/workload_mix/edf_nopreemption.env new 
file mode 100644 index 0000000..eeba531 --- /dev/null +++ b/runtime/experiments/workload_mix/edf_nopreemption.env @@ -0,0 +1,2 @@ +SLEDGE_SCHEDULER=EDF +SLEDGE_DISABLE_PREEMPTION=true diff --git a/runtime/experiments/workload_mix/edf_preemption.env b/runtime/experiments/workload_mix/edf_preemption.env new file mode 100644 index 0000000..302a324 --- /dev/null +++ b/runtime/experiments/workload_mix/edf_preemption.env @@ -0,0 +1,3 @@ +SLEDGE_SCHEDULER=EDF +SLEDGE_DISABLE_PREEMPTION=false +SLEDGE_SIGALRM_HANDLER=TRIAGED diff --git a/runtime/experiments/workload_mix/fifo_nopreemption.env b/runtime/experiments/workload_mix/fifo_nopreemption.env new file mode 100644 index 0000000..a572a70 --- /dev/null +++ b/runtime/experiments/workload_mix/fifo_nopreemption.env @@ -0,0 +1,2 @@ +SLEDGE_SCHEDULER=FIFO +SLEDGE_DISABLE_PREEMPTION=true diff --git a/runtime/experiments/workload_mix/fifo_preemption.env b/runtime/experiments/workload_mix/fifo_preemption.env new file mode 100644 index 0000000..eb1298f --- /dev/null +++ b/runtime/experiments/workload_mix/fifo_preemption.env @@ -0,0 +1,2 @@ +SLEDGE_SCHEDULER=FIFO +SLEDGE_DISABLE_PREEMPTION=false diff --git a/runtime/experiments/workload_mix/install.sh b/runtime/experiments/workload_mix/install.sh new file mode 100755 index 0000000..0cbcfe8 --- /dev/null +++ b/runtime/experiments/workload_mix/install.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +if ! 
command -v hey > /dev/null; then + HEY_URL=https://hey-release.s3.us-east-2.amazonaws.com/hey_linux_amd64 + wget $HEY_URL -O hey + chmod +x hey + + if [[ $(whoami) == "root" ]]; then + mv hey /usr/bin/hey + else + sudo mv hey /usr/bin/hey + fi +fi diff --git a/runtime/experiments/workload_mix/mix.csv b/runtime/experiments/workload_mix/mix.csv new file mode 100644 index 0000000..5a97992 --- /dev/null +++ b/runtime/experiments/workload_mix/mix.csv @@ -0,0 +1,2 @@ +80,fibonacci_10 +20,fibonacci_40 diff --git a/runtime/experiments/workload_mix/run.sh b/runtime/experiments/workload_mix/run.sh new file mode 100755 index 0000000..267102a --- /dev/null +++ b/runtime/experiments/workload_mix/run.sh @@ -0,0 +1,237 @@ +#!/bin/bash + +# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate +# Success - The percentage of requests that complete by their deadlines +# TODO: Does this handle non-200s? +# Throughput - The mean number of successful requests per second +# Latency - the round-trip response time (unit?) of successful requests at the p50, p90, p99, and p100 percentiles + +# Add bash_libraries directory to path +__run_sh__base_path="$(dirname "$(realpath --logical "${BASH_SOURCE[0]}")")" +__run_sh__bash_libraries_relative_path="../bash_libraries" +__run_sh__bash_libraries_absolute_path=$(cd "$__run_sh__base_path" && cd "$__run_sh__bash_libraries_relative_path" && pwd) +export PATH="$__run_sh__bash_libraries_absolute_path:$PATH" +source csv_to_dat.sh || exit 1 +source framework.sh || exit 1 +# source generate_gnuplots.sh || exit 1 +source get_result_count.sh || exit 1 +source panic.sh || exit 1 +source path_join.sh || exit 1 +if ! command -v hey > /dev/null; then + echo "hey is not present."
+ exit 1 +fi + +# Sends requests until the per-module perf window buffers are full +# This ensures that Sledge has accurate estimates of execution time +run_samples() { + if (($# != 1)); then + panic "invalid number of arguments \"$1\"" + return 1 + elif [[ -z "$1" ]]; then + panic "hostname \"$1\" was empty" + return 1 + fi + + local hostname="${1}" + + # Scrape the perf window size from the source if possible + # TODO: Make a util function + local -r perf_window_path="$(path_join "$__run_sh__base_path" ../../include/perf_window_t.h)" + local -i perf_window_buffer_size + if ! perf_window_buffer_size=$(grep "#define PERF_WINDOW_BUFFER_SIZE" < "$perf_window_path" | cut -d\ -f3); then + printf "Failed to scrape PERF_WINDOW_BUFFER_SIZE from ../../include/perf_window_t.h\n" + printf "Defaulting to 16\n" + perf_window_buffer_size=16 + fi + local -ir perf_window_buffer_size + + printf "Running Samples: " + hey -n "$perf_window_buffer_size" -c "$perf_window_buffer_size" -cpus 3 -t 0 -o csv -m GET -d "40\n" "http://${hostname}:10040" 1> /dev/null 2> /dev/null || { + printf "[ERR]\n" + panic "fibonacci_40 samples failed with $?" + return 1 + } + + hey -n "$perf_window_buffer_size" -c "$perf_window_buffer_size" -cpus 3 -t 0 -o csv -m GET -d "10\n" "http://${hostname}:10010" 1> /dev/null 2> /dev/null || { + printf "[ERR]\n" + panic "fibonacci_10 samples failed with $?" + return 1 + } + + printf "[OK]\n" + return 0 +} + +# Execute the fib10 and fib40 experiments sequentially and concurrently +# $1 (hostname) +# $2 (results_directory) - a directory where we will store our results +run_experiments() { + if (($# != 2)); then + panic "invalid number of arguments \"$1\"" + return 1 + elif [[ -z "$1" ]]; then + panic "hostname \"$1\" was empty" + return 1 + elif [[ !
-d "$2" ]]; then + panic "directory \"$2\" does not exist" + return 1 + fi + + local hostname="$1" + local results_directory="$2" + + local -a workloads=() + + local -Ar port=( + [fibonacci_10]=10010 + [fibonacci_40]=10040 + ) + + local -Ar body=( + [fibonacci_10]=10 + [fibonacci_40]=40 + ) + + local -A floor=() + local -A length=() + local -i total=0 + + local -a buffer=() + local workload="" + local -i odds=0 + while read -r line; do + # Read into buffer array, splitting on commas + readarray -t -d, buffer < <(echo -n "$line") + # Use human friendly names + odds="${buffer[0]}" + workload="${buffer[1]}" + # Update workload mix structures + workloads+=("$workload") + floor+=(["$workload"]=$total) + length+=(["$workload"]=$odds) + ((total += odds)) + done < mix.csv + + declare -ir random_max=32767 + # Validate Workload Mix + if ((total <= 0 || total > random_max)); then + echo "total must be between 1 and $random_max" + exit 1 + fi + + # TODO: Check that workload is in spec.json + local -ir batch_size=1 + local -i batch_id=0 + local -i roll=0 + local -ir total_iterations=100 + local -ir worker_max=5 + + printf "Running Experiments\n" + + # Select a random workload using the workload mix and run command, writing output to disk + for ((i = 0; i < total_iterations; i += batch_size)); do + # Block waiting for a worker to finish if we are at our max + worker_count=$(($(ps --no-headers -o pid --ppid=$$ | wc -w) - 1)) + # ((worker_count > worker_max)) && { + # echo "More subprocesses than expected" + # exit 1 + # } + while ((worker_count >= worker_max)); do + wait -n + worker_count=$(($(ps --no-headers -o pid --ppid=$$ | wc -w) - 1)) + done + roll=$((RANDOM % total)) + ((batch_id++)) + for workload in "${workloads[@]}"; do + if ((roll >= floor[$workload] && roll < floor[$workload] + length[$workload])); then + # echo "hey -n $batch_size -c 1 -cpus 1 -t 0 -o csv -m GET -d ${body[$workload]}\n http://${hostname}:${port[$workload]}" + + hey -n $batch_size -c 1 -cpus 1 -t 0 -o 
csv -m GET -d "${body[$workload]}\n" "http://${hostname}:${port[$workload]}" > "$results_directory/${workload}_${batch_id}.csv" 2> /dev/null & + break + fi + done + done + # Re-count live workers before draining: the value computed inside the loop + # predates the final background hey and may already be zero + worker_count=$(($(ps --no-headers -o pid --ppid=$$ | wc -w) - 1)) + while ((worker_count > 0)); do + wait -n + worker_count=$(($(ps --no-headers -o pid --ppid=$$ | wc -w) - 1)) + done + printf "[OK]\n" + + for workload in "${workloads[@]}"; do + tail --quiet -n +2 "$results_directory/${workload}"_*.csv >> "$results_directory/${workload}.csv" + # rm "$results_directory/${workload}"_*.csv + done + + return 0 +} + +# Process the experimental results and generate human-friendly results for success rate, throughput, and latency
process_results() { + if (($# != 1)); then + error_msg "invalid number of arguments ($#, expected 1)" + return 1 + elif ! [[ -d "$1" ]]; then + error_msg "directory $1 does not exist" + return 1 + fi + + local -r results_directory="$1" + + printf "Processing Results: " + + # Write headers to CSVs + printf "Payload,p50,p90,p99,p100\n" >> "$results_directory/latency.csv" + + local -ar payloads=(fibonacci_10 fibonacci_40) + for payload in "${payloads[@]}"; do + + # Filter on 200s, convert from s to ms, and sort + awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \ + | sort -g > "$results_directory/$payload-response.csv" + + oks=$(wc -l < "$results_directory/$payload-response.csv") + ((oks == 0)) && continue # If all errors, skip line + + # Generate Latency Data for csv + awk ' + BEGIN { + sum = 0 + p50 = int('"$oks"' * 0.5) + p90 = int('"$oks"' * 0.9) + p99 = int('"$oks"' * 0.99) + p100 = '"$oks"' + printf "'"$payload"'," + } + NR==p50 {printf "%1.4f,", $0} + NR==p90 {printf "%1.4f,", $0} + NR==p99 {printf "%1.4f,", $0} + NR==p100 {printf "%1.4f\n", $0} + ' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv" + + # Delete scratch file used for sorting/counting + rm -rf "$results_directory/$payload-response.csv" + done + + # Transform csvs to dat files for gnuplot + csv_to_dat
"$results_directory/latency.csv" + + printf "[OK]\n" + return 0 +} + +# Expected Symbol used by the framework +experiment_main() { + local -r target_hostname="$1" + local -r results_directory="$2" + + run_samples "$target_hostname" || return 1 + run_experiments "$target_hostname" "$results_directory" || return 1 + process_results "$results_directory" || return 1 + + return 0 +} + +main "$@" diff --git a/runtime/experiments/workload_mix/spec.json b/runtime/experiments/workload_mix/spec.json new file mode 100644 index 0000000..f40b367 --- /dev/null +++ b/runtime/experiments/workload_mix/spec.json @@ -0,0 +1,32 @@ +{ + "active": true, + "name": "fibonacci_10", + "path": "fibonacci_wasm.so", + "port": 10010, + "expected-execution-us": 6000, + "admissions-percentile": 70, + "relative-deadline-us": 20000, + "argsize": 1, + "http-req-headers": [], + "http-req-content-type": "text/plain", + "http-req-size": 1024, + "http-resp-headers": [], + "http-resp-size": 1024, + "http-resp-content-type": "text/plain" +}, +{ + "active": true, + "name": "fibonacci_40", + "path": "fibonacci_wasm.so", + "port": 10040, + "expected-execution-us": 10000000, + "admissions-percentile": 70, + "relative-deadline-us": 20000000, + "argsize": 1, + "http-req-headers": [], + "http-req-content-type": "text/plain", + "http-req-size": 1024, + "http-resp-headers": [], + "http-resp-size": 1024, + "http-resp-content-type": "text/plain" +}