chore: bash cleanup

Branch: main
Author: Sean McBride, 4 years ago
Parent: 16153b997c
Commit: 14a96de776

@@ -21,7 +21,7 @@ else
echo "Running under gdb"
fi
-# expected_size="$(find expected_result.jpg -printf "%s")"
+expected_size="$(find expected_result.jpg -printf "%s")"
success_count=0
total_count=50
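For context, the counters above suggest the loop that follows compares each response's size against expected_size. A minimal sketch of what that tally could look like; the result.jpg filename and the equality check are assumptions, not taken from this diff:

actual_size="$(find result.jpg -printf "%s")" # assumed output file, sized the same way as expected_result.jpg
if [[ "$actual_size" == "$expected_size" ]]; then
	success_count=$((success_count + 1))
fi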

@@ -33,7 +33,7 @@ for ((i = 0; i < total_count; i++)); do
for dpi in "${dpis[@]}"; do
echo "${dpi}"_dpi.pnm
-pango-view --dpi=$dpi --font=mono -qo "${dpi}"_dpi.png -t "$words"
+pango-view --dpi="$dpi" --font=mono -qo "${dpi}"_dpi.png -t "$words"
pngtopnm "${dpi}"_dpi.png > "${dpi}"_dpi.pnm
result=$(curl -H 'Expect:' -H "Content-Type: text/plain" --data-binary @"${dpi}"_dpi.pnm localhost:${dpi_to_port[$dpi]} 2> /dev/null)
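The loop above reads two arrays that are declared elsewhere in this script. A minimal sketch of their shape; the DPI values and port numbers here are illustrative, not taken from this diff:

declare -a dpis=(72 108 144)                                # DPIs to render the text at
declare -A dpi_to_port=([72]=10000 [108]=10001 [144]=10002) # DPI -> OCR server port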

@@ -32,21 +32,21 @@ for ((i = 1; i <= total_count; i++)); do
echo "DejaVu Sans Mono"
pango-view --font="DejaVu Sans Mono" -qo mono_words.png -t "$words" || exit 1
pngtopnm mono_words.png > mono_words.pnm || exit 1
-result=$( curl -H 'Expect:' -H "Content-Type: text/plain" --data-binary @mono_words.pnm localhost:10000 2> /dev/null)
+result=$(curl -H 'Expect:' -H "Content-Type: text/plain" --data-binary @mono_words.pnm localhost:10000 2> /dev/null)
diff -ywBZE --suppress-common-lines <(echo "$words") <(echo "$result")
;;
"Roboto")
echo "Roboto"
pango-view --font="Roboto" -qo Roboto_words.png -t "$words" || exit 1
pngtopnm Roboto_words.png > Roboto_words.pnm || exit 1
-result=$( curl -H 'Expect:' -H "Content-Type: text/plain" --data-binary @Roboto_words.pnm localhost:10002 2> /dev/null)
+result=$(curl -H 'Expect:' -H "Content-Type: text/plain" --data-binary @Roboto_words.pnm localhost:10002 2> /dev/null)
diff -ywBZE --suppress-common-lines <(echo "$words") <(echo "$result")
;;
"Cascadia Code")
echo "Cascadia Code"
pango-view --font="Cascadia Code" -qo Cascadia_Code_words.png -t "$words" || exit 1
pngtopnm Cascadia_Code_words.png > Cascadia_Code_words.pnm || exit 1
-result=$( curl -H 'Expect:' -H "Content-Type: text/plain" --data-binary @Cascadia_Code_words.pnm localhost:10001 2> /dev/null)
+result=$(curl -H 'Expect:' -H "Content-Type: text/plain" --data-binary @Cascadia_Code_words.pnm localhost:10001 2> /dev/null)
diff -ywBZE --suppress-common-lines <(echo "$words") <(echo "$result")
;;
esac
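The three font cases above differ only in the font name and the target port. A hedged sketch of a table-driven alternative; run_font and the space-to-underscore slug are inventions for illustration, while the ports are copied from the cases above:

declare -A font_to_port=(["DejaVu Sans Mono"]=10000 ["Cascadia Code"]=10001 ["Roboto"]=10002)
run_font() {
	local font="$1" slug="${1// /_}" # e.g. "Cascadia Code" -> "Cascadia_Code"
	pango-view --font="$font" -qo "${slug}_words.png" -t "$words" || exit 1
	pngtopnm "${slug}_words.png" > "${slug}_words.pnm" || exit 1
	result=$(curl -H 'Expect:' -H "Content-Type: text/plain" --data-binary @"${slug}_words.pnm" localhost:"${font_to_port[$font]}" 2> /dev/null)
	diff -ywBZE --suppress-common-lines <(echo "$words") <(echo "$result")
}
run_font "Roboto"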

@@ -1,6 +1,10 @@
#!/bin/bash
log_environment() {
+if ! command -v git &> /dev/null; then
+echo "git could not be found"
+exit
+fi
echo "*******"
echo "* Git *"
echo "*******"
@@ -39,6 +43,20 @@ kill_runtime() {
}
generate_gnuplots() {
+if ! command -v gnuplot &> /dev/null; then
+echo "gnuplot could not be found"
+exit
+fi
+# shellcheck disable=SC2154
+if [ -z "$results_directory" ]; then
+echo "results_directory is unset or empty"
+exit
+fi
+# shellcheck disable=SC2154
+if [ -z "$experiment_directory" ]; then
+echo "experiment_directory is unset or empty"
+exit
+fi
cd "$results_directory" || exit
gnuplot ../../latency.gnuplot
gnuplot ../../success.gnuplot
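The command -v guard now appears in several functions in this file. A sketch of a shared helper these checks could collapse into; the require name is hypothetical, and it uses exit 1 where the checks above use a bare exit, which reports status 0 to the caller:

require() {
	local cmd
	for cmd in "$@"; do
		if ! command -v "$cmd" &> /dev/null; then
			echo "$cmd could not be found"
			exit 1 # a bare "exit" would signal success
		fi
	done
}
require git gnuplot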

@@ -7,7 +7,6 @@ source ../common.sh
host=localhost
timestamp=$(date +%s)
experiment_directory=$(pwd)
-binary_directory=$(cd ../../bin && pwd)
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
@@ -31,9 +30,9 @@ echo "Running Experiments"
# Run lower priority first, then higher priority. The lower priority has offsets to ensure it runs the entire time the high priority is trying to run
hey -n 1000 -c 1000 -cpus 6 -t 0 -o csv -m GET -d "40\n" http://${host}:10040 > "$results_directory/fib40-con.csv"
-# sleep $offset
-# hey -n 25000 -c 1000000 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 >"$results_directory/fib10-con.csv" &
-# sleep $((duration_sec + offset + 45))
+sleep $offset
+hey -n 25000 -c 1000000 -t 0 -o csv -m GET -d "10\n" http://${host}:10010 > "$results_directory/fib10-con.csv" &
+sleep $((duration_sec + offset + 45))
# Generate *.csv and *.dat results
echo -n "Parsing Results: "
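For reference, the awk passes in the hunks below assume hey's CSV layout: a header row (hence the NR - 1 divisor in the success-rate calculations), the response time in seconds in column 1, and the HTTP status code in column 7. An illustrative row under that assumed layout, with invented values:

# response-time,DNS+dialup,DNS,Request-write,Response-delay,Response-read,status-code,offset
# 0.0213,0.0011,0.0004,0.0000,0.0197,0.0001,200,1.0452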

@@ -8,7 +8,6 @@ host=192.168.1.13
# host=localhost
timestamp=$(date +%s)
experiment_directory=$(pwd)
-binary_directory=$(cd ../../bin && pwd)
results_directory="$experiment_directory/res/$timestamp"
log=log.txt

@@ -8,7 +8,6 @@ host=192.168.1.13
# host=localhost
timestamp=$(date +%s)
experiment_directory=$(pwd)
-binary_directory=$(cd ../../bin && pwd)
results_directory="$experiment_directory/res/$timestamp"
log=log.txt
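The binary_directory assignments removed in these hunks were never read by the scripts, which is the unused-variable case shellcheck reports as SC2034:

binary_directory=$(cd ../../bin && pwd)
# ^ SC2034: binary_directory appears unused. Verify use (or export if used externally).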

@@ -1,7 +1,5 @@
#!/bin/bash
source ../common.sh
timestamp=$(date +%s)
experiment_directory=$(pwd)
-binary_directory=$(cd ../../bin && pwd)

@@ -63,7 +63,7 @@ for payload in ${payloads[*]}; do
awk -F, '
$7 == 200 {ok++}
END{printf "'"$payload"',%3.5f%\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
@@ -92,7 +92,7 @@ for payload in ${payloads[*]}; do
NR==p90 {printf "%1.4f,", $0}
NR==p99 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"
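The p90/p99/p100 row indices above are awk variables injected from the shell. A minimal sketch of how they could be derived from the sorted sample count; the wc -l counting matches the wc dependency declared later in this commit, but this derivation is an assumption, not shown in this diff:

count=$(wc -l < "$results_directory/$payload-response.csv") # number of sorted samples
p90=$((count * 90 / 100))
p99=$((count * 99 / 100))
p100=$count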

@@ -29,7 +29,7 @@ for payload in ${payloads[*]}; do
awk -F, '
$7 == 200 {ok++}
END{printf "'"$payload"',%3.5f%\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
awk -F, '$7 == 200 {print ($1 * 1000)}' < "$results_directory/$payload.csv" \
@@ -62,7 +62,7 @@ for payload in ${payloads[*]}; do
NR==p998 {printf "%1.4f,", $0}
NR==p999 {printf "%1.4f,", $0}
NR==p100 {printf "%1.4f\n", $0}
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
' < "$results_directory/$payload-response.csv" >> "$results_directory/latency.csv"
# Delete scratch file used for sorting/counting
# rm -rf "$results_directory/$payload-response.csv"

@@ -1,9 +1,19 @@
#!/bin/bash
-source ../common.sh
# This experiment is intended to document how the level of concurrent requests influences the latency, throughput, and success/failure rate
# Use -d flag if running under gdb
+source ../common.sh
+# Validate dependencies
+declare -a -r dependencies=(awk hey wc)
+for dependency in "${dependencies[@]}"; do
+if ! command -v "$dependency" &> /dev/null; then
+echo "$dependency could not be found"
+exit
+fi
+done
timestamp=$(date +%s)
experiment_directory=$(pwd)
binary_directory=$(cd ../../bin && pwd)
@@ -74,7 +84,7 @@ for scheduler in ${schedulers[*]}; do
# Calculate Success Rate for csv
awk -F, '
$7 == 200 && ($1 * 1000) <= '"$deadline"' {ok++}
-END{printf "'"$payload"',%3.5f%\n", (ok / (NR - 1) * 100)}
+END{printf "'"$payload"',%3.5f\n", (ok / (NR - 1) * 100)}
' < "$results_directory/$payload.csv" >> "$results_directory/success.csv"
# Filter on 200s, convert from s to ms, and sort
