Compare commits

...

10 Commits

@ -1,3 +1,3 @@
LD_LIBRARY_PATH=/home/hai/sledge-old/runtime/bin LD_LIBRARY_PATH=/home/hai/sledge/sledge/runtime/bin
SLEDGE_SCHEDULER=EDF SLEDGE_SCHEDULER=MDL
SLEDGE_SANDBOX_PERF_LOG=/home/hai/sledge-old/runtime_sandbox_perf_log.log SLEDGE_SANDBOX_PERF_LOG=/home/hai/sledge/sledge/runtime/tests/runtime_sandbox_perf_log.log

5
.gitignore vendored

@ -56,6 +56,11 @@ runtime/tests/tmp/
runtime/tests/**/*.csv runtime/tests/**/*.csv
runtime/tests/**/*.txt runtime/tests/**/*.txt
runtime/tests/**/*.xlsx runtime/tests/**/*.xlsx
runtime/tests/test_data
runtime/tests/*.log
runtime/data/*.txt
runtime/data/*.log
runtime/data/*.json
# Swap Files # Swap Files
*.swp *.swp

4
.gitmodules vendored

@ -28,8 +28,8 @@ url = https://github.com/gwsystems/CMSIS_5_NN.git
branch = sledge branch = sledge
[submodule "runtime/tests/sod"] [submodule "runtime/tests/sod"]
path = runtime/tests/sod path = runtime/tests/sod
url = https://github.com/gwsystems/sod.git url = http://47.120.57.226:3000/haiwan/Sod.git
branch = sledge branch = main
[submodule "runtime/tests/speechtotext"] [submodule "runtime/tests/speechtotext"]
path = runtime/tests/speechtotext path = runtime/tests/speechtotext
url = https://github.com/gwsystems/speechtotext.git url = https://github.com/gwsystems/speechtotext.git

@ -10,7 +10,8 @@
"USE_MEM_VM", "USE_MEM_VM",
"x86_64", "x86_64",
"_GNU_SOURCE", "_GNU_SOURCE",
"LOG_TO_FILE" "LOG_TO_FILE",
"DEEP_LEARN_SCHDUE"
], ],
"cStandard": "${default}", "cStandard": "${default}",
"compilerPath": "/usr/bin/clang", "compilerPath": "/usr/bin/clang",

@ -1,7 +1,4 @@
{ {
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0", "version": "0.2.0",
"configurations": [ "configurations": [
{ {
@ -58,7 +55,9 @@
], ],
"stopAtEntry": false, "stopAtEntry": false,
"cwd": "${workspaceFolder}", "cwd": "${workspaceFolder}",
"sourceFileMap": {"/sledge/runtime": "${workspaceFolder}/runtime"}, "sourceFileMap": {
"/sledge/runtime": "${workspaceFolder}/runtime"
},
"environment": [], "environment": [],
"externalConsole": false, "externalConsole": false,
"MIMode": "gdb", "MIMode": "gdb",
@ -91,6 +90,25 @@
"ignoreFailures": true "ignoreFailures": true
} }
] ]
},
{
"name": "C/C++ Runner: Debug Session",
"type": "cppdbg",
"request": "launch",
"args": [],
"stopAtEntry": false,
"externalConsole": false,
"cwd": "/home/weihao/sledge/sledge_tree/runtime/tests/noop",
"program": "/home/weihao/sledge/sledge_tree/runtime/tests/noop/build/Debug/outDebug",
"MIMode": "gdb",
"miDebuggerPath": "gdb",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
}
]
} }
] ]
} }

@ -111,7 +111,10 @@
"atomic": "c", "atomic": "c",
"condition_variable": "c", "condition_variable": "c",
"ostream": "c", "ostream": "c",
"stop_token": "c" "stop_token": "c",
"dag_data_split.h": "c",
"scheduler.h": "c",
"priority_queue.h": "c"
}, },
"files.exclude": { "files.exclude": {
"**/.git": true, "**/.git": true,

@ -1,46 +0,0 @@
Summary:
Total: 5.2029 secs
Slowest: 0.2684 secs
Fastest: 0.0281 secs
Average: 0.2039 secs
Requests/sec: 573.9092
Total data: 158614 bytes
Size/request: 53 bytes
Response time histogram:
0.028 [1] |
0.052 [26] |■
0.076 [15] |■
0.100 [16] |■
0.124 [11] |
0.148 [16] |■
0.172 [46] |■■
0.196 [1042] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
0.220 [968] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
0.244 [702] |■■■■■■■■■■■■■■■■■■■■■■■■■■■
0.268 [143] |■■■■■
Latency distribution:
10% in 0.1811 secs
25% in 0.1894 secs
50% in 0.2028 secs
75% in 0.2231 secs
90% in 0.2385 secs
95% in 0.2441 secs
99% in 0.2531 secs
Details (average, fastest, slowest):
DNS+dialup: 0.0004 secs, 0.0281 secs, 0.2684 secs
DNS-lookup: 0.0000 secs, 0.0000 secs, 0.0000 secs
req write: 0.0002 secs, 0.0000 secs, 0.0249 secs
resp wait: 0.2026 secs, 0.0102 secs, 0.2680 secs
resp read: 0.0001 secs, 0.0000 secs, 0.0012 secs
Status code distribution:
[200] 2986 responses

@ -1,247 +0,0 @@
Summary:
Total: 10.0240 secs
Slowest: 1.1570 secs
Fastest: 0.0692 secs
Average: 0.7488 secs
Requests/sec: 3390.6720
Total data: 33168 bytes
Size/request: 48 bytes
Response time histogram:
0.069 [1] |
0.178 [16] |■■
0.287 [15] |■■
0.396 [10] |■
0.504 [8] |■
0.613 [10] |■
0.722 [391] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
0.831 [43] |■■■■
0.939 [26] |■■■
1.048 [69] |■■■■■■■
1.157 [102] |■■■■■■■■■■
Latency distribution:
10% in 0.6506 secs
25% in 0.6649 secs
50% in 0.6792 secs
75% in 0.9356 secs
90% in 1.0743 secs
95% in 1.0935 secs
99% in 1.1200 secs
Details (average, fastest, slowest):
DNS+dialup: 0.0073 secs, 0.0692 secs, 1.1570 secs
DNS-lookup: 0.0000 secs, 0.0000 secs, 0.0000 secs
req write: 0.0016 secs, 0.0000 secs, 0.0855 secs
resp wait: 0.7388 secs, 0.0480 secs, 1.1547 secs
resp read: 0.0004 secs, 0.0001 secs, 0.0047 secs
Status code distribution:
[200] 691 responses
Error distribution:
[5] Post "http://127.0.0.1:10000": EOF
[33093] Post "http://127.0.0.1:10000": dial tcp 127.0.0.1:10000: connect: connection refused
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57822->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57830->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57838->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57840->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57850->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57856->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57864->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57876->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57882->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57894->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57906->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57918->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57920->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57934->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57938->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57948->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57958->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57964->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57978->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57986->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58000->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58008->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58022->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58036->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58052->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58056->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58068->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58082->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58084->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58098->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58104->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58116->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58120->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58134->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58148->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58154->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58170->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58178->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58186->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58190->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58192->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58194->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58198->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58212->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58220->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58230->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58232->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58242->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58246->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58250->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58266->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58270->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58278->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58288->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58300->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58314->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58326->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58332->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58346->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58356->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58366->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58374->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58382->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58392->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58400->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58414->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58416->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58426->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58438->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58450->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58464->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58478->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58492->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58494->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58498->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58508->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58522->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58532->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58546->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58562->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58576->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58588->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58590->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58592->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58594->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58610->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58618->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58634->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58638->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58642->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58648->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58658->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58672->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58688->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58700->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58714->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58730->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58734->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58738->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58744->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58754->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58768->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58780->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58786->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58802->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58816->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58830->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58840->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58848->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58856->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58864->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58876->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58890->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58892->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58908->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58910->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58922->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58928->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58940->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58942->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58948->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58950->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58958->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58966->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58978->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58994->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59008->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59012->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59016->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59032->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59034->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59044->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59056->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59068->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59078->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59086->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59096->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59108->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59110->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59112->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59120->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59132->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59134->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59140->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59146->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59148->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59152->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59156->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59172->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59180->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59194->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59202->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59218->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59232->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59248->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59264->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59272->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59284->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59300->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59308->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59322->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59332->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59340->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59344->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59352->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59362->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59364->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59376->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59388->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59390->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59392->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59408->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59414->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59420->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59430->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59438->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59454->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59456->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59458->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59462->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59478->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59484->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59486->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59492->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59498->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59512->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59518->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59522->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59536->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59540->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59556->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59562->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59572->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59578->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59592->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59606->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59614->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59618->127.0.0.1:10000: read: connection reset by peer
[1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59622->127.0.0.1:10000: read: connection reset by peer

@ -10,10 +10,10 @@ PAGE_SIZE := $(shell getconf PAGESIZE)
# Compiler Settings # Compiler Settings
CC=clang CC=clang
# CC_OPTIONS = -O3 -flto -g -pthread -D_GNU_SOURCE CC_OPTIONS = -O3 -flto -g -pthread -D_GNU_SOURCE
# CC_OPTIONS for Debugging # CC_OPTIONS for Debugging
CC_OPTIONS = -O0 -g -pthread -D_GNU_SOURCE # CC_OPTIONS = -O0 -g -pthread -D_GNU_SOURCE
# CFI Sanitizer # CFI Sanitizer
# CC_OPTIONS = -O0 -g -pthread -D_GNU_SOURCE -flto -fvisibility=default -fsanitize=cfi # CC_OPTIONS = -O0 -g -pthread -D_GNU_SOURCE -flto -fvisibility=default -fsanitize=cfi
@ -41,6 +41,14 @@ BINARY_NAME=sledgert
# Feature Toggles # Feature Toggles
# CFLAGS += -DADMISSIONS_CONTROL # CFLAGS += -DADMISSIONS_CONTROL
# This definition is used when the module is triggered by an HTTP request.
# It retrieves the length of the HTTP message data, the hash table, and the global queue length via a TCP socket.
# These values are then used as features for machine learning training
# CFLAGS += -DDEEP_LEARN_SCHDUE
# This definition determines whether to use the median or the mean to calculate the execution times of the first 256 services.
# CFLAGS += -DGET_AVER_TIME
# Debugging Flags # Debugging Flags
# Strips out calls to assert() and disables debuglog # Strips out calls to assert() and disables debuglog
@ -61,6 +69,9 @@ CFLAGS += -DLOG_TO_FILE
# CFLAGS += -DOPT_AVOID_GLOBAL_QUEUE # CFLAGS += -DOPT_AVOID_GLOBAL_QUEUE
# CFLAGS += -DLOG_RUNTIME_FILE_LOG # CFLAGS += -DLOG_RUNTIME_FILE_LOG
CFLAGS += -DLOG_RUNTIME_MEM_LOG CFLAGS += -DLOG_RUNTIME_MEM_LOG
# For machine learning purposes, the following data is collected and saved:
# Length of the HTTP message, hash table size, global queue length, and service execution time
# CFLAGS += -DLOG_DEEP_LEARN_SCHDUE
# This dumps per module *.csv files containing the cycle a sandbox has been in RUNNING when each # This dumps per module *.csv files containing the cycle a sandbox has been in RUNNING when each
# page is allocated. This helps understand the relationship to memory allocation and execution time. # page is allocated. This helps understand the relationship to memory allocation and execution time.

@ -0,0 +1,30 @@
import sys
def calculate_average(input_file, column_index):
    """
    Compute and print the average of one CSV column of input_file.

    Parameters:
        input_file: path to a comma-separated text file.
        column_index: zero-based index of the column to average.

    Returns:
        The average as a float, or None when no numeric values were found.
        (The original version printed the result but discarded it; returning
        it as well makes the function usable programmatically and testable,
        while preserving the CLI output.)
    """
    total = 0.0
    count = 0
    with open(input_file, 'r') as f:
        for line in f:
            columns = line.strip().split(',')
            # Skip rows too short to contain the requested column.
            if len(columns) > column_index:
                try:
                    value = float(columns[column_index])
                    total += value
                    count += 1
                except ValueError:
                    # Non-numeric cell: report it and keep going.
                    print(f"error value: {columns[column_index]}")
    if count > 0:
        average = total / count
        print(f"list {column_index + 1} average: {average}")
        return average
    print("no value")
    return None
if __name__ == "__main__":
    # CLI entry point: expects an input file and a 1-based column number.
    if len(sys.argv) == 3:
        calculate_average(sys.argv[1], int(sys.argv[2]) - 1)
    else:
        print(" python calculate_average.py input_file column_index")

@ -0,0 +1,51 @@
import matplotlib.pyplot as plt
from matplotlib import font_manager as fm
def load_data(filename):
    """
    Read whitespace-separated float values from a file.

    Every whitespace-delimited token in the file is parsed as a float;
    the parsed values are returned as a list in file order.
    """
    with open(filename, 'r') as source:
        tokens = source.read().split()
    return [float(token) for token in tokens]
def main():
    """Plot EDF vs LLF deadline-miss rates for the 5KB / 1.2x-deadline run."""
    # Load the measured miss-rate series for the two schedulers.
    # NOTE(review): assumes edf_5k.txt / llf_5k.txt each hold one value per
    # x_labels entry below — confirm against the data-generation scripts.
    edf_data = load_data('edf_5k.txt')
    llf_data = load_data('llf_5k.txt')
    # X-axis data points (load as % of maximum RPS); make sure the data
    # matches these labels.
    x_labels = [50, 60, 70, 80, 90, 100]
    font_properties = fm.FontProperties(family='Times New Roman', size=18)
    plt.rcParams.update({'font.size': 18, 'font.family': 'Times New Roman'})
    # Create the figure and plot both series.
    plt.figure(figsize=(10, 6))
    ax = plt.gca()  # current Axes object
    ax.set_facecolor('#f0f0f0')  # light grey plot background
    plt.plot(x_labels, edf_data, marker='s', linestyle='-', color='#C8503D', markersize=8, label='EDF')
    plt.plot(x_labels, llf_data, marker='^', linestyle='-', color='#00008B', markersize=8, label='LLF')
    # Title, axis labels, and legend.
    plt.title('5KB-1.2* Deadline', fontsize=20, fontproperties=font_properties)
    plt.xlabel('Load (% of maximum RPS)', fontproperties=font_properties)
    plt.ylabel('Deadline Miss Rate (%)', fontproperties=font_properties)
    plt.legend(prop=font_properties)
    # X-axis tick positions.
    plt.xticks(range(50, 101, 10))
    # Background grid.
    plt.grid(True)
    # Remove the tick marks on all four sides of the frame.
    plt.tick_params(axis='both', which='both', length=0)
    # Display the figure.
    plt.show()


if __name__ == "__main__":
    main()

@ -0,0 +1,24 @@
# split_logs.py
def split_logs(input_file):
    """
    Split a sledge log file into per-module text files.

    Each line of input_file that mentions one of the known module names is
    appended to that module's bucket (first matching module wins); every
    bucket is then written to "<module>.txt", one entry per line.
    """
    module_names = ("resize1", "png2bmp1", "lpd_wasm1", "cifar10_1", "work1")
    buckets = {name: [] for name in module_names}
    with open(input_file, 'r') as source:
        for raw_line in source:
            match = next((name for name in module_names if name in raw_line), None)
            if match is not None:
                buckets[match].append(raw_line.strip())
    for name, entries in buckets.items():
        with open(f"{name}.txt", 'w') as sink:
            sink.write("\n".join(entries) + "\n")


if __name__ == "__main__":
    split_logs("sledge.log")

@ -0,0 +1,20 @@
import sys
def split_columns(input_file):
    """
    Split a comma-separated file into one output file per column.

    Column i (1-based) of input_file is written to "<stem>_column_<i>.txt",
    one cell per line, where <stem> is the input path without its extension.
    Ragged rows are allowed; shorter rows simply contribute nothing to the
    trailing columns.
    """
    import os
    columns = []
    with open(input_file, 'r') as f:
        for line in f:
            parts = line.strip().split(',')
            for i, part in enumerate(parts):
                if len(columns) <= i:
                    columns.append([])
                columns[i].append(part)
    # Strip the extension robustly instead of assuming it is exactly four
    # characters long (the original input_file[:-4] mangled names such as
    # "data.jsonl" or extension-less paths).
    base, _ext = os.path.splitext(input_file)
    for i, column in enumerate(columns):
        with open(f"{base}_column_{i + 1}.txt", 'w') as outfile:
            outfile.write("\n".join(column) + "\n")


if __name__ == "__main__":
    import sys
    for path in sys.argv[1:]:
        split_columns(path)

@ -15,4 +15,5 @@ void admissions_info_initialize(struct admissions_info *self, char* module_name,
void admissions_info_update(struct admissions_info *self, uint64_t execution_duration); void admissions_info_update(struct admissions_info *self, uint64_t execution_duration);
uint64_t admission_info_get_percentile(struct admissions_info *self); uint64_t admission_info_get_percentile(struct admissions_info *self);
uint64_t admission_info_get_average(struct admissions_info *self);

@ -9,12 +9,14 @@ typedef struct sandbox_request *(*global_request_scheduler_add_fn_t)(void *);
typedef int (*global_request_scheduler_remove_fn_t)(struct sandbox_request **); typedef int (*global_request_scheduler_remove_fn_t)(struct sandbox_request **);
typedef int (*global_request_scheduler_remove_if_earlier_fn_t)(struct sandbox_request **, uint64_t); typedef int (*global_request_scheduler_remove_if_earlier_fn_t)(struct sandbox_request **, uint64_t);
typedef uint64_t (*global_request_scheduler_peek_fn_t)(void); typedef uint64_t (*global_request_scheduler_peek_fn_t)(void);
typedef int (*global_request_scheduler_size_fn_t)(void);
struct global_request_scheduler_config { struct global_request_scheduler_config {
global_request_scheduler_add_fn_t add_fn; global_request_scheduler_add_fn_t add_fn;
global_request_scheduler_remove_fn_t remove_fn; global_request_scheduler_remove_fn_t remove_fn;
global_request_scheduler_remove_if_earlier_fn_t remove_if_earlier_fn; global_request_scheduler_remove_if_earlier_fn_t remove_if_earlier_fn;
global_request_scheduler_peek_fn_t peek_fn; global_request_scheduler_peek_fn_t peek_fn;
global_request_scheduler_size_fn_t size_fn;
}; };
@ -23,3 +25,4 @@ struct sandbox_request *global_request_scheduler_add(struct sandbox_request *);
int global_request_scheduler_remove(struct sandbox_request **); int global_request_scheduler_remove(struct sandbox_request **);
int global_request_scheduler_remove_if_earlier(struct sandbox_request **, uint64_t targed_deadline); int global_request_scheduler_remove_if_earlier(struct sandbox_request **, uint64_t targed_deadline);
uint64_t global_request_scheduler_peek(void); uint64_t global_request_scheduler_peek(void);
int global_request_scheduler_size(void);

@ -39,7 +39,14 @@
#warning \ #warning \
"MODULE_MAX_PENDING_CLIENT_REQUESTS likely exceeds the value in /proc/sys/net/core/somaxconn and thus may be silently truncated"; "MODULE_MAX_PENDING_CLIENT_REQUESTS likely exceeds the value in /proc/sys/net/core/somaxconn and thus may be silently truncated";
#endif #endif
#ifdef DEEP_LEARN_SCHDUE
/* Per-module features recorded for the deep-learning ("MDL") scheduler.
 * NOTE(review): field semantics inferred from the call sites that copy them
 * between modules and print them to the perf log — confirm with the trainer. */
struct MDL
{
	int hash_node;         /* presumably the request-map entry count when sampled — TODO confirm */
	int global_queue_node; /* presumably the global request queue length when sampled — TODO confirm */
	int http_data_size;    /* presumably the size of the request's HTTP payload — TODO confirm */
};
#endif
struct module { struct module {
char name[MODULE_MAX_NAME_LENGTH]; char name[MODULE_MAX_NAME_LENGTH];
char path[MODULE_MAX_PATH_LENGTH]; char path[MODULE_MAX_PATH_LENGTH];
@ -83,8 +90,11 @@ struct module {
char **next_module_names; /* the next modules name in the DAG */ char **next_module_names; /* the next modules name in the DAG */
uint32_t next_module_count; uint32_t next_module_count;
uint32_t pre_module_count; uint32_t pre_module_count;
bool runtime_visited; bool runtime_visited; /* used for calculating the estimated time */
uint32_t run_priority; uint32_t run_priority;/* Used for prioritizing data fan-in to a node */
#ifdef DEEP_LEARN_SCHDUE
struct MDL mdl;/*save data for deep learn */
#endif
}; };
/************************* /*************************

@ -24,6 +24,9 @@ perf_window_initialize(struct perf_window *self, char* module_name)
self->count = 0; self->count = 0;
memset(&self->by_duration, 0, sizeof(struct execution_node) * PERF_WINDOW_BUFFER_SIZE); memset(&self->by_duration, 0, sizeof(struct execution_node) * PERF_WINDOW_BUFFER_SIZE);
memset(&self->by_termination, 0, sizeof(uint16_t) * PERF_WINDOW_BUFFER_SIZE); memset(&self->by_termination, 0, sizeof(uint16_t) * PERF_WINDOW_BUFFER_SIZE);
#ifdef GET_AVER_TIME
memset(&self->by_duration_for_mdl, 0, sizeof(uint64_t) * PERF_WINDOW_BUFFER_SIZE);
#endif
} }
@ -90,12 +93,18 @@ perf_window_add(struct perf_window *self, uint64_t value)
self->by_termination[i] = i; self->by_termination[i] = i;
self->by_duration[i] = (struct execution_node){ .execution_time = value, self->by_duration[i] = (struct execution_node){ .execution_time = value,
.by_termination_idx = i }; .by_termination_idx = i };
#ifdef GET_AVER_TIME
self->by_duration_for_mdl[i] = value;
#endif
} }
self->count = PERF_WINDOW_BUFFER_SIZE; self->count = PERF_WINDOW_BUFFER_SIZE;
goto done; goto done;
} }
/* Otherwise, replace the oldest value, and then sort */ /* Otherwise, replace the oldest value, and then sort */
#ifdef GET_AVER_TIME
self->by_duration_for_mdl[self->count % PERF_WINDOW_BUFFER_SIZE] = value;
#endif
uint16_t idx_of_oldest = self->by_termination[self->count % PERF_WINDOW_BUFFER_SIZE]; uint16_t idx_of_oldest = self->by_termination[self->count % PERF_WINDOW_BUFFER_SIZE];
bool check_up = value > self->by_duration[idx_of_oldest].execution_time; bool check_up = value > self->by_duration[idx_of_oldest].execution_time;
@ -149,11 +158,38 @@ perf_window_get_percentile(struct perf_window *self, int percentile, int precomp
return 0; return 0;
} }
//if (likely(size >= PERF_WINDOW_BUFFER_SIZE)) return self->by_duration[precomputed_index].execution_time; if (likely(size >= PERF_WINDOW_BUFFER_SIZE)) return self->by_duration[precomputed_index].execution_time;
return self->by_duration[size * percentile / 100].execution_time; return self->by_duration[size * percentile / 100].execution_time;
} }
#ifdef GET_AVER_TIME
/**
 * Computes the arithmetic mean of the execution-time samples currently held
 * in the window's by_duration_for_mdl ring buffer.
 * Fixes: the previous version truncated the uint64_t count into an int and
 * compared a size_t loop index against that signed int; the two branches
 * also duplicated the same summation logic.
 * @param self the perf window (must not be NULL); locking discipline is the
 *             caller's responsibility — NOTE(review): confirm callers hold
 *             the window lock where concurrent updates are possible
 * @returns average duration in cycles, or 0 when no samples were recorded
 */
static inline uint64_t
perf_window_get_average(struct perf_window *self)
{
	assert(self != NULL);

	/* count grows monotonically; only the first PERF_WINDOW_BUFFER_SIZE
	 * ring-buffer slots ever hold valid samples */
	uint64_t sample_count = self->count;
	if (sample_count == 0) return 0;
	if (sample_count > PERF_WINDOW_BUFFER_SIZE) sample_count = PERF_WINDOW_BUFFER_SIZE;

	uint64_t sum = 0;
	for (uint64_t i = 0; i < sample_count; i++) sum += self->by_duration_for_mdl[i];

	return sum / sample_count;
}
#endif
/** /**
* Returns the total count of executions * Returns the total count of executions
* @returns total count * @returns total count

@ -26,6 +26,9 @@ struct execution_node {
struct perf_window { struct perf_window {
char name[32]; char name[32];
struct execution_node by_duration[PERF_WINDOW_BUFFER_SIZE]; struct execution_node by_duration[PERF_WINDOW_BUFFER_SIZE];
#ifdef GET_AVER_TIME
uint64_t by_duration_for_mdl[PERF_WINDOW_BUFFER_SIZE];
#endif
uint16_t by_termination[PERF_WINDOW_BUFFER_SIZE]; uint16_t by_termination[PERF_WINDOW_BUFFER_SIZE];
uint64_t count; uint64_t count;
lock_t lock; lock_t lock;

@ -298,7 +298,9 @@ static inline int
priority_queue_length_nolock(struct priority_queue *self) priority_queue_length_nolock(struct priority_queue *self)
{ {
assert(self != NULL); assert(self != NULL);
#ifndef DEEP_LEARN_SCHDUE
assert(!listener_thread_is_running()); assert(!listener_thread_is_running());
#endif
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
return self->size; return self->size;

@ -44,7 +44,7 @@ extern uint32_t runtime_worker_threads_count;
extern int runtime_worker_threads_argument[RUNTIME_WORKER_THREAD_CORE_COUNT]; extern int runtime_worker_threads_argument[RUNTIME_WORKER_THREAD_CORE_COUNT];
extern uint64_t runtime_worker_threads_deadline[RUNTIME_WORKER_THREAD_CORE_COUNT]; extern uint64_t runtime_worker_threads_deadline[RUNTIME_WORKER_THREAD_CORE_COUNT];
extern uint64_t runtime_worker_threads_remaining_slack[RUNTIME_WORKER_THREAD_CORE_COUNT]; extern uint64_t runtime_worker_threads_remaining_slack[RUNTIME_WORKER_THREAD_CORE_COUNT];
extern uint64_t runtime_worker_threads_laxity[RUNTIME_WORKER_THREAD_CORE_COUNT];
extern void runtime_initialize(void); extern void runtime_initialize(void);
extern void runtime_set_pthread_prio(pthread_t thread, unsigned int nice); extern void runtime_set_pthread_prio(pthread_t thread, unsigned int nice);
extern void runtime_set_resource_limits_to_max(void); extern void runtime_set_resource_limits_to_max(void);

@ -115,6 +115,24 @@ sandbox_get_srsf_priority(void *element)
return remaining_slack; return remaining_slack;
}; };
/**
 * Priority callback for the MDL scheduler: remaining slack aged by the time
 * elapsed since the sandbox's bookkeeping timestamp was last refreshed.
 * Fix: the unsigned subtraction previously wrapped around when the elapsed
 * time exceeded the remaining slack, so an overdue sandbox reported a huge
 * slack and was treated as LOWEST priority; saturate at zero instead so it
 * stays most urgent.
 * @param element opaque pointer to a struct sandbox
 * @returns aged remaining slack in cycles (0 when already exhausted)
 */
static inline uint64_t
sandbox_get_mdl_priority(void *element)
{
	struct sandbox *sandbox = (struct sandbox *)element;
	uint64_t        now     = __getcycles();
	uint64_t        elapsed = now - sandbox->last_update_timestamp;

	if (elapsed >= sandbox->remaining_slack) return 0;
	return sandbox->remaining_slack - elapsed;
}
/**
 * Priority callback for the LLF scheduler: laxity aged by the time elapsed
 * since the sandbox's bookkeeping timestamp was last refreshed.
 * Fix: the unsigned subtraction previously wrapped around when the elapsed
 * time exceeded the laxity, so a sandbox that had run out of laxity reported
 * the LARGEST value and lost its urgency; saturate at zero instead.
 * @param element opaque pointer to a struct sandbox
 * @returns aged laxity in cycles (0 when already exhausted)
 */
static inline uint64_t
sandbox_get_llf_priority(void *element)
{
	struct sandbox *sandbox = (struct sandbox *)element;
	uint64_t        now     = __getcycles();
	uint64_t        elapsed = now - sandbox->last_update_timestamp;

	if (elapsed >= sandbox->laxity) return 0;
	return sandbox->laxity - elapsed;
}
/** /**
* Maps a sandbox fd to an underlying host fd * Maps a sandbox fd to an underlying host fd
* Returns error condition if the file_descriptor to set does not contain sandbox preopen magic * Returns error condition if the file_descriptor to set does not contain sandbox preopen magic
@ -242,9 +260,26 @@ sandbox_mem_print_perf(struct sandbox *sandbox)
* becomes more intelligent, then peak linear memory size needs to be tracked * becomes more intelligent, then peak linear memory size needs to be tracked
* seperately from current linear memory size. * seperately from current linear memory size.
*/ */
mem_log("%d,%u,%s():%d,%s,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%u\n", worker_thread_idx, sandbox->id, mem_log("%d,%lu,%s():%d,%s,%u,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%u\n", worker_thread_idx, sandbox->id,
sandbox->module->name, sandbox->module->port, sandbox_state_stringify(sandbox->state), sandbox->module->name, sandbox->module->port, sandbox_state_stringify(sandbox->state),
sandbox->module->relative_deadline_us, total_time_us, queued_us, initializing_us, runnable_us, sandbox->module->relative_deadline_us, total_time_us, queued_us, initializing_us, runnable_us,
running_us, blocked_us, returned_us, sandbox->linear_memory_size); running_us, blocked_us, returned_us, sandbox->linear_memory_size);
} }
/**
 * Appends one CSV line describing a finished request chain to the sandbox
 * perf log, for offline training of the deep-learning (MDL) scheduler.
 * Format: module name, end-to-end latency (us, cycles / MHz), then the three
 * recorded MDL features (hash_node, global_queue_node, http_data_size).
 * Compiled to a no-op unless LOG_DEEP_LEARN_SCHDUE is defined; additionally
 * requires DEEP_LEARN_SCHDUE for the body to exist at all.
 * @param sandbox the sandbox reaching a terminal state
 */
static inline void
sandbox_MDL_print_perf(struct sandbox *sandbox)
{
#ifndef LOG_DEEP_LEARN_SCHDUE
	/* Training-data logging compiled out: everything below is unreachable */
	return;
#endif
	/* If the log was not defined by an environment variable, early out */
	if (runtime_sandbox_perf_log == NULL) return;
#ifdef DEEP_LEARN_SCHDUE
	/* Only log the terminal module of a chain (no successor), so one line
	 * represents the whole request's end-to-end latency */
	if (sandbox->module->next_module == NULL) {
		uint64_t total_time = (sandbox->completion_timestamp - sandbox->request_arrival_timestamp) / runtime_processor_speed_MHz;
		fprintf(runtime_sandbox_perf_log, "%s,%lu,%d,%d,%d\n", sandbox->module->name, total_time,sandbox->module->mdl.hash_node,sandbox->module->mdl.global_queue_node,sandbox->module->mdl.http_data_size);
	}
#endif
}

@ -32,6 +32,7 @@ struct sandbox_request {
uint64_t absolute_deadline; /* cycles */ uint64_t absolute_deadline; /* cycles */
uint64_t last_update_timestamp; /* cycles */ uint64_t last_update_timestamp; /* cycles */
uint64_t remaining_slack; /* cycles */ uint64_t remaining_slack; /* cycles */
uint64_t laxity; /* cycles */
struct sandbox_pre_functions_output *pre_functions_output; struct sandbox_pre_functions_output *pre_functions_output;
pthread_spinlock_t lock; pthread_spinlock_t lock;
char * previous_function_output; char * previous_function_output;
@ -82,7 +83,7 @@ sandbox_request_log_allocation(struct sandbox_request *sandbox_request)
static inline struct sandbox_request * static inline struct sandbox_request *
sandbox_request_allocate(struct module *module, bool request_from_outside, ssize_t request_length, sandbox_request_allocate(struct module *module, bool request_from_outside, ssize_t request_length,
char *arguments, int socket_descriptor, const struct sockaddr *socket_address, char *arguments, int socket_descriptor, const struct sockaddr *socket_address,
uint64_t request_arrival_timestamp, uint64_t enqueue_timestamp, uint64_t remaining_slack, uint64_t request_arrival_timestamp, uint64_t enqueue_timestamp, uint64_t remaining_slack, uint64_t laxity,
uint64_t admissions_estimate, char *previous_function_output, ssize_t output_length) uint64_t admissions_estimate, char *previous_function_output, ssize_t output_length)
{ {
struct sandbox_request *sandbox_request = (struct sandbox_request *)malloc(sizeof(struct sandbox_request)); struct sandbox_request *sandbox_request = (struct sandbox_request *)malloc(sizeof(struct sandbox_request));
@ -104,6 +105,7 @@ sandbox_request_allocate(struct module *module, bool request_from_outside, ssize
sandbox_request->previous_request_length = request_length; sandbox_request->previous_request_length = request_length;
sandbox_request->last_update_timestamp = enqueue_timestamp; sandbox_request->last_update_timestamp = enqueue_timestamp;
sandbox_request->remaining_slack = remaining_slack; sandbox_request->remaining_slack = remaining_slack;
sandbox_request->laxity = laxity;
/*Avoid pointer suspension*/ /*Avoid pointer suspension*/
sandbox_request->pre_functions_output = NULL; sandbox_request->pre_functions_output = NULL;
@ -127,6 +129,7 @@ sandbox_request_allocate(struct module *module, bool request_from_outside, ssize
* @param output the output of the previous function * @param output the output of the previous function
* @param output_length the length of the output * @param output_length the length of the output
* @param run_priority the run_priority of the sandbox->module->run_priority * @param run_priority the run_priority of the sandbox->module->run_priority
* The first four bytes of the data represent the length of the data, and the tail character & serves as the delimiter marker
**/ **/
static inline void static inline void
pre_functions_output_request_add(struct sandbox_request *request, char *output, ssize_t output_length, uint32_t run_priority) pre_functions_output_request_add(struct sandbox_request *request, char *output, ssize_t output_length, uint32_t run_priority)
@ -179,8 +182,22 @@ concatenate_outputs(struct sandbox_request *request) {
size_t total_length = 0; // Calculate total length without extra for null character size_t total_length = 0; // Calculate total length without extra for null character
struct sandbox_pre_functions_output *current = request->pre_functions_output; struct sandbox_pre_functions_output *current = request->pre_functions_output;
if(current && !current->next)
{
char *previous_function_output = (char *)malloc(current->output_length);
if (!previous_function_output) {
panic("Could not allocate memory for concatenated output");
return;
}
memcpy(previous_function_output, current->previous_function_output, current->output_length);
request->output_length = current->output_length;
request->previous_function_output = previous_function_output;
return;
}
/* 4 bytes of data represents the length of the data */
while (current != NULL) { while (current != NULL) {
total_length += current->output_length; total_length += current->output_length + 4;
current = current->next; current = current->next;
} }
@ -194,15 +211,15 @@ concatenate_outputs(struct sandbox_request *request) {
current = request->pre_functions_output; current = request->pre_functions_output;
while (current != NULL) { while (current != NULL) {
size_t copy_length = current->output_length; size_t copy_length = current->output_length;
if (current->next) {
memcpy(copy_dest, current->previous_function_output, copy_length - 1); *(uint32_t *)copy_dest = (uint32_t)copy_length;
copy_dest[copy_length - 1] = '&'; copy_dest += 4;
copy_dest += copy_length;
} else {
memcpy(copy_dest, current->previous_function_output, copy_length); memcpy(copy_dest, current->previous_function_output, copy_length);
break; copy_dest += copy_length;
}
current = current->next; current = current->next;
} }
@ -214,4 +231,3 @@ concatenate_outputs(struct sandbox_request *request) {
request->output_length = total_length; request->output_length = total_length;
request->previous_function_output = concatenated_output; request->previous_function_output = concatenated_output;
} }

@ -59,6 +59,9 @@ sandbox_set_as_complete(struct sandbox *sandbox, sandbox_state_t last_state)
/* Terminal State Logging */ /* Terminal State Logging */
sandbox_print_perf(sandbox); sandbox_print_perf(sandbox);
sandbox_mem_print_perf(sandbox); sandbox_mem_print_perf(sandbox);
#ifdef DEEP_LEARN_SCHDUE
sandbox_MDL_print_perf(sandbox);
#endif
sandbox_summarize_page_allocations(sandbox); sandbox_summarize_page_allocations(sandbox);
/* Do not touch sandbox state after adding to completion queue to avoid use-after-free bugs */ /* Do not touch sandbox state after adding to completion queue to avoid use-after-free bugs */

@ -54,6 +54,9 @@ sandbox_set_as_error(struct sandbox *sandbox, sandbox_state_t last_state)
sandbox->state = SANDBOX_ERROR; sandbox->state = SANDBOX_ERROR;
sandbox_print_perf(sandbox); sandbox_print_perf(sandbox);
sandbox_mem_print_perf(sandbox); sandbox_mem_print_perf(sandbox);
#ifdef DEEP_LEARN_SCHDUE
sandbox_MDL_print_perf(sandbox);
#endif
sandbox_summarize_page_allocations(sandbox); sandbox_summarize_page_allocations(sandbox);
sandbox_free_linear_memory(sandbox); sandbox_free_linear_memory(sandbox);
admissions_control_subtract(sandbox->admissions_estimate); admissions_control_subtract(sandbox->admissions_estimate);

@ -53,6 +53,7 @@ sandbox_set_as_initialized(struct sandbox *sandbox, struct sandbox_request *sand
/* Copy the socket descriptor, address, and arguments of the client invocation */ /* Copy the socket descriptor, address, and arguments of the client invocation */
sandbox->absolute_deadline = sandbox_request->absolute_deadline; sandbox->absolute_deadline = sandbox_request->absolute_deadline;
sandbox->remaining_slack = sandbox_request->remaining_slack; sandbox->remaining_slack = sandbox_request->remaining_slack;
sandbox->laxity = sandbox_request->laxity;
sandbox->last_update_timestamp = sandbox_request->last_update_timestamp; sandbox->last_update_timestamp = sandbox_request->last_update_timestamp;
sandbox->arguments = (void *)sandbox_request->arguments; sandbox->arguments = (void *)sandbox_request->arguments;
sandbox->client_socket_descriptor = sandbox_request->socket_descriptor; sandbox->client_socket_descriptor = sandbox_request->socket_descriptor;

@ -25,11 +25,13 @@ sandbox_set_as_running(struct sandbox *sandbox, sandbox_state_t last_state)
//uint64_t last = sandbox->last_update_timestamp; //uint64_t last = sandbox->last_update_timestamp;
//uint64_t last_rs = sandbox->remaining_slack; //uint64_t last_rs = sandbox->remaining_slack;
sandbox->remaining_slack -= (now - sandbox->last_update_timestamp); sandbox->remaining_slack -= (now - sandbox->last_update_timestamp);
sandbox->laxity -= (now - sandbox->last_update_timestamp);
sandbox->last_update_timestamp = now; sandbox->last_update_timestamp = now;
sandbox->runnable_duration += duration_of_last_state; sandbox->runnable_duration += duration_of_last_state;
current_sandbox_set(sandbox); current_sandbox_set(sandbox);
runtime_worker_threads_deadline[worker_thread_idx] = sandbox->absolute_deadline; runtime_worker_threads_deadline[worker_thread_idx] = sandbox->absolute_deadline;
runtime_worker_threads_remaining_slack[worker_thread_idx] = sandbox->remaining_slack; runtime_worker_threads_remaining_slack[worker_thread_idx] = sandbox->remaining_slack;
runtime_worker_threads_laxity[worker_thread_idx] = sandbox->laxity;
//mem_log("time %lu sandbox starts running, request id:%d name %s obj=%p remaining slack %lu, last_rs %lu now %lu last %lu \n", start_execution, //mem_log("time %lu sandbox starts running, request id:%d name %s obj=%p remaining slack %lu, last_rs %lu now %lu last %lu \n", start_execution,
// sandbox->id, sandbox->module->name, sandbox, sandbox->remaining_slack, last_rs, now, last); // sandbox->id, sandbox->module->name, sandbox, sandbox->remaining_slack, last_rs, now, last);
/* Does not handle context switch because the caller knows if we need to use fast or slow switched */ /* Does not handle context switch because the caller knows if we need to use fast or slow switched */
@ -49,3 +51,5 @@ sandbox_set_as_running(struct sandbox *sandbox, sandbox_state_t last_state)
sandbox->last_state_change_timestamp = now; sandbox->last_state_change_timestamp = now;
sandbox->state = SANDBOX_RUNNING; sandbox->state = SANDBOX_RUNNING;
} }

@ -54,8 +54,9 @@ struct sandbox {
uint64_t response_timestamp; /* Timestamp when response is sent */ uint64_t response_timestamp; /* Timestamp when response is sent */
uint64_t completion_timestamp; /* Timestamp when sandbox runs to completion */ uint64_t completion_timestamp; /* Timestamp when sandbox runs to completion */
uint64_t last_state_change_timestamp; /* Used for bookkeeping of actual execution time */ uint64_t last_state_change_timestamp; /* Used for bookkeeping of actual execution time */
uint64_t last_update_timestamp; /* Used for bookkeeping timestamp for SRSF */ uint64_t last_update_timestamp; /* Used for bookkeeping timestamp for SRSF && LLF */
uint64_t remaining_slack; /* Cycles */ uint64_t remaining_slack; /* Cycles */
uint64_t laxity; /* Cycles */
#ifdef LOG_SANDBOX_MEMORY_PROFILE #ifdef LOG_SANDBOX_MEMORY_PROFILE
uint32_t page_allocation_timestamps[SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT]; uint32_t page_allocation_timestamps[SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT];
size_t page_allocation_timestamps_size; size_t page_allocation_timestamps_size;

@ -28,7 +28,9 @@ enum SCHEDULER
{ {
SCHEDULER_FIFO = 0, SCHEDULER_FIFO = 0,
SCHEDULER_EDF = 1, SCHEDULER_EDF = 1,
SCHEDULER_SRSF = 2 SCHEDULER_SRSF = 2,
SCHEDULER_MDL = 3,
SCHEDULER_LLF = 4,
}; };
extern enum SCHEDULER scheduler; extern enum SCHEDULER scheduler;
@ -112,6 +114,86 @@ err_allocate:
request = NULL; request = NULL;
goto done; goto done;
} }
/**
 * Selects the next sandbox for the MDL policy.
 * Compares the remaining slack of the local runqueue head against the best
 * (smallest) slack in the global request queue; when the global candidate is
 * more urgent — and this worker is lightly loaded or idle — the request is
 * pulled from the global queue, allocated, and made runnable, which places it
 * at the head of the local runqueue.
 * @returns head of the local runqueue after any pull, or NULL if empty
 */
static inline struct sandbox *
scheduler_MDL_get_next()
{
	/* Get the deadline of the sandbox at the head of the local request queue */
	struct sandbox * local = local_runqueue_get_next();
	/* NOTE(review): the local head's slack is used un-aged here, while the
	 * global peek value comes from the scheduler's priority function —
	 * confirm the two are comparable at this point */
	uint64_t local_remaining_MDL = local == NULL ? UINT64_MAX : local->remaining_slack;
	struct sandbox_request *request = NULL;
	uint64_t global_remaining_slack = global_request_scheduler_peek();
	/* Try to pull and allocate from the global queue if earlier
	 * This will be placed at the head of the local runqueue */
	if (global_remaining_slack < local_remaining_MDL && (local_workload_count <=2 || local_runqueue_count == 0)) {
	//if (global_remaining_slack < local_remaining_slack) {
		if (global_request_scheduler_remove_if_earlier(&request, local_remaining_MDL) == 0) {
			//uint64_t pop_time = __getcycles() - system_start_timestamp;
			//mem_log("time %lu remove from GQ, request id:%d name %s remaining slack %lu\n", pop_time,
			//		request->id, request->module->name, request->remaining_slack);
			assert(request != NULL);
			struct sandbox *global = sandbox_allocate(request);
			if (!global) goto err_allocate;
			assert(global->state == SANDBOX_INITIALIZED);
			sandbox_set_as_runnable(global, SANDBOX_INITIALIZED);
		}
	}
/* Return what is at the head of the local runqueue or NULL if empty */
done:
	return local_runqueue_get_next();
err_allocate:
	/* Allocation failed: reject the client with 503 and reclaim the request */
	client_socket_send(request->socket_descriptor, 503);
	client_socket_close(request->socket_descriptor, &request->socket_address);
	free(request);
	request = NULL;
	goto done;
}
/**
 * Selects the next sandbox for the Least-Laxity-First policy.
 * Compares the laxity of the local runqueue head against the best (smallest)
 * laxity in the global request queue; when the global candidate is more
 * urgent — and this worker is lightly loaded or idle — the request is pulled
 * from the global queue, allocated, and made runnable, which places it at the
 * head of the local runqueue.
 * Fix: the local comparison previously read sandbox->remaining_slack, i.e.
 * the SRSF/MDL metric, while the LLF priority function orders by
 * sandbox->laxity — the two queues were being compared on different metrics.
 * @returns head of the local runqueue after any pull, or NULL if empty
 */
static inline struct sandbox *
scheduler_LLF_get_next()
{
	/* Get the laxity of the sandbox at the head of the local runqueue */
	struct sandbox * local = local_runqueue_get_next();
	uint64_t local_Laxity = local == NULL ? UINT64_MAX : local->laxity;
	struct sandbox_request *request = NULL;
	uint64_t global_local_Laxity = global_request_scheduler_peek();
	/* Try to pull and allocate from the global queue if earlier
	 * This will be placed at the head of the local runqueue */
	if (global_local_Laxity < local_Laxity && (local_workload_count <=2 || local_runqueue_count == 0)) {
		if (global_request_scheduler_remove_if_earlier(&request, local_Laxity) == 0) {
			//uint64_t pop_time = __getcycles() - system_start_timestamp;
			//mem_log("time %lu remove from GQ, request id:%d name %s remaining slack %lu\n", pop_time,
			//		request->id, request->module->name, request->remaining_slack);
			assert(request != NULL);
			struct sandbox *global = sandbox_allocate(request);
			if (!global) goto err_allocate;
			assert(global->state == SANDBOX_INITIALIZED);
			sandbox_set_as_runnable(global, SANDBOX_INITIALIZED);
		}
	}
/* Return what is at the head of the local runqueue or NULL if empty */
done:
	return local_runqueue_get_next();
err_allocate:
	/* Allocation failed: reject the client with 503 and reclaim the request */
	client_socket_send(request->socket_descriptor, 503);
	client_socket_close(request->socket_descriptor, &request->socket_address);
	free(request);
	request = NULL;
	goto done;
}
static inline struct sandbox * static inline struct sandbox *
scheduler_fifo_get_next() scheduler_fifo_get_next()
{ {
@ -156,6 +238,10 @@ scheduler_get_next()
return scheduler_srsf_get_next(); return scheduler_srsf_get_next();
case SCHEDULER_FIFO: case SCHEDULER_FIFO:
return scheduler_fifo_get_next(); return scheduler_fifo_get_next();
case SCHEDULER_MDL:
return scheduler_MDL_get_next();
case SCHEDULER_LLF:
return scheduler_LLF_get_next();
default: default:
panic("Unimplemented\n"); panic("Unimplemented\n");
} }
@ -171,6 +257,12 @@ scheduler_initialize()
case SCHEDULER_SRSF: case SCHEDULER_SRSF:
global_request_scheduler_minheap_initialize(SCHEDULER_SRSF); global_request_scheduler_minheap_initialize(SCHEDULER_SRSF);
break; break;
case SCHEDULER_MDL:
global_request_scheduler_minheap_initialize(SCHEDULER_MDL);
break;
case SCHEDULER_LLF:
global_request_scheduler_minheap_initialize(SCHEDULER_LLF);
break;
case SCHEDULER_FIFO: case SCHEDULER_FIFO:
global_request_scheduler_deque_initialize(); global_request_scheduler_deque_initialize();
break; break;
@ -189,6 +281,12 @@ scheduler_runqueue_initialize()
case SCHEDULER_SRSF: case SCHEDULER_SRSF:
local_runqueue_minheap_initialize(SCHEDULER_SRSF); local_runqueue_minheap_initialize(SCHEDULER_SRSF);
break; break;
case SCHEDULER_MDL:
local_runqueue_minheap_initialize(SCHEDULER_MDL);
break;
case SCHEDULER_LLF:
local_runqueue_minheap_initialize(SCHEDULER_LLF);
break;
case SCHEDULER_FIFO: case SCHEDULER_FIFO:
local_runqueue_list_initialize(); local_runqueue_list_initialize();
break; break;
@ -214,8 +312,8 @@ scheduler_preempt(ucontext_t *user_context)
struct sandbox *current = current_sandbox_get(); struct sandbox *current = current_sandbox_get();
assert(current != NULL); assert(current != NULL);
assert(current->state == SANDBOX_RUNNING); assert(current->state == SANDBOX_RUNNING);
uint64_t RR_least_time = 5000 * runtime_processor_speed_MHz;
if (current-> remaining_slack <= 5000 * runtime_processor_speed_MHz) { if (current-> remaining_slack <= RR_least_time || current->laxity <= RR_least_time) {
return; return;
} }
/* This is for better state-change bookkeeping */ /* This is for better state-change bookkeeping */
@ -293,6 +391,10 @@ scheduler_print(enum SCHEDULER variant)
return "EDF"; return "EDF";
case SCHEDULER_SRSF: case SCHEDULER_SRSF:
return "SRSF"; return "SRSF";
case SCHEDULER_MDL:
return "MDL";
case SCHEDULER_LLF:
return "LLF";
} }
} }

@ -42,6 +42,21 @@ admission_info_get_percentile(struct admissions_info *self)
uint64_t estimated_execution = perf_window_get_percentile(&self->perf_window, self->percentile, self->control_index); uint64_t estimated_execution = perf_window_get_percentile(&self->perf_window, self->percentile, self->control_index);
return estimated_execution; return estimated_execution;
} }
#ifdef GET_AVER_TIME
/*
 * Reports the mean observed execution time of this module by delegating to
 * its perf window; no lock is taken while reading the window.
 * @param self admissions info of the module being queried
 * @returns average execution time in cycles (0 if no samples yet)
 */
uint64_t
admission_info_get_average(struct admissions_info *self)
{
	return perf_window_get_average(&self->perf_window);
}
#endif
/* /*
* Adds an execution value to the perf window and calculates and caches and updated estimate * Adds an execution value to the perf window and calculates and caches and updated estimate
* @param self * @param self

@ -13,6 +13,13 @@
extern uint64_t system_start_timestamp; extern uint64_t system_start_timestamp;
lock_t lock; lock_t lock;
#ifdef DEEP_LEARN_SCHDUE
/* Number of live entries in the shared request map; atomic because it is
 * incremented from worker threads. NOTE(review): also atomic_init'd lazily at
 * map creation — the `= 0` initializer here already covers that; confirm the
 * re-init is intentional. */
_Atomic uint32_t hash_count = 0;
#endif
#define OUTPUT_BUFER_SIZE 1024*5 #define OUTPUT_BUFER_SIZE 1024*5
__thread struct sandbox *worker_thread_current_sandbox = NULL; __thread struct sandbox *worker_thread_current_sandbox = NULL;
@ -58,6 +65,13 @@ current_sandbox_disable_preemption(struct sandbox *sandbox)
} }
} }
/**
 * Ages the sandbox's scheduling metrics by the time elapsed since its
 * bookkeeping timestamp was last refreshed, immediately before both metrics
 * are handed to sandbox_request_allocate() for the next module in the chain.
 * Fix: despite its name, this previously aged only remaining_slack, so the
 * caller forwarded a stale laxity value; decrement both symmetrically, the
 * same way sandbox_set_as_running() does.
 * NOTE(review): last_update_timestamp is intentionally left untouched here
 * because the caller passes `now` as the new request's enqueue timestamp —
 * confirm no path calls this twice for the same elapsed interval.
 * @param sandbox the currently executing sandbox (must not be NULL)
 * @param now     current cycle count
 */
static inline void
current_sandbox_get_newlaxity(struct sandbox *sandbox, uint64_t now)
{
	assert(sandbox);

	uint64_t elapsed = now - sandbox->last_update_timestamp;
	sandbox->remaining_slack -= elapsed;
	sandbox->laxity          -= elapsed;
}
/** /**
* Sandbox execution logic * Sandbox execution logic
* Handles setup, request parsing, WebAssembly initialization, function execution, response building and * Handles setup, request parsing, WebAssembly initialization, function execution, response building and
@ -80,6 +94,9 @@ current_sandbox_start(void)
if(sandbox_req_map == NULL) if(sandbox_req_map == NULL)
{ {
sandbox_req_map = malloc(sizeof(struct hashmap)); sandbox_req_map = malloc(sizeof(struct hashmap));
#ifdef DEEP_LEARN_SCHDUE
atomic_init(&hash_count, 0);
#endif
map_init(sandbox_req_map); map_init(sandbox_req_map);
} }
if(sandbox_request_id == NULL) if(sandbox_request_id == NULL)
@ -170,13 +187,25 @@ current_sandbox_start(void)
goto err; goto err;
} }
memcpy(individual_pre_func_output, pre_func_output, output_length); memcpy(individual_pre_func_output, pre_func_output, output_length);
uint64_t enqueue_timestamp = __getcycles(); uint64_t now = __getcycles();
current_sandbox_get_newlaxity(sandbox, now);
#ifdef LOG_DEEP_LEARN_SCHDUE
#ifdef DEEP_LEARN_SCHDUE
/*
* If model parameters need to be trained, then enable the feature;
* otherwise, disable it to avoid unnecessary overhead.
*/
next_module_node->mdl.hash_node = sandbox->module->mdl.hash_node;
next_module_node->mdl.global_queue_node = sandbox->module->mdl.global_queue_node;
next_module_node->mdl.http_data_size = sandbox->module->mdl.http_data_size;
#endif
#endif
struct sandbox_request *sandbox_request = struct sandbox_request *sandbox_request =
sandbox_request_allocate(next_module_node, false, sandbox->request_length, sandbox_request_allocate(next_module_node, false, sandbox->request_length,
next_module_node->name, sandbox->client_socket_descriptor, next_module_node->name, sandbox->client_socket_descriptor,
(const struct sockaddr *)&sandbox->client_address, (const struct sockaddr *)&sandbox->client_address,
sandbox->request_arrival_timestamp, enqueue_timestamp, sandbox->request_arrival_timestamp, now,
sandbox->remaining_slack, true, NULL, 0); sandbox->remaining_slack, sandbox->laxity, true, NULL, 0);
/* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate() /* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate()
* will busy-wait to generate an unique id, should we optimize it here? * will busy-wait to generate an unique id, should we optimize it here?
*/ */
@ -242,15 +271,27 @@ current_sandbox_start(void)
uint64_t *request_id = (uint64_t *)map_get(sandbox_request_id, cur_request_id, strlen(cur_request_id), &ret_value_len); uint64_t *request_id = (uint64_t *)map_get(sandbox_request_id, cur_request_id, strlen(cur_request_id), &ret_value_len);
bool mapflag = false; bool mapflag = false;
if (!request_id) { if (!request_id) {
uint64_t enqueue_timestamp = __getcycles(); uint64_t now = __getcycles();
current_sandbox_get_newlaxity(sandbox, now);
//mem_log("time %lu request id:%d executing, name:%s remaining slack %lu\n", current_rs, //mem_log("time %lu request id:%d executing, name:%s remaining slack %lu\n", current_rs,
// sandbox->id, sandbox->module->name, sandbox->remaining_slack); // sandbox->id, sandbox->module->name, sandbox->remaining_slack);
#ifdef LOG_DEEP_LEARN_SCHDUE
#ifdef DEEP_LEARN_SCHDUE
/*
* If model parameters need to be trained, then enable the feature;
* otherwise, disable it to avoid unnecessary overhead.
*/
next_module_node->mdl.hash_node = sandbox->module->mdl.hash_node;
next_module_node->mdl.global_queue_node = sandbox->module->mdl.global_queue_node;
next_module_node->mdl.http_data_size = sandbox->module->mdl.http_data_size;
#endif
#endif
struct sandbox_request *sandbox_request = struct sandbox_request *sandbox_request =
sandbox_request_allocate(next_module_node, false, sandbox->request_length, sandbox_request_allocate(next_module_node, false, sandbox->request_length,
next_module_node->name, sandbox->client_socket_descriptor, next_module_node->name, sandbox->client_socket_descriptor,
(const struct sockaddr *)&sandbox->client_address, (const struct sockaddr *)&sandbox->client_address,
sandbox->request_arrival_timestamp, enqueue_timestamp, sandbox->request_arrival_timestamp, now,
sandbox->remaining_slack, true, NULL, 0); sandbox->remaining_slack,sandbox->laxity, true, NULL, 0);
/* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate() /* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate()
* will busy-wait to generate an unique id, should we optimize it here? * will busy-wait to generate an unique id, should we optimize it here?
*/ */
@ -262,6 +303,9 @@ current_sandbox_start(void)
if(!map_set(sandbox_request_id, cur_request_id, strlen(cur_request_id), &module_pre_count, sizeof(uint32_t), true)) panic("the map of sandbox_request_id is NULL\n"); if(!map_set(sandbox_request_id, cur_request_id, strlen(cur_request_id), &module_pre_count, sizeof(uint32_t), true)) panic("the map of sandbox_request_id is NULL\n");
if(!map_set(sandbox_req_map, cur_request_id, strlen(cur_request_id), sandbox_request, sizeof(struct sandbox_request *), false)) panic("the map of sandbox_request is NULL\n"); if(!map_set(sandbox_req_map, cur_request_id, strlen(cur_request_id), sandbox_request, sizeof(struct sandbox_request *), false)) panic("the map of sandbox_request is NULL\n");
mapflag = true; mapflag = true;
#ifdef DEEP_LEARN_SCHDUE
atomic_fetch_add(&hash_count, 1);
#endif
} }
LOCK_UNLOCK(&lock); LOCK_UNLOCK(&lock);
struct sandbox_request *sandbox_request = map_get(sandbox_req_map, cur_request_id, strlen(cur_request_id), &ret_value_len); struct sandbox_request *sandbox_request = map_get(sandbox_req_map, cur_request_id, strlen(cur_request_id), &ret_value_len);
@ -288,6 +332,9 @@ current_sandbox_start(void)
global_request_scheduler_add(sandbox_request); global_request_scheduler_add(sandbox_request);
map_delete(sandbox_req_map, cur_request_id, strlen(cur_request_id)); map_delete(sandbox_req_map, cur_request_id, strlen(cur_request_id));
map_delete(sandbox_request_id, cur_request_id, strlen(cur_request_id)); map_delete(sandbox_request_id, cur_request_id, strlen(cur_request_id));
#ifdef DEEP_LEARN_SCHDUE
atomic_fetch_sub(&hash_count, 1);
#endif
} }
pthread_spin_unlock(&sandbox_request->lock); pthread_spin_unlock(&sandbox_request->lock);
} }

@ -91,3 +91,8 @@ global_request_scheduler_peek()
{ {
return global_request_scheduler.peek_fn(); return global_request_scheduler.peek_fn();
} }
/**
 * Reports how many sandbox requests are currently queued in the global
 * request scheduler by delegating to the variant's registered size_fn.
 * @returns current global queue length
 */
int
global_request_scheduler_size(){
	return global_request_scheduler.size_fn();
}

@ -64,6 +64,12 @@ global_request_scheduler_minheap_peek(void)
return priority_queue_peek(global_request_scheduler_minheap); return priority_queue_peek(global_request_scheduler_minheap);
} }
/**
 * Size callback for the minheap variant of the global request scheduler.
 * @returns number of sandbox requests currently in the global minheap
 */
static int
global_request_scheduler_minheap_size(void)
{
	return priority_queue_length(global_request_scheduler_minheap);
}
uint64_t uint64_t
sandbox_request_get_priority_fn(void *element) sandbox_request_get_priority_fn(void *element)
{ {
@ -80,6 +86,25 @@ sandbox_request_get_priority_srsf_fn(void *element)
return remaining_slack; return remaining_slack;
}; };
/**
 * Priority callback for the MDL scheduler: the request's remaining slack,
 * decayed by the cycles elapsed since it was last updated (lower = more
 * urgent in the min-queue).
 * NOTE(review): if (now - last_update_timestamp) exceeds remaining_slack,
 * the unsigned subtraction wraps to a huge value, pushing the request to
 * the back of the queue -- confirm this is the intended overload behavior.
 * @param element a struct sandbox_request *
 * @returns priority in cycles
 */
uint64_t
sandbox_request_get_priority_mdl_fn(void *element)
{
	struct sandbox_request *sandbox_request = (struct sandbox_request *)element;
	uint64_t now = __getcycles();
	uint64_t remaining_slack_mdl = sandbox_request->remaining_slack - (now - sandbox_request->last_update_timestamp);
	return remaining_slack_mdl;
};
/**
 * Priority callback for the Least-Laxity-First scheduler: the request's
 * laxity, decayed by the cycles elapsed since its last update (lower =
 * more urgent in the min-queue).
 * NOTE(review): same unsigned-wrap hazard as the MDL variant -- if the
 * elapsed time exceeds laxity the result wraps to a huge value; confirm
 * this demotion is intended.
 * @param element a struct sandbox_request *
 * @returns laxity-based priority in cycles
 */
uint64_t
sandbox_request_get_priority_llf_fn(void *element)
{
	struct sandbox_request *sandbox_request = (struct sandbox_request *)element;
	uint64_t now = __getcycles();
	uint64_t Laxity_llf = sandbox_request->laxity - (now - sandbox_request->last_update_timestamp);
	return Laxity_llf;
};
/** /**
* Initializes the variant and registers against the polymorphic interface * Initializes the variant and registers against the polymorphic interface
*/ */
@ -90,13 +115,18 @@ global_request_scheduler_minheap_initialize(enum SCHEDULER scheduler)
global_request_scheduler_minheap = priority_queue_initialize(40960, true, sandbox_request_get_priority_fn); global_request_scheduler_minheap = priority_queue_initialize(40960, true, sandbox_request_get_priority_fn);
} else if (scheduler == SCHEDULER_SRSF) { } else if (scheduler == SCHEDULER_SRSF) {
global_request_scheduler_minheap = priority_queue_initialize(40960, true, sandbox_request_get_priority_srsf_fn); global_request_scheduler_minheap = priority_queue_initialize(40960, true, sandbox_request_get_priority_srsf_fn);
} else if (scheduler == SCHEDULER_MDL) {
global_request_scheduler_minheap = priority_queue_initialize(40960, true, sandbox_request_get_priority_mdl_fn);
} else if (scheduler == SCHEDULER_LLF) {
global_request_scheduler_minheap = priority_queue_initialize(40960, true, sandbox_request_get_priority_llf_fn);
} }
struct global_request_scheduler_config config = { struct global_request_scheduler_config config = {
.add_fn = global_request_scheduler_minheap_add, .add_fn = global_request_scheduler_minheap_add,
.remove_fn = global_request_scheduler_minheap_remove, .remove_fn = global_request_scheduler_minheap_remove,
.remove_if_earlier_fn = global_request_scheduler_minheap_remove_if_earlier, .remove_if_earlier_fn = global_request_scheduler_minheap_remove_if_earlier,
.peek_fn = global_request_scheduler_minheap_peek .peek_fn = global_request_scheduler_minheap_peek,
.size_fn = global_request_scheduler_minheap_size
}; };
global_request_scheduler_initialize(&config); global_request_scheduler_initialize(&config);

@ -7,10 +7,15 @@
#include "generic_thread.h" #include "generic_thread.h"
#include "listener_thread.h" #include "listener_thread.h"
#include "runtime.h" #include "runtime.h"
#include "scheduler.h"
#ifdef DEEP_LEARN_SCHDUE
extern _Atomic uint32_t hash_count;
#endif
extern uint64_t system_start_timestamp; extern uint64_t system_start_timestamp;
extern enum SCHEDULER scheduler;
/*When reading the json file, the size has been determined at module.c JSON_MAX_ELEMENT_COUNT*/ /*When reading the json file, the size has been determined at module.c JSON_MAX_ELEMENT_COUNT*/
const int QUEUE_SIZE = 16; const int QUEUE_SIZE = 36;
/* /*
* Descriptor of the epoll instance used to monitor the socket descriptors of registered * Descriptor of the epoll instance used to monitor the socket descriptors of registered
* serverless modules. The listener cores listens for incoming client requests through this. * serverless modules. The listener cores listens for incoming client requests through this.
@ -102,7 +107,6 @@ listener_thread_main(void *dummy)
*/ */
assert(descriptor_count > 0); assert(descriptor_count > 0);
uint64_t request_arrival_timestamp = __getcycles();
for (int i = 0; i < descriptor_count; i++) { for (int i = 0; i < descriptor_count; i++) {
/* Check Event to determine if epoll returned an error */ /* Check Event to determine if epoll returned an error */
if ((epoll_events[i].events & EPOLLERR) == EPOLLERR) { if ((epoll_events[i].events & EPOLLERR) == EPOLLERR) {
@ -177,40 +181,76 @@ listener_thread_main(void *dummy)
continue; continue;
} }
#ifdef DEEP_LEARN_SCHDUE
/*Get HTTP buffer size*/
int content_length = 0;
module->mdl.global_queue_node = global_request_scheduler_size();
module->mdl.hash_node = hash_count;
char buffer[1024];
ssize_t nbytes_peeked = recv(client_socket, buffer, sizeof(buffer), MSG_PEEK);
if (nbytes_peeked <= 0){
content_length = 5090;
} else{
buffer[sizeof(buffer)-1] = '\0';
const char *content_length_str = "Content-Length: ";
char *start = strstr(buffer, content_length_str);
if (start) {
start += strlen(content_length_str);
while (*start == ' ') start++;
content_length = atoi(start);
//debuglog("Content-Length: %d\n", content_length);
}else{
content_length = 5090;
}
}
module->mdl.http_data_size = content_length;
#endif
/* get total estimated execution time */ /* get total estimated execution time */
uint64_t estimated_execution_time = 0; uint64_t estimated_execution_time = 0;
int front = 0, rear = 0; int front = 0, rear = 0;
struct module **queue = malloc(QUEUE_SIZE * sizeof(struct module*)); struct module *queue[QUEUE_SIZE] = {NULL};
if (queue == NULL) {
panic("Failed to allocate listen.c queue memory for queue");
}
queue[rear++] = module; queue[rear++] = module;
while (rear != front) while (rear != front)
{ {
struct module *current_module = queue[front++]; struct module *current_module = queue[front++];
if (scheduler == SCHEDULER_SRSF || scheduler == SCHEDULER_EDF || scheduler == SCHEDULER_LLF)
{
estimated_execution_time += admission_info_get_percentile(&current_module->admissions_info);
}else if (scheduler == SCHEDULER_MDL ){
#ifdef GET_AVER_TIME
estimated_execution_time += admission_info_get_average(&current_module->admissions_info);
#else
estimated_execution_time += admission_info_get_percentile(&current_module->admissions_info); estimated_execution_time += admission_info_get_percentile(&current_module->admissions_info);
#endif
}
for (int i = 0; i < current_module->next_module_count; i++) { for (int i = 0; i < current_module->next_module_count; i++) {
if (current_module->next_module[i] != NULL && !current_module->next_module[i]->runtime_visited) if (current_module->next_module[i] != NULL && !current_module->next_module[i]->runtime_visited)
{ {
queue[rear++] = current_module->next_module[i]; queue[rear++] = current_module->next_module[i];
current_module->next_module[i]->runtime_visited = true; current_module->next_module[i]->runtime_visited = true;
} }
}
assert(rear <= QUEUE_SIZE);
assert(front <= QUEUE_SIZE);
} }
/*Recover the flags of the module here, so that it can be accessed next time.*/ /*Recover the flags of the module here, so that it can be accessed next time.*/
for (int i = 0; i < QUEUE_SIZE; i++) {
if (queue[i] != NULL) {
struct module *current_module = queue[i];
current_module->runtime_visited = false; current_module->runtime_visited = false;
assert(rear <= QUEUE_SIZE);
assert(front <= QUEUE_SIZE);
} }
free(queue); }
/* Adding system start timestamp to avoid negative remaining slack in the following update. They are all cycles */ /* Adding system start timestamp to avoid negative remaining slack in the following update. They are all cycles */
uint64_t remaining_slack = system_start_timestamp + module->relative_deadline - estimated_execution_time; uint64_t remaining_slack = system_start_timestamp + module->relative_deadline - estimated_execution_time;
uint64_t request_arrival_timestamp = __getcycles();
/* Allocate a Sandbox Request */ /* Allocate a Sandbox Request */
struct sandbox_request *sandbox_request = struct sandbox_request *sandbox_request =
sandbox_request_allocate(module, true, 0, module->name, client_socket, sandbox_request_allocate(module, true, 0, module->name, client_socket,
(const struct sockaddr *)&client_address, (const struct sockaddr *)&client_address,
request_arrival_timestamp, request_arrival_timestamp,remaining_slack, request_arrival_timestamp, request_arrival_timestamp,remaining_slack, remaining_slack,
work_admitted, NULL, 0); work_admitted, NULL, 0);
/* Add to the Global Sandbox Request Scheduler */ /* Add to the Global Sandbox Request Scheduler */

@ -82,6 +82,13 @@ local_runqueue_minheap_initialize(enum SCHEDULER scheduler)
local_runqueue_minheap = priority_queue_initialize(10240, false, sandbox_get_priority); local_runqueue_minheap = priority_queue_initialize(10240, false, sandbox_get_priority);
} else if (scheduler == SCHEDULER_SRSF) { } else if (scheduler == SCHEDULER_SRSF) {
local_runqueue_minheap = priority_queue_initialize(10240, false, sandbox_get_srsf_priority); local_runqueue_minheap = priority_queue_initialize(10240, false, sandbox_get_srsf_priority);
} else if (scheduler == SCHEDULER_MDL) {
local_runqueue_minheap = priority_queue_initialize(10240, false, sandbox_get_mdl_priority);
} else if (scheduler == SCHEDULER_LLF) {
local_runqueue_minheap = priority_queue_initialize(10240, false, sandbox_get_llf_priority);
} else{
panic("Invalid scheduler type %d\n", scheduler);
} }
/* Register Function Pointers for Abstract Scheduling API */ /* Register Function Pointers for Abstract Scheduling API */

@ -186,6 +186,11 @@ runtime_configure()
scheduler = SCHEDULER_FIFO; scheduler = SCHEDULER_FIFO;
} else if (strcmp(scheduler_policy, "SRSF") == 0) { } else if (strcmp(scheduler_policy, "SRSF") == 0) {
scheduler = SCHEDULER_SRSF; scheduler = SCHEDULER_SRSF;
} else if (strcmp(scheduler_policy, "MDL") == 0) {
scheduler = SCHEDULER_MDL;
} else if (strcmp(scheduler_policy, "LLF") == 0)
{
scheduler = SCHEDULER_LLF;
} else { } else {
panic("Invalid scheduler policy: %s. Must be {EDF|FIFO}\n", scheduler_policy); panic("Invalid scheduler policy: %s. Must be {EDF|FIFO}\n", scheduler_policy);
} }
@ -197,7 +202,7 @@ runtime_configure()
if (strcmp(sigalrm_policy, "BROADCAST") == 0) { if (strcmp(sigalrm_policy, "BROADCAST") == 0) {
runtime_sigalrm_handler = RUNTIME_SIGALRM_HANDLER_BROADCAST; runtime_sigalrm_handler = RUNTIME_SIGALRM_HANDLER_BROADCAST;
} else if (strcmp(sigalrm_policy, "TRIAGED") == 0) { } else if (strcmp(sigalrm_policy, "TRIAGED") == 0) {
if (unlikely(scheduler != SCHEDULER_EDF && scheduler != SCHEDULER_SRSF)) panic("triaged sigalrm handlers are only valid with EDF and SRSF\n"); if (unlikely(scheduler != SCHEDULER_EDF && scheduler != SCHEDULER_SRSF && scheduler != SCHEDULER_MDL && scheduler != SCHEDULER_LLF)) panic("triaged sigalrm handlers are only valid with EDF and SRSF\n");
runtime_sigalrm_handler = RUNTIME_SIGALRM_HANDLER_TRIAGED; runtime_sigalrm_handler = RUNTIME_SIGALRM_HANDLER_TRIAGED;
} else { } else {
panic("Invalid sigalrm policy: %s. Must be {BROADCAST|TRIAGED}\n", sigalrm_policy); panic("Invalid sigalrm policy: %s. Must be {BROADCAST|TRIAGED}\n", sigalrm_policy);
@ -226,9 +231,13 @@ runtime_configure()
printf("\tSandbox Performance Log: %s\n", runtime_sandbox_perf_log_path); printf("\tSandbox Performance Log: %s\n", runtime_sandbox_perf_log_path);
runtime_sandbox_perf_log = fopen(runtime_sandbox_perf_log_path, "w"); runtime_sandbox_perf_log = fopen(runtime_sandbox_perf_log_path, "w");
if (runtime_sandbox_perf_log == NULL) { perror("sandbox perf log"); } if (runtime_sandbox_perf_log == NULL) { perror("sandbox perf log"); }
#ifdef LOG_DEEP_LEARN_SCHDUE
fprintf(runtime_sandbox_perf_log, "module.name, total_time,module.hash_node,modul.global_queue_node,module.http_data_size\n");
#else
fprintf(runtime_sandbox_perf_log, "threadid,id,function,state,deadline,actual,queued,initializing,runnable," fprintf(runtime_sandbox_perf_log, "threadid,id,function,state,deadline,actual,queued,initializing,runnable,"
"running,blocked,returned,memory\n"); "running,blocked,returned,memory\n");
//fflush(runtime_sandbox_perf_log); #endif
} else { } else {
printf("\tSandbox Performance Log: Disabled\n"); printf("\tSandbox Performance Log: Disabled\n");
} }

@ -19,7 +19,7 @@
const int JSON_MAX_ELEMENT_COUNT = 16; const int JSON_MAX_ELEMENT_COUNT = 16;
const int JSON_MAX_ELEMENT_SIZE = 1024; const int JSON_MAX_ELEMENT_SIZE = 1024;
const int PRE_MODULE_COUNT = 4; const int MODULE_MAX_COUNT = 36;
/************************* /*************************
* Private Static Inline * * Private Static Inline *
@ -375,8 +375,7 @@ module_new_from_json(char *file_name)
int module_count = 0; int module_count = 0;
char *request_headers = NULL; char *request_headers = NULL;
char *reponse_headers = NULL; char *reponse_headers = NULL;
//struct module *tail_module = NULL; struct module **nodes = malloc( MODULE_MAX_COUNT * sizeof(struct module*));
struct module **nodes = malloc(JSON_MAX_ELEMENT_COUNT * sizeof(struct module*));
if (nodes == NULL) { if (nodes == NULL) {
panic("Memory allocation failed for nodes array\n"); panic("Memory allocation failed for nodes array\n");
} }
@ -413,7 +412,7 @@ module_new_from_json(char *file_name)
uint32_t expected_execution_us = 0; uint32_t expected_execution_us = 0;
int admissions_percentile = 50; int admissions_percentile = 50;
bool is_active = false; bool is_active = false;
bool is_tail_module = false; // bool is_tail_module = false;
int32_t request_count = 0; int32_t request_count = 0;
int32_t response_count = 0; int32_t response_count = 0;
int j = 1; int j = 1;
@ -490,15 +489,15 @@ module_new_from_json(char *file_name)
} else { } else {
panic("Expected active key to be a JSON boolean, was %s\n", val); panic("Expected active key to be a JSON boolean, was %s\n", val);
} }
} else if (strcmp(key, "tail-module") == 0) { // } else if (strcmp(key, "tail-module") == 0) {
assert(tokens[i + j + 1].type == JSMN_PRIMITIVE); // assert(tokens[i + j + 1].type == JSMN_PRIMITIVE);
if (val[0] == 't') { // if (val[0] == 't') {
is_tail_module = true; // is_tail_module = true;
} else if (val[0] == 'f') { // } else if (val[0] == 'f') {
is_tail_module = false; // is_tail_module = false;
} else { // } else {
panic("Expected tail_module key to be a JSON boolean, was %s\n", val); // panic("Expected tail_module key to be a JSON boolean, was %s\n", val);
} // }
} else if (strcmp(key, "relative-deadline-us") == 0) { } else if (strcmp(key, "relative-deadline-us") == 0) {
int64_t buffer = strtoll(val, NULL, 10); int64_t buffer = strtoll(val, NULL, 10);
if (buffer < 0 || buffer > (int64_t)RUNTIME_RELATIVE_DEADLINE_US_MAX) if (buffer < 0 || buffer > (int64_t)RUNTIME_RELATIVE_DEADLINE_US_MAX)
@ -605,7 +604,6 @@ module_new_from_json(char *file_name)
if(next_module_count == 0) if(next_module_count == 0)
{ {
module->next_module_names = NULL; module->next_module_names = NULL;
}else }else
{ {
module->next_module_names = malloc(next_module_count * sizeof(char*)); module->next_module_names = malloc(next_module_count * sizeof(char*));
@ -624,6 +622,11 @@ module_new_from_json(char *file_name)
module->pre_module = NULL; module->pre_module = NULL;
module->runtime_visited = false; module->runtime_visited = false;
module->run_priority = priority; module->run_priority = priority;
#ifdef DEEP_LEARN_SCHDUE
module->mdl.hash_node = 0;
module->mdl.global_queue_node = 0;
module->mdl.http_data_size = 0;
#endif
assert(module); assert(module);
module_set_http_info(module, request_count, request_headers, request_content_type, module_set_http_info(module, request_count, request_headers, request_content_type,

@ -33,6 +33,7 @@ int runtime_worker_threads_argument[RUNTIME_WORKER_THREAD_CORE_COUNT] = {
/* The active deadline of the sandbox running on each worker thread */ /* The active deadline of the sandbox running on each worker thread */
uint64_t runtime_worker_threads_deadline[RUNTIME_WORKER_THREAD_CORE_COUNT] = { UINT64_MAX }; uint64_t runtime_worker_threads_deadline[RUNTIME_WORKER_THREAD_CORE_COUNT] = { UINT64_MAX };
uint64_t runtime_worker_threads_remaining_slack[RUNTIME_WORKER_THREAD_CORE_COUNT] = { UINT64_MAX }; uint64_t runtime_worker_threads_remaining_slack[RUNTIME_WORKER_THREAD_CORE_COUNT] = { UINT64_MAX };
uint64_t runtime_worker_threads_laxity[RUNTIME_WORKER_THREAD_CORE_COUNT] = { UINT64_MAX };
/****************************************** /******************************************
* Shared Process / Listener Thread Logic * * Shared Process / Listener Thread Logic *

@ -94,6 +94,16 @@ sigalrm_propagate_workers(siginfo_t *signal_info)
uint64_t global_slack = global_request_scheduler_peek(); uint64_t global_slack = global_request_scheduler_peek();
if (global_slack < local_remaining_slack) pthread_kill(runtime_worker_threads[i], SIGALRM); if (global_slack < local_remaining_slack) pthread_kill(runtime_worker_threads[i], SIGALRM);
continue; continue;
} else if (scheduler == SCHEDULER_MDL) {
uint64_t local_remaining_slack = runtime_worker_threads_remaining_slack[i];
uint64_t global_slack = global_request_scheduler_peek();
if (global_slack < local_remaining_slack) pthread_kill(runtime_worker_threads[i], SIGALRM);
continue;
} else if (scheduler == SCHEDULER_LLF) {
uint64_t local_Laxity = runtime_worker_threads_laxity[i];
uint64_t global_Laxity = global_request_scheduler_peek();
if (global_Laxity < local_Laxity) pthread_kill(runtime_worker_threads[i], SIGALRM);
continue;
} }
} }
case RUNTIME_SIGALRM_HANDLER_BROADCAST: { case RUNTIME_SIGALRM_HANDLER_BROADCAST: {
@ -253,12 +263,13 @@ done:
void void
software_interrupt_arm_timer(void) software_interrupt_arm_timer(void)
{ {
if (!runtime_preemption_enabled) return;
/* if preemption disabled, broadcast sig alarm to all other threads to record the queuelength info */ /* if preemption disabled, broadcast sig alarm to all other threads to record the queuelength info */
if (!runtime_preemption_enabled) { if (!runtime_preemption_enabled) {
runtime_sigalrm_handler = RUNTIME_SIGALRM_HANDLER_BROADCAST; runtime_sigalrm_handler = RUNTIME_SIGALRM_HANDLER_BROADCAST;
} }
if (!runtime_preemption_enabled) return;
struct itimerval interval_timer; struct itimerval interval_timer;
memset(&interval_timer, 0, sizeof(struct itimerval)); memset(&interval_timer, 0, sizeof(struct itimerval));

@ -2,7 +2,7 @@ include Makefile.inc
#TESTS=fibonacci fibonacci2 fibonacci3 big_fibonacci C-Image-Manip empty work work1k work10k work100k work1m forever filesys sockserver sockclient empty #TESTS=fibonacci fibonacci2 fibonacci3 big_fibonacci C-Image-Manip empty work work1k work10k work100k work1m forever filesys sockserver sockclient empty
TESTS=fibonacci big_fibonacci C-Image-Manip empty work work1k work10k work100k work1m forever filesys sockserver sockclient empty TESTS=fibonacci big_fibonacci C-Image-Manip empty work work1k work10k work100k work1m forever filesys sockserver sockclient empty
TESTS2=fibonacciadd mem TESTS2=fibonacciadd mem work3 picinpic noop
TESTSRT=$(TESTS:%=%_rt) TESTSRT=$(TESTS:%=%_rt)
TESTSRT2=$(TESTS2:%=%_rt) TESTSRT2=$(TESTS2:%=%_rt)
@ -43,6 +43,7 @@ sod:
@make dir samples.so -C ./sod/ @make dir samples.so -C ./sod/
@cp ./sod/bin/license_plate_detection.so ${SLEDGE_BIN_DIR}/lpd_wasm.so @cp ./sod/bin/license_plate_detection.so ${SLEDGE_BIN_DIR}/lpd_wasm.so
@cp ./sod/bin/resize_image.so ${SLEDGE_BIN_DIR}/resize_wasm.so @cp ./sod/bin/resize_image.so ${SLEDGE_BIN_DIR}/resize_wasm.so
# @cp ./sod/bin/reverse.so ${SLEDGE_BIN_DIR}/reverse_wasm.so
C-Image-Manip: C-Image-Manip:
@echo "Making and Installing pngPlay" @echo "Making and Installing pngPlay"

@ -0,0 +1,99 @@
#include "dag_data_split.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*
 * Singly-linked list node holding one length-prefixed payload extracted
 * from a packed input buffer by splitData().
 */
struct DataNode {
    uint32_t dataLength;   /* number of bytes in data */
    char *data;            /* heap-allocated payload, owned by this node */
    struct DataNode *next; /* next record, or NULL at end of list */
};
/*
 * Parses a packed buffer of records, each prefixed by a 4-byte native-endian
 * length, into a newly allocated linked list. Parsing stops at the first
 * truncated record. Returns the list head, or NULL on allocation failure
 * (all partially built nodes are released).
 */
DataNode* splitData(char *buffer, uint32_t bufferSize) {
    DataNode *head = NULL;
    DataNode *tail = NULL;
    uint32_t offset = 0;

    while (offset < bufferSize) {
        /* need a complete 4-byte length prefix */
        if (bufferSize - offset < 4) {
            break;
        }
        uint32_t dataLength;
        /* bug fix: read the prefix with memcpy; the original
         * *(uint32_t *)(buffer + offset) was an unaligned access and a
         * strict-aliasing violation */
        memcpy(&dataLength, buffer + offset, sizeof dataLength);
        offset += 4;
        /* bug fix: compare against the remaining bytes; the original
         * 'offset + dataLength > bufferSize' could wrap around on a huge
         * length prefix and pass the check */
        if (dataLength > bufferSize - offset) {
            break;
        }

        DataNode *newNode = (DataNode *)malloc(sizeof(DataNode));
        if (newNode == NULL) {
            perror("Memory allocation failed");
            freeDataNodes(head); /* release the nodes built so far */
            return NULL;
        }
        newNode->data = (char *)malloc(dataLength);
        /* bug fix: malloc(0) may legally return NULL; only treat NULL as
         * failure when bytes were actually requested */
        if (newNode->data == NULL && dataLength > 0) {
            free(newNode);
            perror("Memory allocation failed");
            freeDataNodes(head); /* release the nodes built so far */
            return NULL;
        }
        memcpy(newNode->data, buffer + offset, dataLength);
        newNode->dataLength = dataLength;
        newNode->next = NULL;

        if (head == NULL) {
            head = newNode;
        } else {
            tail->next = newNode;
        }
        tail = newNode;
        offset += dataLength;
    }
    return head;
}
/* Releases every node in the list along with its payload buffer. */
void freeDataNodes(DataNode *head) {
    for (DataNode *node = head; node != NULL; ) {
        DataNode *doomed = node;
        node = node->next;
        free(doomed->data);
        free(doomed);
    }
}
/* Debug aid: prints the position and length of every record in the list. */
void printDataList(DataNode *head) {
    int position = 0;
    for (DataNode *node = head; node != NULL; node = node->next) {
        printf("Data %d: Length = %u\n", position, node->dataLength);
        position++;
    }
}
/* @returns the number of records in the list */
int getDataNodeCount(DataNode *head) {
    int total = 0;
    for (DataNode *node = head; node != NULL; node = node->next) {
        total++;
    }
    return total;
}
/*
 * Returns the payload of the index-th record (1-based), or NULL when the
 * index is out of range.
 */
const char* getDataNodeByIndex(DataNode *head, int index) {
    int position = 1;
    for (DataNode *node = head; node != NULL; node = node->next) {
        if (position == index) {
            return node->data;
        }
        position++;
    }
    return NULL; /* index out of range */
}

@ -0,0 +1,17 @@
/* Public interface for splitting a length-prefixed packed buffer into records. */
#pragma once
#include <stdint.h>

/* Opaque list node; the definition lives in dag_data_split.c. */
typedef struct DataNode DataNode;

/* Parses [4-byte length][payload]... records into a newly allocated list. */
DataNode* splitData(char *buffer, uint32_t bufferSize);
/* Frees the entire list, including each node's payload. */
void freeDataNodes(DataNode *head);
/* Debug aid: prints the position and length of every record. */
void printDataList(DataNode *head);
/* @returns the number of records in the list */
int getDataNodeCount(DataNode *head);
/**
 * Returns the payload of the index-th record, or NULL when out of range.
 * @param index is from 1 to n
 */
const char* getDataNodeByIndex(DataNode *head, int index);

Binary file not shown.

@ -2,8 +2,9 @@
#include <stdio.h> #include <stdio.h>
#include <string.h> #include <string.h>
#include <stdlib.h> #include <stdlib.h>
#include <stdint.h>
#include "dag_data_split.h"
// 假设的 Fibonacci 函数实现
unsigned long int fib(unsigned long int n) { unsigned long int fib(unsigned long int n) {
if (n <= 1) return n; if (n <= 1) return n;
return fib(n - 1) + fib(n - 2); return fib(n - 1) + fib(n - 2);
@ -12,26 +13,34 @@ unsigned long int fib(unsigned long int n) {
int main() { int main() {
char buffer[1024]; // Buffer to store input data char buffer[1024]; // Buffer to store input data
// Read data from stdin into buffer ssize_t bytes_read = read(0, buffer, sizeof(buffer));
ssize_t bytes_read = read(0, buffer, sizeof(buffer) - 1);
if (bytes_read < 0) { if (bytes_read < 0) {
perror("Error reading input"); perror("Error reading input");
return 1; return 1;
} }
buffer[bytes_read] = '\0';
// Remove potential newline character at the end of the input DataNode *dataList = splitData(buffer, bytes_read);
if (bytes_read > 0 && (buffer[bytes_read - 1] == '\n' || buffer[bytes_read - 1] == '\r')) { if (dataList == NULL) {
buffer[bytes_read - 1] = '\0'; fprintf(stderr, "Failed to split data.\n");
return 1;
} }
unsigned long int num1, num2; unsigned long int num1, num2;
char *line = strtok(buffer, "&"); const char *firstdata = getDataNodeByIndex(dataList, 1);
char *second_part = strtok(NULL, "&"); // Assume the rest of the string is the second number const char *seconddata = getDataNodeByIndex(dataList, 2);
if (firstdata == NULL || seconddata == NULL) {
fprintf(stderr, "Not enough data for two numbers.\n");
freeDataNodes(dataList);
return 1;
}
if (sscanf(firstdata, "%lu", &num1) != 1 || sscanf(seconddata, "%lu", &num2) != 1) {
fprintf(stderr, "Failed to parse the numbers correctly.\n");
freeDataNodes(dataList);
return 1;
}
if (line && sscanf(line, "%lu", &num1) == 1 &&
second_part && sscanf(second_part, "%lu", &num2) == 1) {
// Calculate Fibonacci numbers and their sum
unsigned long int fib1 = fib(num1); unsigned long int fib1 = fib(num1);
unsigned long int fib2 = fib(num2); unsigned long int fib2 = fib(num2);
unsigned long int sum = fib1 + fib2; unsigned long int sum = fib1 + fib2;
@ -42,13 +51,9 @@ int main() {
// Write to stdout // Write to stdout
write(1, output, len); write(1, output, len);
} else {
const char *error_msg = "Invalid input. Please enter two numbers separated by '&'. Your input was: "; // <20><><EFBFBD><EFBFBD><EFBFBD>ڴ<EFBFBD>
char output_buffer[2048]; // Buffer to hold the error message and user input freeDataNodes(dataList);
int len = snprintf(output_buffer, sizeof(output_buffer), "%s%s\n", error_msg, buffer);
write(1, output_buffer, len);
return 1;
}
return 0; return 0;
} }

@ -25,9 +25,9 @@ pid2=$!
pid3=$! pid3=$!
./test_8c.sh $f4 $duration $concurrency 40k.jpg 10009 2>&1 & ./test_8c.sh $f4 $duration $concurrency 40k.jpg 10009 2>&1 &
pid4=$! pid4=$!
wait -f $pid1 wait $pid1
wait -f $pid2 wait $pid2
wait -f $pid3 wait $pid3
wait -f $pid4 wait $pid4
printf "[OK]\n" printf "[OK]\n"

@ -0,0 +1,26 @@
import sys
def process_file(input_file, noop_functions):
    """Extract column 6 of every perf-log line mentioning a no-op function.

    Writes one "<noop>.txt" file (in the current directory) per entry in
    *noop_functions*, containing the matched values joined by newlines; the
    file is empty when nothing matched.
    """
    data = {noop: [] for noop in noop_functions}
    with open(input_file, "r") as infile:
        for line in infile:
            fields = line.split(",")
            if len(fields) <= 6:
                # bug fix: a matching line with fewer than 7 columns used to
                # raise IndexError; skip malformed lines instead
                continue
            value = fields[6]
            for noop in noop_functions:
                if noop in line:
                    data[noop].append(value)
    for noop, values in data.items():
        with open(f"{noop}.txt", "w") as outfile:
            outfile.write("\n".join(values))
if __name__ == "__main__":
    # Function names whose perf-log rows should be split into per-name files.
    noop_functions = ["noop1", "noop2", "noop3", "noop4", "noop5"]
    argv = sys.argv[1:]
    if len(argv) < 1:
        # NOTE(review): usage mentions 'percentage' but only the file path
        # is actually consumed -- confirm the intended CLI
        print("usage:", sys.argv[0], "file_dir percentage")
        sys.exit()
    input_file = argv[0]
    process_file(input_file, noop_functions)

@ -0,0 +1,11 @@
/* Minimal no-op workload: does no work and exits successfully. */
#include <stdio.h>

/* Intentionally empty; exists so the scheduler has a zero-work function to run. */
void noop() {
}

int main() {
    noop();
    return 0;
}

@ -0,0 +1,22 @@
import os
def calculate_average(filename):
    """Return the arithmetic mean of the integers in *filename* (one per
    line), or 0 when the file contains no values.

    Blank lines are skipped, so a trailing newline no longer raises
    ValueError (bug fix: the original called int() on every raw line).
    """
    with open(filename, "r") as file:
        values = [int(line.strip()) for line in file if line.strip()]
    average = sum(values) / len(values) if values else 0
    return average
def main():
    """Print the average value recorded in each no-op result file, or a
    notice when the file is missing."""
    noop_functions = ["noop1", "noop2", "noop3", "noop4", "noop5"]
    for noop in noop_functions:
        filename = f"{noop}.txt"
        if os.path.exists(filename):
            average = calculate_average(filename)
            # bug fix: both f-strings below had lost their placeholders
            # (they printed the literal text with no interpolation)
            print(f"Average for {noop}: {average}")
        else:
            print(f"{filename} does not exist.")


if __name__ == "__main__":
    main()

@ -36,7 +36,7 @@ def get_values(key, value, miss_deadline_rate, total_latency, running_times, pre
before_dot = file_name.split(".")[0] before_dot = file_name.split(".")[0]
joint_f_name = before_dot + "_total_time.txt" joint_f_name = before_dot + "_total_time.txt"
cmd='python3 ~/sledge-serverless-framework/runtime/tests/meet_deadline_percentage.py %s 50' % file_name cmd='python3 /home/hai/sledge/sledge/runtime/tests/meet_deadline_percentage.py %s 50' % file_name
rt=os.popen(cmd).read().strip() rt=os.popen(cmd).read().strip()
cmd2='mv total_time.txt %s' % joint_f_name cmd2='mv total_time.txt %s' % joint_f_name
os.popen(cmd2) os.popen(cmd2)

@ -0,0 +1,111 @@
#include "dag_split_image.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*
 * Singly-linked list node holding one length-prefixed image payload
 * extracted from a packed input buffer by splitData().
 */
struct DataNode {
    uint32_t dataLength;   /* number of bytes in data */
    unsigned char *data;   /* heap-allocated payload, owned by this node */
    struct DataNode *next; /* next record, or NULL at end of list */
};
/*
 * Parses a packed buffer of records, each prefixed by a 4-byte native-endian
 * length, into a newly allocated linked list. Parsing stops at the first
 * truncated record. Returns the list head, or NULL on allocation failure
 * (all partially built nodes are released).
 */
DataNode* splitData(unsigned char *buffer, uint32_t bufferSize) {
    DataNode *head = NULL;
    DataNode *tail = NULL;
    uint32_t offset = 0;

    while (offset < bufferSize) {
        /* need a complete 4-byte length prefix */
        if (bufferSize - offset < 4) {
            break;
        }
        uint32_t dataLength;
        /* bug fix: read the prefix with memcpy; the original
         * *(uint32_t *)(buffer + offset) was an unaligned access and a
         * strict-aliasing violation */
        memcpy(&dataLength, buffer + offset, sizeof dataLength);
        offset += 4;
        /* bug fix: compare against the remaining bytes; the original
         * 'offset + dataLength > bufferSize' could wrap around on a huge
         * length prefix and pass the check */
        if (dataLength > bufferSize - offset) {
            break;
        }

        DataNode *newNode = (DataNode *)malloc(sizeof(DataNode));
        if (newNode == NULL) {
            perror("Memory allocation failed");
            freeDataNodes(head);
            return NULL;
        }
        newNode->data = (unsigned char *)malloc(dataLength);
        /* bug fix: malloc(0) may legally return NULL; only treat NULL as
         * failure when bytes were actually requested */
        if (newNode->data == NULL && dataLength > 0) {
            free(newNode);
            perror("Memory allocation failed");
            freeDataNodes(head);
            return NULL;
        }
        memcpy(newNode->data, buffer + offset, dataLength);
        newNode->dataLength = dataLength;
        newNode->next = NULL;

        if (head == NULL) {
            head = newNode;
        } else {
            tail->next = newNode;
        }
        tail = newNode;
        offset += dataLength;
    }
    return head;
}
/* Releases every node in the list along with its payload buffer. */
void freeDataNodes(DataNode *head) {
    for (DataNode *node = head; node != NULL; ) {
        DataNode *doomed = node;
        node = node->next;
        free(doomed->data);
        free(doomed);
    }
}
/* Debug aid: prints the position and length of every record in the list. */
void printDataList(DataNode *head) {
    int position = 0;
    for (DataNode *node = head; node != NULL; node = node->next) {
        printf("Data %d: Length = %u\n", position, node->dataLength);
        position++;
    }
}
/* @returns the number of records in the list */
int getDataNodeCount(DataNode *head) {
    int total = 0;
    for (DataNode *node = head; node != NULL; node = node->next) {
        total++;
    }
    return total;
}
/*
 * Returns the payload of the index-th record (1-based), or NULL when the
 * index is out of range.
 */
unsigned char* getDataNodeByIndex(DataNode *head, int index) {
    int position = 1;
    for (DataNode *node = head; node != NULL; node = node->next) {
        if (position == index) {
            return node->data;
        }
        position++;
    }
    return NULL;
}
/*
 * Returns the payload length of the index-th record (1-based), or 0 when
 * the index is out of range.
 */
uint32_t getImageDataSize(DataNode *head, int index){
    int position = 1;
    for (DataNode *node = head; node != NULL; node = node->next) {
        if (position == index) {
            return node->dataLength;
        }
        position++;
    }
    return 0;
}

@ -0,0 +1,18 @@
/* Public interface for splitting a length-prefixed packed image buffer into records. */
#pragma once
#include <stdint.h>

/* Opaque list node; the definition lives in dag_split_image.c. */
typedef struct DataNode DataNode;

/* Parses [4-byte length][payload]... records into a newly allocated list. */
DataNode* splitData(unsigned char *buffer, uint32_t bufferSize);
/* Frees the entire list, including each node's payload. */
void freeDataNodes(DataNode *head);
/* Debug aid: prints the position and length of every record. */
void printDataList(DataNode *head);
/* @returns the number of records in the list */
int getDataNodeCount(DataNode *head);
/**
 * Returns the payload of the index-th record, or NULL when out of range.
 * @param index is from 1 to n
 */
unsigned char* getDataNodeByIndex(DataNode *head, int index);
/* @returns the byte length of the index-th record, or 0 when out of range */
uint32_t getImageDataSize(DataNode *head, int index);

@ -0,0 +1,79 @@
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "dag_split_image.h"
#define MAX_IMG_SZ 8*1025*1024
/*
 * Read a length-prefixed pair of BMP images from stdin and write a composite
 * to stdout: the large image's 54-byte header is copied verbatim, then within
 * every 2400-byte window of its pixel data the first 300 bytes are replaced
 * with bytes from the small image until the small image is exhausted.
 * Returns -1 on I/O/allocation failure, 1 on malformed input, 0 on success.
 */
int main()
{
	unsigned char *zInpbuf = malloc(MAX_IMG_SZ);
	if (!zInpbuf)
	{
		perror("malloc");
		return -1;
	}
	ssize_t imgSz = read(0, zInpbuf, MAX_IMG_SZ);
	if (imgSz <= 0)
	{
		perror("read");
		free(zInpbuf);
		return -1;
	}
	DataNode *dataList = splitData(zInpbuf, imgSz);
	if (dataList == NULL) {
		fprintf(stderr, "Failed to split data.\n");
		free(zInpbuf); /* fix: this path leaked the input buffer */
		return 1;
	}
	unsigned char *Imagedata1 = getDataNodeByIndex(dataList, 1);
	unsigned char *Imagedata2 = getDataNodeByIndex(dataList, 2);
	uint32_t imageSize1 = getImageDataSize(dataList, 1);
	uint32_t imageSize2 = getImageDataSize(dataList, 2);
	/* fix: the original dereferenced these without confirming that two
	 * records exist and that the first holds at least a 54-byte header. */
	if (Imagedata1 == NULL || Imagedata2 == NULL || imageSize1 < 54) {
		fprintf(stderr, "Expected two records with at least a 54-byte header.\n");
		freeDataNodes(dataList);
		free(zInpbuf);
		return 1;
	}
	int x, y;
	FILE *out_bin = stdout;
	/* Copy the 54-byte header of the large image unchanged.  (The original
	 * also computed an unused `y` per iteration; removed.) */
	for (int i = 0; i < 54; i++)
	{
		x = Imagedata1[i];
		fwrite(&x, 1, 1, out_bin);
	}
	int i = 0;
	unsigned long big_img_offset = 54;
	unsigned long small_img_offset = 54; /* small image's own header is skipped */
	while (big_img_offset < imageSize1)
	{
		if (i == 2400) i = 0;
		x = Imagedata1[big_img_offset++];
		if (i < 300 && small_img_offset < imageSize2)
		{
			y = Imagedata2[small_img_offset++];
			fwrite(&y, 1, 1, out_bin);
		}
		else
		{
			fwrite(&x, 1, 1, out_bin);
		}
		i++;
	}
	/* fix: removed the original trailing copy loop — it was unreachable,
	 * since the loop above only exits once big_img_offset >= imageSize1. */
	freeDataNodes(dataList); /* fix: the list was leaked */
	free(zInpbuf);
	return 0;
}

@ -1 +1,2 @@
sudo chsh -s /bin/bash xiaosuGW sudo chsh -s /bin/bash hai

@ -20,10 +20,10 @@ declare project_path="$(
echo $project_path echo $project_path
path=`pwd` path=`pwd`
#export SLEDGE_DISABLE_PREEMPTION=true #export SLEDGE_DISABLE_PREEMPTION=true
export SLEDGE_CPU_SPEED=3300 #export SLEDGE_CPU_SPEED=3300
#export SLEDGE_SCHEDULER=SRSF export SLEDGE_SCHEDULER=EDF
export SLEDGE_SIGALRM_HANDLER=BROADCAST #export SLEDGE_SIGALRM_HANDLER=BROADCAST
#export SLEDGE_SIGALRM_HANDLER=TRIAGED export SLEDGE_SIGALRM_HANDLER=TRIAGED
#export SLEDGE_NWORKERS=1 #export SLEDGE_NWORKERS=1
export SLEDGE_SCHEDULER=EDF export SLEDGE_SCHEDULER=EDF
export SLEDGE_SANDBOX_PERF_LOG=$path/$output export SLEDGE_SANDBOX_PERF_LOG=$path/$output
@ -37,7 +37,8 @@ cd $project_path/runtime/bin
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/mulitple_linear_chain.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/mulitple_linear_chain.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing3.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing3.json
LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing4.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_fibonacci.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_fibonacci.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_sodresize.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_sodresize.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_sodresize.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_sodresize.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing4.json
LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing_graph.json

@ -21,23 +21,18 @@ echo $project_path
path=`pwd` path=`pwd`
#export SLEDGE_DISABLE_PREEMPTION=true #export SLEDGE_DISABLE_PREEMPTION=true
export SLEDGE_CPU_SPEED=2400 export SLEDGE_CPU_SPEED=2400
export SLEDGE_SCHEDULER=SRSF export SLEDGE_SCHEDULER=EDF
export SLEDGE_SIGALRM_HANDLER=BROADCAST export SLEDGE_SIGALRM_HANDLER=TRIAGED
#export SLEDGE_SIGALRM_HANDLER=TRIAGED #export SLEDGE_SIGALRM_HANDLER=TRIAGED
#export SLEDGE_NWORKERS=1 #export SLEDGE_NWORKERS=1
#export SLEDGE_SCHEDULER=EDF #export SLEDGE_SCHEDULER=EDF
export SLEDGE_SANDBOX_PERF_LOG=$path/$output export SLEDGE_SANDBOX_PERF_LOG=$path/$output
echo $SLEDGE_SANDBOX_PERF_LOG echo $SLEDGE_SANDBOX_PERF_LOG
cd $project_path/runtime/bin cd $project_path/runtime/bin
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_fibonacci.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_big_fibonacci.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/graph.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_armcifar10.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing_graph.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_png2bmp.json LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing_graph2.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_image_processing.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_dag_image.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/mulitple_linear_chain.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing3.json
LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing4.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_fibonacci.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_sodresize.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_sodresize.json

@ -0,0 +1,32 @@
#!/bin/bash
# Configure and (optionally) launch the sledge runtime with the FIFO
# scheduler; the single argument names the sandbox perf log file, written
# to the directory the script is invoked from.
function usage {
echo "$0 [perf output file, chain_function_perf.log or single_function_perf.log or opt_function_perf.log]"
exit 1
}
if [ $# != 1 ] ; then
usage
exit 1;
fi
output=$1
# Resolve the repository root relative to this script's location.
declare project_path="$(
cd "$(dirname "$0")/../.."
pwd
)"
echo $project_path
path=`pwd`
#export SLEDGE_DISABLE_PREEMPTION=true
export SLEDGE_CPU_SPEED=2500
export SLEDGE_SCHEDULER=FIFO
#export SLEDGE_SIGALRM_HANDLER=BROADCAST
#export SLEDGE_SIGALRM_HANDLER=TRIAGED
#export SLEDGE_NWORKERS=16
#export SLEDGE_SCHEDULER=EDF
export SLEDGE_SANDBOX_PERF_LOG=$path/$output
echo $SLEDGE_SANDBOX_PERF_LOG
cd $project_path/runtime/bin
# NOTE(review): every sledgert launch line below is commented out, so this
# script currently only exports the environment — confirm that is intended.
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_noop1.json

@ -0,0 +1,40 @@
#!/bin/bash
# Benchmark driver: runs the single-image 5k workload against sledge at three
# offered-load levels (labelled 50%, 70%, 99% of max RPS) and collects logs.
# NOTE(review): `usage` is defined but never invoked and the script reads no
# arguments, although usage advertises a cpu-log parameter — confirm intent.
function usage {
echo "$0 [cpu-log]"
exit 1
}
#chmod 400 ./id_rsa
#path="/home/weihao/sledge/sledge_tree/runtime/tests"
path="/home/njl/sledge/runtime/tests"
#test single 5k c5 50% max RPS (480)
f1="5k_single_50.txt"
server_log_file="execution_single_5k_50.log"
# Pattern for each level: start the runtime in the background, drive it with
# test_rps.sh, wait for the load generator, then kill the runtime.
$path/start.sh $server_log_file >/dev/null 2>&1 &
echo "sledge is running"
./test_rps.sh $f1 120 48 5k.jpg 10000 2>&1 &
pid1=$!
wait $pid1
$path/kill_sledge.sh
#test single 5k c5 70% max RPS
f1="5k_single_70.txt"
server_log_file="execution_single_5k_70.log"
$path/start.sh $server_log_file >/dev/null 2>&1 &
echo "sledge is running"
./test_rps.sh $f1 120 68 5k.jpg 10000 2>&1 &
pid1=$!
wait $pid1
$path/kill_sledge.sh
#test single 5k c5 99% max RPS
f1="5k_single_99.txt"
server_log_file="execution_single_5k_99.log"
$path/start.sh $server_log_file >/dev/null 2>&1 &
echo "sledge is running"
./test_rps.sh $f1 120 96 5k.jpg 10000 2>&1 &
pid1=$!
wait $pid1
$path/kill_sledge.sh

@ -17,5 +17,5 @@ port=$5
#hey -disable-compression -disable-keepalive -disable-redirects -c 1 -q $rps -z $duration\s -cpus 1 -t 0 -m GET -D "$image" "http://10.10.1.1:$port" #hey -disable-compression -disable-keepalive -disable-redirects -c 1 -q $rps -z $duration\s -cpus 1 -t 0 -m GET -D "$image" "http://10.10.1.1:$port"
#hey -disable-compression -disable-keepalive -disable-redirects -c $concurrency -z 20s -t 0 -m GET -D "$image" "http://10.10.1.1:$port" #hey -disable-compression -disable-keepalive -disable-redirects -c $concurrency -z 20s -t 0 -m GET -D "$image" "http://10.10.1.1:$port"
hey -disable-compression -disable-keepalive -disable-redirects -c $concurrency -z $duration\s -t 0 -m GET -D "$image" "http://10.10.1.1:$port" > $output hey -disable-compression -disable-keepalive -disable-redirects -c 5 -cpus 2 -q $concurrency -z $duration\s -t 0 -m GET -D "$image" "http://10.10.1.1:$port" > $output

@ -0,0 +1,122 @@
{
"active": true,
"name": "work1",
"path": "work1m_wasm.so",
"port": 10000,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 0,
"next_modules": ["reverse1", "resize1"],
"http-req-headers": [],
"http-req-content-type": "image/jpeg",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "image/jpeg"
},
{
"active": true,
"name": "reverse1",
"path": "reverse_wasm.so",
"port": 10001,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 1,
"next_modules": ["png2bmp1"],
"http-req-headers": [],
"http-req-content-type": "image/jpeg",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "image/png"
},
{
"active": true,
"name": "resize1",
"path": "resize_wasm.so",
"port": 10002,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 2,
"pre_module_count": 1,
"next_modules": ["png2bmp2"],
"http-req-headers": [],
"http-req-content-type": "image/jpeg",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "image/png"
},
{
"active": true,
"name": "png2bmp1",
"path": "C-Image-Manip_wasm.so",
"port": 10003,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 1,
"next_modules": ["picinpic1"],
"http-req-headers": [],
"http-req-content-type": "image/png",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "image/bmp"
},
{
"active": true,
"name": "png2bmp2",
"path": "C-Image-Manip_wasm.so",
"port": 10004,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 1,
"next_modules": ["picinpic1"],
"http-req-headers": [],
"http-req-content-type": "image/png",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "image/bmp"
},
{
"active": true,
"name": "picinpic1",
"path": "picinpic_wasm.so",
"port": 10005,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 2,
"next_modules": ["cifar10_1"],
"http-req-headers": [],
"http-req-content-type": "image/bmp",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "image/bmp",
"tail-module": true
},
{
"active": true,
"name": "cifar10_1",
"path": "cifar10_wasm.so",
"port": 10006,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 1,
"next_modules": [],
"http-req-headers": [],
"http-req-content-type": "image/bmp",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain",
"tail-module": true
},

@ -0,0 +1,50 @@
#!/bin/bash
# Search for the maximum sustainable RPS: run the benchmark up to five times,
# raising the offered rate by `step` each round, and stop once measured
# throughput drops below the best value seen so far.
# Usage: $0 current_rps(*5) add_step(*5) duratime
function usage {
echo "Please enter right parameters: current_rps(*5) add_step(*5) duratime"
exit 1
}
if [ $# -ne 3 ]; then
usage
fi
echo "current_rps(*5) add_step(*5) duratime"
#path="/home/njl/sledge/runtime/tests"
path="/home/hai/sledge/sledge/runtime/tests"
current_rps=$1
step=$2
duratime=$3
max_rps=0
# fix: removed dead variables from the original (`max_latency` was never
# read and `loop=1` was immediately clobbered by the for loop).
output="hey_test_max_rps.log"
server_log_file="test_rps.log"
for loop in {1..5}; do
# Boot the runtime, run one load test at the current offered rate, kill it.
$path/start-edf.sh $server_log_file >/dev/null 2>&1 &
echo "sledge is running loop $loop"
./test_rps.sh $output $duratime $current_rps 5k.jpg 10000 2>&1 &
pid1=$!
wait $pid1
$path/kill_sledge.sh
# Despite the name, this extracts hey's "Requests/sec" value, i.e. the
# achieved throughput — not a latency.
latency=$(grep "Requests" $output | awk -F ': ' '{print $2}')
if (( $(echo "$latency < $max_rps" | bc -l) )); then
break
fi
echo "loop_$loop RPS: $latency"
max_rps=$latency
current_rps=$((current_rps + step))
done
echo "Maximum RPS: $max_rps"

@ -0,0 +1,88 @@
{
"active": true,
"name": "resize1",
"path": "resize_wasm.so",
"port": 10000,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 0,
"next_modules": ["png2bmp1", "lpd_wasm1"],
"http-req-headers": [],
"http-req-content-type": "image/jpeg",
"http-req-size": 1024000,
"http-resp-headers": [],
"http-resp-size": 1024000,
"http-resp-content-type": "image/png"
},
{
"active": true,
"name": "png2bmp1",
"path": "C-Image-Manip_wasm.so",
"port": 10001,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 1,
"next_modules": ["cifar10_1"],
"http-req-headers": [],
"http-req-content-type": "image/png",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "image/bmp"
},
{
"active": true,
"name": "lpd_wasm1",
"path": "lpd_wasm.so",
"port": 10002,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 2,
"pre_module_count": 1,
"next_modules": ["work1"],
"http-req-headers": [],
"http-req-content-type": "image/bmp",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "text/plain",
"tail-module": true
},
{
"active": true,
"name": "cifar10_1",
"path": "cifar10_wasm.so",
"port": 10003,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 1,
"next_modules": ["work1"],
"http-req-headers": [],
"http-req-content-type": "image/bmp",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain",
"tail-module": true
},
{
"active": true,
"name": "work1",
"path": "work3_wasm.so",
"port": 10004,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 2,
"next_modules": [],
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain",
"tail-module": true
},

@ -0,0 +1,340 @@
{
"active": true,
"name": "resize1",
"path": "resize_wasm.so",
"port": 10000,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 0,
"next_modules": ["png2bmp1", "lpd_wasm1"],
"http-req-headers": [],
"http-req-content-type": "image/jpeg",
"http-req-size": 1024000,
"http-resp-headers": [],
"http-resp-size": 1024000,
"http-resp-content-type": "image/png"
},
{
"active": true,
"name": "png2bmp1",
"path": "C-Image-Manip_wasm.so",
"port": 10001,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 1,
"next_modules": ["cifar10_1"],
"http-req-headers": [],
"http-req-content-type": "image/png",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "image/bmp"
},
{
"active": true,
"name": "lpd_wasm1",
"path": "lpd_wasm.so",
"port": 10002,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 2,
"pre_module_count": 1,
"next_modules": ["work1"],
"http-req-headers": [],
"http-req-content-type": "image/bmp",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "text/plain"
},
{
"active": true,
"name": "cifar10_1",
"path": "cifar10_wasm.so",
"port": 10003,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 1,
"next_modules": ["work1"],
"http-req-headers": [],
"http-req-content-type": "image/bmp",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain"
},
{
"active": true,
"name": "work1",
"path": "work3_wasm.so",
"port": 10004,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 2,
"next_modules": [],
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain"
},
{
"active": true,
"name": "resize1_2",
"path": "resize_wasm.so",
"port": 10005,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 0,
"next_modules": ["png2bmp1_2", "lpd_wasm1_2"],
"http-req-headers": [],
"http-req-content-type": "image/jpeg",
"http-req-size": 1024000,
"http-resp-headers": [],
"http-resp-size": 1024000,
"http-resp-content-type": "image/png"
},
{
"active": true,
"name": "png2bmp1_2",
"path": "C-Image-Manip_wasm.so",
"port": 10006,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 1,
"next_modules": ["cifar10_2"],
"http-req-headers": [],
"http-req-content-type": "image/png",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "image/bmp"
},
{
"active": true,
"name": "lpd_wasm1_2",
"path": "lpd_wasm.so",
"port": 10007,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 2,
"pre_module_count": 1,
"next_modules": ["work1_2"],
"http-req-headers": [],
"http-req-content-type": "image/bmp",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "text/plain",
},
{
"active": true,
"name": "cifar10_2",
"path": "cifar10_wasm.so",
"port": 10008,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 1,
"next_modules": ["work1_2"],
"http-req-headers": [],
"http-req-content-type": "image/bmp",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain"
},
{
"active": true,
"name": "work1_2",
"path": "work3_wasm.so",
"port": 10009,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 2,
"next_modules": [],
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain"
},{
"active": true,
"name": "resize1_3",
"path": "resize_wasm.so",
"port": 10010,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 0,
"next_modules": ["png2bmp1_3", "lpd_wasm1_3"],
"http-req-headers": [],
"http-req-content-type": "image/jpeg",
"http-req-size": 1024000,
"http-resp-headers": [],
"http-resp-size": 1024000,
"http-resp-content-type": "image/png"
},
{
"active": true,
"name": "png2bmp1_3",
"path": "C-Image-Manip_wasm.so",
"port": 10011,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 1,
"next_modules": ["cifar10_3"],
"http-req-headers": [],
"http-req-content-type": "image/png",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "image/bmp"
},
{
"active": true,
"name": "lpd_wasm1_3",
"path": "lpd_wasm.so",
"port": 10012,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 2,
"pre_module_count": 1,
"next_modules": ["work1_3"],
"http-req-headers": [],
"http-req-content-type": "image/bmp",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "text/plain",
},
{
"active": true,
"name": "cifar10_3",
"path": "cifar10_wasm.so",
"port": 10013,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 1,
"next_modules": ["work1_3"],
"http-req-headers": [],
"http-req-content-type": "image/bmp",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain",
},
{
"active": true,
"name": "work1_3",
"path": "work3_wasm.so",
"port": 10014,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 2,
"next_modules": [],
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain",
},
{
"active": true,
"name": "resize1_4",
"path": "resize_wasm.so",
"port": 10015,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 0,
"next_modules": ["png2bmp1_4", "lpd_wasm1_4"],
"http-req-headers": [],
"http-req-content-type": "image/jpeg",
"http-req-size": 1024000,
"http-resp-headers": [],
"http-resp-size": 1024000,
"http-resp-content-type": "image/png"
},
{
"active": true,
"name": "png2bmp1_4",
"path": "C-Image-Manip_wasm.so",
"port": 10016,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 1,
"next_modules": ["cifar10_4"],
"http-req-headers": [],
"http-req-content-type": "image/png",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "image/bmp"
},
{
"active": true,
"name": "lpd_wasm1_4",
"path": "lpd_wasm.so",
"port": 10017,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 2,
"pre_module_count": 1,
"next_modules": ["work1_4"],
"http-req-headers": [],
"http-req-content-type": "image/bmp",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 4096000,
"http-resp-content-type": "text/plain",
},
{
"active": true,
"name": "cifar10_4",
"path": "cifar10_wasm.so",
"port": 10018,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 1,
"next_modules": ["work1_4"],
"http-req-headers": [],
"http-req-content-type": "image/bmp",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain",
},
{
"active": true,
"name": "work1_4",
"path": "work3_wasm.so",
"port": 10019,
"relative-deadline-us": 16346,
"argsize": 1,
"priority": 1,
"pre_module_count": 2,
"next_modules": [],
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 4096000,
"http-resp-headers": [],
"http-resp-size": 1024,
"http-resp-content-type": "text/plain",
}

@ -0,0 +1,17 @@
{
"active": true,
"name": "noop1",
"path": "noop_wasm.so",
"port": 10000,
"relative-deadline-us": 0,
"argsize": 0,
"priority": 1,
"pre_module_count": 0,
"next_modules": [],
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 0,
"http-resp-headers": [],
"http-resp-size": 0,
"http-resp-content-type": "text/plain",
}

@ -14,6 +14,8 @@ rps=$3
image=$4 image=$4
port=$5 port=$5
#hey -disable-compression -disable-keepalive -disable-redirects -c 1 -q $rps -z $duration\s -cpus 1 -t 0 -m GET -D "$image" "http://10.10.1.1:$port" echo "hey test"
hey -disable-compression -disable-keepalive -disable-redirects -c 8 -q 50 -z $duration\s -t 0 -m GET -D "$image" "http://10.10.1.1:$port" > $output
hey -disable-compression -disable-keepalive -disable-redirects -c 5 -q $rps -z $duration\s -t 0 -m GET -D "$image" "http://127.0.0.1:$port" > $output
#loadtest -c 5 --rps $rps -t $duration --method GET --data @$image "http://10.16.109.192:$port" > $output
#hey -disable-compression -disable-keepalive -disable-redirects -c 8 -q 50 -z $duration\s -t 0 -m GET -D "$image" "http://10.10.1.1:$port" > $output

@ -1,14 +0,0 @@
{
"active": true,
"name": "work1k",
"path": "work1k_wasm.so",
"port": 10000,
"relative-deadline-us": 50000,
"argsize": 1,
"http-req-headers": [],
"http-req-content-type": "text/plain",
"http-req-size": 1200,
"http-resp-headers": [],
"http-resp-size": 1200,
"http-resp-content-type": "text/plain"
}

@ -0,0 +1,99 @@
#include "dag_data_split.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* One parsed record: a heap-owned byte payload plus its length. */
struct DataNode {
uint32_t dataLength; /* number of valid bytes in data */
char *data; /* malloc'd payload, released by freeDataNodes */
struct DataNode *next; /* next record, or NULL at the tail */
};
/*
 * Split a packed buffer into a linked list of records.
 *
 * Wire format: each record is a 4-byte host-endian uint32 length followed by
 * that many payload bytes, repeated until the buffer ends.  Parsing stops at
 * the first truncated record.
 *
 * Returns the list head (caller frees with freeDataNodes), or NULL when the
 * buffer holds no complete record or an allocation fails.
 */
DataNode* splitData(char *buffer, uint32_t bufferSize) {
	DataNode *head = NULL;
	DataNode *tail = NULL;
	uint32_t offset = 0;
	while (offset < bufferSize) {
		/* fix: subtraction form cannot wrap, unlike `offset + 4 > bufferSize`. */
		if (bufferSize - offset < 4) {
			break;
		}
		uint32_t dataLength;
		/* fix: memcpy avoids the misaligned / strict-aliasing read UB of the
		 * original `*(uint32_t *)(buffer + offset)` cast. */
		memcpy(&dataLength, buffer + offset, sizeof dataLength);
		offset += 4;
		/* fix: a huge dataLength could overflow `offset + dataLength` and
		 * slip past the original bounds check. */
		if (dataLength > bufferSize - offset) {
			break;
		}
		DataNode *newNode = malloc(sizeof *newNode);
		if (newNode == NULL) {
			perror("Memory allocation failed");
			freeDataNodes(head);
			return NULL;
		}
		newNode->data = malloc(dataLength);
		/* malloc(0) may legitimately return NULL; only treat NULL as an
		 * error when bytes were actually requested. */
		if (newNode->data == NULL && dataLength > 0) {
			free(newNode);
			perror("Memory allocation failed");
			freeDataNodes(head);
			return NULL;
		}
		if (dataLength > 0) {
			memcpy(newNode->data, buffer + offset, dataLength);
		}
		newNode->dataLength = dataLength;
		newNode->next = NULL;
		if (head == NULL) {
			head = newNode;
		} else {
			tail->next = newNode;
		}
		tail = newNode;
		offset += dataLength;
	}
	return head;
}
/* Walk the list, releasing each payload and then the node that owns it. */
void freeDataNodes(DataNode *head) {
	for (DataNode *node = head; node != NULL; ) {
		DataNode *after = node->next;
		free(node->data);
		free(node);
		node = after;
	}
}
/* Report each record's payload length on stdout, indexed from 0. */
void printDataList(DataNode *head) {
	int position = 0;
	for (DataNode *node = head; node != NULL; node = node->next) {
		printf("Data %d: Length = %u\n", position, node->dataLength);
		position++;
	}
}
/* Count the nodes in the list; an empty list yields 0. */
int getDataNodeCount(DataNode *head) {
	int n = 0;
	for (DataNode *node = head; node != NULL; node = node->next) {
		n++;
	}
	return n;
}
/* Return the payload of the index-th node (1-based); NULL when the list
 * holds fewer than `index` nodes. */
const char* getDataNodeByIndex(DataNode *head, int index) {
	int position = 1;
	for (DataNode *node = head; node != NULL; node = node->next) {
		if (position == index) {
			return node->data;
		}
		position++;
	}
	return NULL;
}

@ -0,0 +1,17 @@
#pragma once
#include <stdint.h>
/* Opaque list node; the definition lives in the implementation file. */
typedef struct DataNode DataNode;
/* Parse length-prefixed records out of buffer; returns the list head
 * (caller frees with freeDataNodes) or NULL. */
DataNode* splitData(char *buffer, uint32_t bufferSize);
/* Free every node and its payload. */
void freeDataNodes(DataNode *head);
/* Print each record's length to stdout. */
void printDataList(DataNode *head);
/* Return the number of records in the list. */
int getDataNodeCount(DataNode *head);
/**
 * Return the payload of the index-th record, or NULL if absent.
 * @param index is from 1 to n
 */
const char* getDataNodeByIndex(DataNode *head, int index);

@ -0,0 +1,48 @@
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include "dag_data_split.h"
#define MAX_BUF (1024 * 1024 * 1) // 1m
/*
 * Test driver: read a packed record buffer from stdin, split it, and print
 * the first record as text.  Returns 1 on any failure, 0 on success.
 */
int main() {
	char *d = malloc(MAX_BUF + 1);
	if (d == NULL) { /* fix: the original used the buffer unchecked */
		perror("malloc");
		return 1;
	}
	ssize_t bytes_read = read(0, d, MAX_BUF);
	if (bytes_read < 0) {
		perror("Error reading input");
		free(d); /* fix: leaked on this path */
		return 1;
	}
	d[bytes_read] = '\0'; /* fix: the +1 slack byte was never written */
	DataNode *dataList = splitData(d, (uint32_t)bytes_read);
	if (dataList == NULL) {
		fprintf(stderr, "Failed to split data.\n");
		free(d); /* fix: leaked on this path */
		return 1;
	}
	const char *firstdata = getDataNodeByIndex(dataList, 1);  // Assume this is text data
	const char *seconddata = getDataNodeByIndex(dataList, 2); // Assume this is image data
	if (firstdata == NULL || seconddata == NULL) {
		fprintf(stderr, "Not enough data.\n");
		freeDataNodes(dataList);
		free(d); /* fix: leaked on this path */
		return 1;
	}
	/* NOTE(review): record payloads are raw byte copies of exactly
	 * dataLength bytes and are NOT NUL-terminated, so "%s" can read past
	 * the record — confirm the producer appends a terminator, or expose a
	 * length accessor in dag_data_split.h and print with a bound. */
	printf("the fistdata %s", firstdata);
	free(d);
	freeDataNodes(dataList);
	return 0;
}

@ -3,7 +3,7 @@ DIST_PREFIX=${CURR_DIR}/dist/
all: clean build all: clean build
build: ck jsmn http-parser hashmap build: ck jsmn http-parser
ck: ck:
mkdir -p ${DIST_PREFIX} mkdir -p ${DIST_PREFIX}
@ -26,4 +26,4 @@ clean:
make -C ck uninstall make -C ck uninstall
rm -rf ${DIST_PREFIX} rm -rf ${DIST_PREFIX}
.PHONY: clean all build ck jsmn http-parser hashmap .PHONY: clean all build ck jsmn http-parser

File diff suppressed because one or more lines are too long

@ -0,0 +1,28 @@
Runtime Environment:
CPU Speed: 2400 MHz
Processor Speed: 2400 MHz
RLIMIT_DATA: Infinite
RLIMIT_NOFILE: 1048576
Core Count: 8
Listener core ID: 1
First Worker core ID: 2
Worker core count: 6
Scheduler Policy: MDL
Sigalrm Policy: BROADCAST
Preemption: Enabled
Quantum: 5000 us
Sandbox Performance Log: /home/hai/sledge/sledge/runtime/tests/runtime_sandbox_perf_log.log
Starting listener thread
Listener core thread: 7ffff7a006c0
Starting 6 worker thread(s)
C: 01, T: 0x7ffff7bfdd80, F: runtime_start_runtime_worker_threads>
Sandboxing environment ready!
C: 01, T: 0x7ffff7bfdd80, F: module_new>
Stack Size: 524288
C: 01, T: 0x7ffff7bfdd80, F: module_new>
Stack Size: 524288
C: 01, T: 0x7ffff7bfdd80, F: module_new>
Stack Size: 524288
C: 01, T: 0x7ffff7bfdd80, F: module_new>
Stack Size: 524288
Loading…
Cancel
Save