diff --git a/data.txt b/data.txt new file mode 100644 index 0000000..48082f7 --- /dev/null +++ b/data.txt @@ -0,0 +1 @@ +12 diff --git a/hey.txt b/hey.txt index 7bfbcb6..8cf019c 100644 --- a/hey.txt +++ b/hey.txt @@ -1,46 +1,247 @@ Summary: - Total: 10.1989 secs - Slowest: 0.2733 secs - Fastest: 0.0142 secs - Average: 0.2073 secs - Requests/sec: 571.9239 + Total: 10.0240 secs + Slowest: 1.1570 secs + Fastest: 0.0692 secs + Average: 0.7488 secs + Requests/sec: 3390.6720 - Total data: 268318 bytes - Size/request: 46 bytes + Total data: 33168 bytes + Size/request: 48 bytes Response time histogram: - 0.014 [1] | - 0.040 [9] | - 0.066 [15] | - 0.092 [13] | - 0.118 [13] | - 0.144 [14] | - 0.170 [15] | - 0.196 [827] |■■■■■■■■ - 0.222 [3956] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■ - 0.247 [943] |■■■■■■■■■■ - 0.273 [27] | + 0.069 [1] | + 0.178 [16] |■■ + 0.287 [15] |■■ + 0.396 [10] |■ + 0.504 [8] |■ + 0.613 [10] |■ + 0.722 [391] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■ + 0.831 [43] |■■■■ + 0.939 [26] |■■■ + 1.048 [69] |■■■■■■■ + 1.157 [102] |■■■■■■■■■■ Latency distribution: - 10% in 0.1929 secs - 25% in 0.1991 secs - 50% in 0.2068 secs - 75% in 0.2167 secs - 90% in 0.2276 secs - 95% in 0.2334 secs - 99% in 0.2433 secs + 10% in 0.6506 secs + 25% in 0.6649 secs + 50% in 0.6792 secs + 75% in 0.9356 secs + 90% in 1.0743 secs + 95% in 1.0935 secs + 99% in 1.1200 secs Details (average, fastest, slowest): - DNS+dialup: 0.0004 secs, 0.0142 secs, 0.2733 secs + DNS+dialup: 0.0073 secs, 0.0692 secs, 1.1570 secs DNS-lookup: 0.0000 secs, 0.0000 secs, 0.0000 secs - req write: 0.0001 secs, 0.0000 secs, 0.0164 secs - resp wait: 0.2063 secs, 0.0050 secs, 0.2712 secs - resp read: 0.0001 secs, 0.0000 secs, 0.0148 secs + req write: 0.0016 secs, 0.0000 secs, 0.0855 secs + resp wait: 0.7388 secs, 0.0480 secs, 1.1547 secs + resp read: 0.0004 secs, 0.0001 secs, 0.0047 secs Status code distribution: - [200] 5833 responses - + [200] 691 responses +Error distribution: + [5] Post "http://127.0.0.1:10000": EOF + [33093] Post "http://127.0.0.1:10000": dial tcp 127.0.0.1:10000: connect: connection refused + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57822->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57830->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57838->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57840->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57850->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57856->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57864->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57876->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57882->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57894->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57906->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57918->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57920->127.0.0.1:10000: read: connection reset by peer + [1] 
Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57934->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57938->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57948->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57958->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57964->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57978->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:57986->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58000->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58008->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58022->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58036->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58052->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58056->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58068->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58082->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58084->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58098->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58104->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58116->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58120->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58134->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58148->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58154->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58170->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58178->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58186->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58190->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58192->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58194->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58198->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58212->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58220->127.0.0.1:10000: read: connection reset by peer + [1] 
Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58230->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58232->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58242->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58246->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58250->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58266->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58270->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58278->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58288->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58300->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58314->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58326->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58332->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58346->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58356->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58366->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58374->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58382->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58392->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58400->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58414->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58416->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58426->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58438->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58450->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58464->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58478->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58492->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58494->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58498->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58508->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58522->127.0.0.1:10000: read: connection reset by peer + [1] 
Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58532->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58546->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58562->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58576->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58588->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58590->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58592->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58594->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58610->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58618->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58634->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58638->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58642->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58648->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58658->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58672->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58688->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58700->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58714->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58730->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58734->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58738->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58744->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58754->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58768->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58780->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58786->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58802->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58816->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58830->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58840->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58848->127.0.0.1:10000: read: connection reset by peer + [1] 
Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58856->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58864->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58876->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58890->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58892->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58908->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58910->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58922->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58928->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58940->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58942->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58948->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58950->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58958->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58966->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58978->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:58994->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59008->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59012->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59016->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59032->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59034->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59044->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59056->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59068->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59078->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59086->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59096->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59108->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59110->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59112->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59120->127.0.0.1:10000: read: connection reset by peer + [1] 
Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59132->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59134->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59140->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59146->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59148->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59152->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59156->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59172->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59180->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59194->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59202->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59218->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59232->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59248->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59264->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59272->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59284->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59300->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59308->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59322->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59332->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59340->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59344->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59352->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59362->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59364->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59376->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59388->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59390->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59392->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59408->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59414->127.0.0.1:10000: read: connection reset by peer + [1] 
Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59420->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59430->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59438->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59454->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59456->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59458->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59462->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59478->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59484->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59486->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59492->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59498->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59512->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59518->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59522->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59536->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59540->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59556->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59562->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59572->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59578->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59592->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59606->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59614->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59618->127.0.0.1:10000: read: connection reset by peer + [1] Post "http://127.0.0.1:10000": read tcp 127.0.0.1:59622->127.0.0.1:10000: read: connection reset by peer diff --git a/runtime/include/sandbox_request.h b/runtime/include/sandbox_request.h index 5add431..b510284 100644 --- a/runtime/include/sandbox_request.h +++ b/runtime/include/sandbox_request.h @@ -12,7 +12,14 @@ #include "module.h" #include "runtime.h" #include "sandbox_state.h" +#include "lock.h" +struct sandbox_pre_functions_output { + char * previous_function_output; + ssize_t output_length; + uint32_t run_priority; + struct sandbox_pre_functions_output * next; +}; struct sandbox_request { uint64_t id; bool request_from_outside; /* true is yes, false is no */ @@ -25,6 +32,8 @@ struct sandbox_request { uint64_t absolute_deadline; /* cycles */ uint64_t 
last_update_timestamp; /* cycles */
 	uint64_t  remaining_slack;        /* cycles */
+	struct sandbox_pre_functions_output *pre_functions_output;
+	pthread_spinlock_t                   lock;
 	char *    previous_function_output;
 	ssize_t   output_length;
 	ssize_t   previous_request_length;
@@ -96,6 +105,9 @@ sandbox_request_allocate(struct module *module, bool request_from_outside, ssize
 	sandbox_request->last_update_timestamp = enqueue_timestamp;
 	sandbox_request->remaining_slack       = remaining_slack;
+	/* Start with an empty pre-function output list and initialize its lock */
+	sandbox_request->pre_functions_output = NULL;
+	pthread_spin_init(&sandbox_request->lock, PTHREAD_PROCESS_PRIVATE);
 	/*
 	 * Admissions Control State
 	 * Assumption: an estimate of 0 should have been interpreted as a rejection
@@ -107,3 +119,98 @@ sandbox_request_allocate(struct module *module, bool request_from_outside, ssize
 	return sandbox_request;
 }
+
+/**
+ * Allocate a new node for the list of previous function outputs and insert it so the list
+ * stays sorted by run_priority (ascending); run_priority must be less than 6.
+ * The node takes ownership of the caller's output buffer; it is freed in sandbox_allocate().
+ * Callers that share the request across threads serialize calls via request->lock.
+ * @param request the sandbox_request whose pre_functions_output list receives the node
+ * @param output the output of the previous function
+ * @param output_length the length of the output
+ * @param run_priority the run_priority of the sandbox->module->run_priority
+ **/
+static inline void
+pre_functions_output_request_add(struct sandbox_request *request, char *output, ssize_t output_length, uint32_t run_priority)
+{
+	assert(run_priority < 6);
+	if (!output || output_length <= 0) {
+		debuglog("output is null or output_length is <= 0");
+		return;
+	}
+	struct sandbox_pre_functions_output **head     = &request->pre_functions_output;
+	struct sandbox_pre_functions_output  *new_node = (struct sandbox_pre_functions_output *)malloc(sizeof(struct sandbox_pre_functions_output));
+	if (!new_node) {
+		panic("Could not allocate memory for new node");
+	}
+
+	/* Take ownership of the caller's buffer rather than copying it */
+	new_node->previous_function_output = output;
+	new_node->output_length            = output_length;
+	new_node->run_priority             = run_priority;
+	new_node->next                     = NULL;
+
+	if (*head == NULL || (*head)->run_priority >= run_priority) {
+		new_node->next = *head;
+		*head          = new_node;
+	} else {
+		struct sandbox_pre_functions_output *current = *head;
+		while (current->next && current->next->run_priority < run_priority) {
+			current = current->next;
+		}
+		new_node->next = current->next;
+		current->next  = new_node;
+	}
+}
+
+/**
+ * Splice the buffered pre-function outputs into a single buffer, separated by '&'
+ **/
+static inline void
+concatenate_outputs(struct sandbox_request *request) {
+	if (request->pre_functions_output == NULL) return; /* nothing buffered; keep any existing output */
+
+	size_t total_length = 0; /* total length of all buffered outputs, no extra byte for a null character */
+	struct sandbox_pre_functions_output *current = request->pre_functions_output;
+
+	while (current != NULL) {
+		total_length += current->output_length;
+		current = current->next;
+	}
+
+	char *concatenated_output = (char *)malloc(total_length);
+	if (!concatenated_output) {
+		panic("Could not allocate memory for concatenated output");
+		return;
+	}
+
+	char *copy_dest = concatenated_output;
+	current = request->pre_functions_output;
+
+	while (current != NULL) {
+		size_t copy_length = current->output_length;
+		if (current->next) {
+			memcpy(copy_dest,
current->previous_function_output, copy_length - 1); + copy_dest[copy_length - 1] = '&'; + copy_dest += copy_length; + } else { + memcpy(copy_dest, current->previous_function_output, copy_length); + copy_dest += copy_length; + } + current = current->next; + } + + + if (request->previous_function_output != NULL) { + free(request->previous_function_output); + } + request->output_length = total_length; + request->previous_function_output = concatenated_output; +} + diff --git a/runtime/include/sandbox_set_as_initialized.h b/runtime/include/sandbox_set_as_initialized.h index 59169b1..f3a6f6a 100644 --- a/runtime/include/sandbox_set_as_initialized.h +++ b/runtime/include/sandbox_set_as_initialized.h @@ -35,6 +35,7 @@ sandbox_set_as_initialized(struct sandbox *sandbox, struct sandbox_request *sand sandbox->state = SANDBOX_SET_AS_INITIALIZED; sandbox->request_from_outside = sandbox_request->request_from_outside; + concatenate_outputs(sandbox_request); sandbox->previous_function_output = sandbox_request->previous_function_output; sandbox->output_length = sandbox_request->output_length; sandbox->previous_request_length = sandbox_request->previous_request_length; diff --git a/runtime/src/current_sandbox.c b/runtime/src/current_sandbox.c index 056bd45..052708b 100644 --- a/runtime/src/current_sandbox.c +++ b/runtime/src/current_sandbox.c @@ -176,11 +176,12 @@ current_sandbox_start(void) next_module_node->name, sandbox->client_socket_descriptor, (const struct sockaddr *)&sandbox->client_address, sandbox->request_arrival_timestamp, enqueue_timestamp, - sandbox->remaining_slack, true, individual_pre_func_output, output_length); - /* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate() - * will busy-wait to generate an unique id, should we optimize it here? - */ - sandbox_request->id = sandbox->id; + sandbox->remaining_slack, true, NULL, 0); + /* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate() + * will busy-wait to generate an unique id, should we optimize it here? + */ + sandbox_request->id = sandbox->id; + pre_functions_output_request_add(sandbox_request, individual_pre_func_output, output_length, sandbox->module->run_priority); #ifdef OPT_AVOID_GLOBAL_QUEUE /* TODO: The running time of the current sandbox contains the next sandbox's initialization time, does it matter? 
*/ if (sandbox->absolute_deadline == sandbox_request->absolute_deadline) { @@ -198,8 +199,8 @@ current_sandbox_start(void) global_request_scheduler_add(sandbox_request); } #else - /* Add to the Global Sandbox Request Scheduler */ - global_request_scheduler_add(sandbox_request); + /* Add to the Global Sandbox Request Scheduler */ + global_request_scheduler_add(sandbox_request); } #endif /* Remove the client fd from epoll if it is the first sandbox in the chain */ @@ -227,6 +228,7 @@ current_sandbox_start(void) assert(cur_request_id); snprintf(cur_request_id, key_len, "%s%lu", next_module_node->name, sandbox->id); uint32_t ret_value_len; + uint32_t rest_pre_count = 888; /*calculation the pre_function_out*/ ssize_t output_length = sandbox->request_response_data_length - sandbox->request_length; char * pre_func_output = (char *)malloc(output_length); @@ -237,8 +239,9 @@ current_sandbox_start(void) memcpy(pre_func_output, sandbox->request_response_data + sandbox->request_length, output_length); //debuglog("the ID %lu %s pre_func_output is %s\n", sandbox->id, sandbox->module->name, pre_func_output); LOCK_LOCK(&lock); - uint64_t *requet_id = (uint64_t *)map_get(sandbox_request_id, cur_request_id, strlen(cur_request_id), &ret_value_len); - if (!requet_id) { + uint64_t *request_id = (uint64_t *)map_get(sandbox_request_id, cur_request_id, strlen(cur_request_id), &ret_value_len); + bool mapflag = false; + if (!request_id) { uint64_t enqueue_timestamp = __getcycles(); //mem_log("time %lu request id:%d executing, name:%s remaining slack %lu\n", current_rs, // sandbox->id, sandbox->module->name, sandbox->remaining_slack); @@ -247,7 +250,7 @@ current_sandbox_start(void) next_module_node->name, sandbox->client_socket_descriptor, (const struct sockaddr *)&sandbox->client_address, sandbox->request_arrival_timestamp, enqueue_timestamp, - sandbox->remaining_slack, true, pre_func_output, output_length); + sandbox->remaining_slack, true, NULL, 0); /* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate() * will busy-wait to generate an unique id, should we optimize it here? 
*/ @@ -258,85 +261,38 @@ current_sandbox_start(void) assert(module_pre_count); map_set(sandbox_request_id, cur_request_id, strlen(cur_request_id), &module_pre_count, sizeof(uint32_t), true); map_set(sandbox_req_map, cur_request_id, strlen(cur_request_id), sandbox_request, sizeof(struct sandbox_request *), false); - free(cur_request_id); - cur_request_id = NULL; + mapflag = true; } - else + LOCK_UNLOCK(&lock); + struct sandbox_request *sandbox_request = map_get(sandbox_req_map, cur_request_id, strlen(cur_request_id), &ret_value_len); + if(!sandbox_request) panic("the map of sandbox_request is NULL\n"); + if (mapflag) { - uint32_t rest_pre_count = *requet_id; - assert(rest_pre_count >= 1); - struct sandbox_request *sandbox_request = map_get(sandbox_req_map, cur_request_id, strlen(cur_request_id), &ret_value_len); - assert(sandbox_request); - - /*ssize_t new_output_length = sandbox_request->output_length + output_length + 1; - char *new_output = (char *)malloc(new_output_length); - if (!new_output) { - fprintf(stderr, "Failed to allocate memory for the new output: %s\n", strerror(errno)); - goto err; - } - memset(new_output, 0, new_output_length); - if (sandbox->module->run_priority == 1) - { - snprintf(new_output, new_output_length, "%s&%s", sandbox_request->previous_function_output, pre_func_output); - - }else{ - snprintf(new_output, new_output_length, "%s&%s", pre_func_output, sandbox_request->previous_function_output); - } - if(sandbox_request->previous_function_output != NULL) - { - free(sandbox_request->previous_function_output); - sandbox_request->previous_function_output = NULL; + pre_functions_output_request_add(sandbox_request, pre_func_output, output_length, sandbox->module->run_priority); + }else{ + pthread_spin_lock(&sandbox_request->lock); + pre_functions_output_request_add(sandbox_request, pre_func_output, output_length, sandbox->module->run_priority); + if (!request_id) { + panic("Request ID not found or invalid\n"); + }else { + rest_pre_count = *request_id; } - assert(new_output); - sandbox_request->previous_function_output = new_output; - debuglog("the ID %lu %s the merge_output is %s\n", sandbox->id, sandbox->module->name, new_output); - free(pre_func_output); - pre_func_output = NULL; - sandbox_request->output_length = new_output_length;*/ - ssize_t new_output_length = sandbox_request->output_length + output_length; - char *new_output = (char *)malloc(new_output_length); - if (!new_output) { - fprintf(stderr, "Failed to allocate memory for the new output: %s\n", strerror(errno)); - goto err; - } - memset(new_output, 0, new_output_length); - if (sandbox->module->run_priority == 1) - { - memcpy(new_output, pre_func_output, output_length - 1); - new_output[output_length - 1] = '&'; - memcpy(new_output + output_length, sandbox_request->previous_function_output, sandbox_request->output_length); - }else{ - memcpy(new_output, sandbox_request->previous_function_output, sandbox_request->output_length-1); - new_output[sandbox_request->output_length-1] = '&'; - memcpy(new_output + sandbox_request->output_length + 1, pre_func_output, output_length); - } - if(sandbox_request->previous_function_output != NULL) - { - free(sandbox_request->previous_function_output); - sandbox_request->previous_function_output = NULL; - } - //debuglog("the ID %lu %s the merge_output is %s\n", sandbox->id, sandbox->module->name, new_output); - sandbox_request->previous_function_output = new_output; - sandbox_request->output_length = new_output_length; - free(pre_func_output); - pre_func_output = NULL; - + 
if(rest_pre_count == 888) panic("the rest_pre_count is not get requst_id\n"); rest_pre_count--; if (rest_pre_count != 0) { - map_upsert(sandbox_request_id, cur_request_id, strlen(cur_request_id), &rest_pre_count, sizeof(uint32_t)); - }else - { + map_upsert(sandbox_request_id, cur_request_id, strlen(cur_request_id), &rest_pre_count, sizeof(uint32_t)); + }else{ uint64_t enqueue_timestamp = __getcycles(); sandbox_request->enqueue_timestamp = enqueue_timestamp; global_request_scheduler_add(sandbox_request); map_delete(sandbox_req_map, cur_request_id, strlen(cur_request_id)); map_delete(sandbox_request_id, cur_request_id, strlen(cur_request_id)); } - free(cur_request_id); - cur_request_id = NULL; + pthread_spin_unlock(&sandbox_request->lock); } - LOCK_UNLOCK(&lock); + free(cur_request_id); + cur_request_id = NULL; if (sandbox->request_from_outside) { sandbox_remove_from_epoll(sandbox); } diff --git a/runtime/src/listener_thread.c b/runtime/src/listener_thread.c index 1187557..305ed37 100644 --- a/runtime/src/listener_thread.c +++ b/runtime/src/listener_thread.c @@ -189,7 +189,6 @@ listener_thread_main(void *dummy) { struct module *current_module = queue[front++]; estimated_execution_time += admission_info_get_percentile(¤t_module->admissions_info); - debuglog("Estimated execution time for module %s is %lu\n", current_module->name, estimated_execution_time); for (int i = 0; i < current_module->next_module_count; i++) { if (current_module->next_module[i] != NULL && !current_module->next_module[i]->runtime_visited) { diff --git a/runtime/src/module.c b/runtime/src/module.c index 321baed..da8388a 100644 --- a/runtime/src/module.c +++ b/runtime/src/module.c @@ -602,17 +602,20 @@ module_new_from_json(char *file_name) relative_deadline_us, port, request_size, response_size, admissions_percentile, expected_execution_us); if (module == NULL) goto module_new_err; - module->next_module_names = malloc(next_module_count * sizeof(struct module*)); - for (int i = 0; i < next_module_count; i++) { - module->next_module_names[i] = strdup(next_module_names[i]); - if (module->next_module_names[i] == NULL) { - fprintf(stderr, "Memory allocation failed for next_module_names[%d].\n", i); - exit(EXIT_FAILURE); - } - } - if (module->next_module_names == NULL) + if(next_module_count == 0) + { + module->next_module_names = NULL; + + }else { - panic("Failed to allocate memory for next_module_names"); + module->next_module_names = malloc(next_module_count * sizeof(char*)); + if (module->next_module_names == NULL) panic("Failed to allocate memory for next_module_names"); + for (int i = 0; i < next_module_count; i++) { + module->next_module_names[i] = strdup(next_module_names[i]); + if (module->next_module_names[i] == NULL) { + panic("Memory allocation failed for next_module_names[%d].\n", i); + } + } } module->next_module_count = next_module_count; @@ -671,6 +674,14 @@ module_new_from_json(char *file_name) } } } + /*Avoid module memory copy overhead*/ + for(int i = 0; i < module_count; i++) { + for (int j = 0; j < nodes[i]->next_module_count; j++) + { + free(nodes[i]->next_module_names[j]); + } + nodes[i]->next_module_names = NULL; + } free(nodes); nodes = NULL; #ifdef LOG_MODULE_LOADING diff --git a/runtime/src/sandbox.c b/runtime/src/sandbox.c index 9769d71..ecd9e7a 100644 --- a/runtime/src/sandbox.c +++ b/runtime/src/sandbox.c @@ -152,6 +152,16 @@ sandbox_allocate(struct sandbox_request *sandbox_request) /* Set state to initializing */ sandbox_set_as_initialized(sandbox, sandbox_request, now); + struct 
sandbox_pre_functions_output *current = sandbox_request->pre_functions_output; + struct sandbox_pre_functions_output *next = NULL; + while (current) { + next = current->next; + free(current->previous_function_output); + free(current); + current = next; + } + sandbox_request->pre_functions_output = NULL; + pthread_spin_destroy(&sandbox_request->lock); free(sandbox_request); done: return sandbox; diff --git a/sledge.log b/sledge.log deleted file mode 100755 index 3670221..0000000 --- a/sledge.log +++ /dev/null @@ -1,44 +0,0 @@ -Runtime Environment: - CPU Speed: 2400 MHz - Processor Speed: 2400 MHz - RLIMIT_DATA: Infinite - RLIMIT_NOFILE: 1048576 (Increased from 8192) - Core Count: 8 - Listener core ID: 1 - First Worker core ID: 2 - Worker core count: 6 - Scheduler Policy: EDF - Sigalrm Policy: BROADCAST - Preemption: Enabled - Quantum: 5000 us - Sandbox Performance Log: /home/hai/sledge-old/runtime_sandbox_perf_log.log -Starting listener thread - Listener core thread: 7ffff7a006c0 -Starting 6 worker thread(s) -C: 01, T: 0x7ffff7bfdd80, F: runtime_start_runtime_worker_threads> - Sandboxing environment ready! - -C: 01, T: 0x7ffff7bfdd80, F: module_new> - Stack Size: 524288 -C: 01, T: 0x7ffff7bfdd80, F: module_new> - Stack Size: 524288 -C: 01, T: 0x7ffff7bfdd80, F: module_new> - Stack Size: 524288 -C: 01, T: 0x7ffff7bfdd80, F: module_new> - Stack Size: 524288 -C: 03, T: 0x7ffff66006c0, F: current_sandbox_start> - the ID 0 work2 pre_func_output is 10 - - -C: 02, T: 0x7ffff70006c0, F: current_sandbox_start> - the ID 0 work3 pre_func_output is 10 - - -C: 02, T: 0x7ffff70006c0, F: current_sandbox_start> - the ID (the output need to be merged) 0 work3 pre_func_output is 10 - - -C: 02, T: 0x7ffff70006c0, F: current_sandbox_start> - the ID 0 work3 the merge_output is 10&10 - -
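
Reviewer note: the ordered-insert and splice semantics introduced in sandbox_request.h are easier to check in isolation. Below is a minimal, self-contained sketch of that behavior. The struct pre_output type, pre_output_add(), splice(), the sample strings, and the main() driver are illustrative stand-ins (not the runtime's sandbox_pre_functions_output API): nodes are kept in ascending run_priority order and then joined with '&' by overwriting the last byte of each non-final segment, mirroring concatenate_outputs(). Build with something like `cc demo.c` (file name assumed).

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

/* Trimmed-down stand-in for struct sandbox_pre_functions_output (illustration only). */
struct pre_output {
	char              *buf;
	ssize_t            len;
	uint32_t           run_priority;
	struct pre_output *next;
};

/* Insert keeping the list sorted by run_priority (ascending), as in
 * pre_functions_output_request_add(); the node takes ownership of buf. */
static void
pre_output_add(struct pre_output **head, char *buf, ssize_t len, uint32_t run_priority)
{
	struct pre_output *node = malloc(sizeof(*node));
	assert(node != NULL);
	node->buf          = buf;
	node->len          = len;
	node->run_priority = run_priority;
	node->next         = NULL;

	if (*head == NULL || (*head)->run_priority >= run_priority) {
		node->next = *head;
		*head      = node;
	} else {
		struct pre_output *cur = *head;
		while (cur->next && cur->next->run_priority < run_priority) cur = cur->next;
		node->next = cur->next;
		cur->next  = node;
	}
}

/* Splice all buffered outputs, replacing the last byte of each non-final
 * segment with '&', mirroring concatenate_outputs(). */
static char *
splice(struct pre_output *head, size_t *out_len)
{
	size_t total = 0;
	for (struct pre_output *cur = head; cur; cur = cur->next) total += cur->len;

	char *out = malloc(total + 1); /* +1 only so this demo can print the result as a string */
	assert(out != NULL);
	char *dst = out;
	for (struct pre_output *cur = head; cur; cur = cur->next) {
		if (cur->next) {
			memcpy(dst, cur->buf, cur->len - 1);
			dst[cur->len - 1] = '&'; /* '&' overwrites the segment's trailing byte */
		} else {
			memcpy(dst, cur->buf, cur->len);
		}
		dst += cur->len;
	}
	out[total] = '\0';
	*out_len   = total;
	return out;
}

int
main(void)
{
	struct pre_output *head = NULL;
	/* Sample segments end in '\n', so the '&' overwrites that trailing newline. */
	pre_output_add(&head, strdup("20\n"), 3, 3);
	pre_output_add(&head, strdup("10\n"), 3, 1);

	size_t len    = 0;
	char  *merged = splice(head, &len);
	printf("merged (%zu bytes): %s", len, merged); /* expected: "10&20\n" */

	/* Free the list as sandbox_allocate() does for the real structure. */
	while (head) {
		struct pre_output *next = head->next;
		free(head->buf);
		free(head);
		head = next;
	}
	free(merged);
	return 0;
}
```

One design note: the '&' overwrite only round-trips cleanly when each segment's last byte is disposable (e.g. a trailing newline); if a pre-function output does not end with such a byte, its final byte is lost, which may be worth a comment or an explicit separator byte in concatenate_outputs().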
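
Reviewer note: the map/lock bookkeeping in current_sandbox_start() amounts to a fan-in barrier: the first predecessor to finish creates the downstream request and records how many predecessors remain, later predecessors buffer their output under request->lock and decrement that count, and whoever brings it to zero enqueues the request and removes the map entries. A minimal sketch of just that counting pattern, using plain pthreads and illustrative names (struct fanin, fanin_arrive) rather than the runtime's map_* and scheduler calls; the arrivals in main() are sequential only to show the return values. Build with something like `cc fanin.c -lpthread` (file name assumed).

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One downstream invocation waiting on several predecessor functions (illustration only). */
struct fanin {
	pthread_spinlock_t lock;
	uint32_t           remaining; /* predecessors that have not arrived yet */
};

static void
fanin_init(struct fanin *f, uint32_t predecessor_count)
{
	pthread_spin_init(&f->lock, PTHREAD_PROCESS_PRIVATE);
	f->remaining = predecessor_count;
}

/* Called by each predecessor after it has buffered its output.
 * Returns true for exactly one caller: the one that must enqueue the request. */
static bool
fanin_arrive(struct fanin *f)
{
	pthread_spin_lock(&f->lock);
	uint32_t left = --f->remaining;
	pthread_spin_unlock(&f->lock);
	return left == 0;
}

int
main(void)
{
	struct fanin f;
	fanin_init(&f, 3); /* e.g. three upstream modules all feed the next module */

	for (int i = 0; i < 3; i++) {
		if (fanin_arrive(&f))
			printf("predecessor %d enqueues the downstream request\n", i);
		else
			printf("predecessor %d only buffered its output\n", i);
	}
	pthread_spin_destroy(&f.lock);
	return 0;
}
```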