Compare commits

...

5 Commits

@ -10,7 +10,6 @@ if [[ $ARCH = "x86_64" ]]; then
elif [[ $ARCH = "aarch64" ]]; then
SHFMT_URL=https://github.com/patrickvane/shfmt/releases/download/master/shfmt_linux_arm
echo "ARM64 support is still a work in progress!"
exit 1
else
echo "This script only supports x86_64 and aarch64"
exit 1
@ -64,8 +63,6 @@ wget $SHFMT_URL -O shfmt && chmod +x shfmt && sudo mv shfmt /usr/local/bin/shfmt
sudo ./install_llvm.sh $LLVM_VERSION
curl -sS -L -O $WASI_SDK_URL && sudo dpkg -i wasi-sdk_12.0_amd64.deb && rm -f wasi-sdk_12.0_amd64.deb
if [ -z "${WASI_SDK_PATH}" ]; then
export WASI_SDK_PATH=/opt/wasi-sdk
echo "export WASI_SDK_PATH=/opt/wasi-sdk" >> ~/.bashrc

@ -94,7 +94,7 @@ BINARY_NAME=sledgert
# This flag tracks the total number of sandboxes in the various states
# It is useful to debug if sandboxes are "getting caught" in a particular state
# CFLAGS += -DSANDBOX_STATE_TOTALS
CFLAGS += -DSANDBOX_STATE_TOTALS
# This flag enables a per-worker atomic count of the sandboxes in its local runqueue, kept in thread-local storage
# Useful to debug if sandboxes are "getting caught" or "leaking" while in a local runqueue

@ -4,7 +4,6 @@
#include <assert.h>
#include "arch/common.h"
#include "current_sandbox.h"
#define ARCH_SIG_JMP_OFF 0x100 /* Based on code generated! */
@ -47,24 +46,20 @@ arch_context_switch(struct arch_context *a, struct arch_context *b)
* Assumption: In the case of a slow context switch, the caller
* set current_sandbox to the sandbox containing the target context
*/
if (b->variant == ARCH_CONTEXT_VARIANT_SLOW) {
/*if (b->variant == ARCH_CONTEXT_VARIANT_SLOW) {
struct sandbox *current = current_sandbox_get();
assert(current != NULL && b == &current->ctxt);
}
}*/
#endif
/* if both a and b are NULL, there is no state change */
assert(a != NULL || b != NULL);
assert(a != NULL && b != NULL);
/* Assumption: The caller does not switch to itself */
assert(a != b);
/* Set any NULLs to worker_thread_base_context to resume execution of main */
if (a == NULL) a = &worker_thread_base_context;
if (b == NULL) b = &worker_thread_base_context;
/* A Transition {Unused, Running} -> Fast */
assert(a->variant == ARCH_CONTEXT_VARIANT_UNUSED || a->variant == ARCH_CONTEXT_VARIANT_RUNNING);
assert(a->variant == ARCH_CONTEXT_VARIANT_RUNNING);
/* B Transition {Fast, Slow} -> Running */
assert(b->variant == ARCH_CONTEXT_VARIANT_FAST || b->variant == ARCH_CONTEXT_VARIANT_SLOW);
@ -87,6 +82,8 @@ arch_context_switch(struct arch_context *a, struct arch_context *b)
"ldr x1, [%[bv]]\n\t"
"sub x1, x1, #2\n\t"
"cbz x1, slow%=\n\t"
"mov x3, #3\n\t"
"str x3, [%[bv]]\n\t" /* b->variant = ARCH_CONTEXT_VARIANT_RUNNING; */
"ldr x0, [%[b]]\n\t"
"ldr x1, [%[b], 8]\n\t"
"mov sp, x0\n\t"
@ -95,8 +92,6 @@ arch_context_switch(struct arch_context *a, struct arch_context *b)
"br %[slowpath]\n\t"
".align 8\n\t"
"reset%=:\n\t"
"mov x1, #3\n\t"
"str x1, [%[bv]]\n\t"
".align 8\n\t"
"exit%=:\n\t"
:
@ -109,6 +104,37 @@ arch_context_switch(struct arch_context *a, struct arch_context *b)
return 0;
}
/**
* Load a new sandbox that preempted an existing sandbox, restoring only the
* instruction pointer and stack pointer registers.
* @param active_context - the context of the current worker thread
* @param sandbox_context - the context that we want to restore
*/
static inline void
arch_context_restore_fast(mcontext_t *active_context, struct arch_context *sandbox_context)
{
assert(active_context != NULL);
assert(sandbox_context != NULL);
/* Assumption: Base Context is only ever used by arch_context_switch */
assert(sandbox_context != &worker_thread_base_context);
assert(sandbox_context->regs[UREG_SP]);
assert(sandbox_context->regs[UREG_IP]);
/* Transitioning from Fast -> Running */
assert(sandbox_context->variant == ARCH_CONTEXT_VARIANT_FAST);
sandbox_context->variant = ARCH_CONTEXT_VARIANT_RUNNING;
//active_context->gregs[REG_RSP] = sandbox_context->regs[UREG_SP];
//active_context->gregs[REG_RIP] = sandbox_context->regs[UREG_IP];
active_context->sp = sandbox_context->regs[UREG_SP];
active_context->pc = sandbox_context->regs[UREG_IP];
}
#else
#warning "Neither AARCH64 nor aarch64 was defined, but aarch64/context.h was included!"
#endif

@ -29,7 +29,7 @@ static inline void
http_route_total_increment_request(struct http_route_total *rm)
{
#ifdef HTTP_ROUTE_TOTAL_COUNTERS
atomic_fetch_add(&rm->total_requests, 1);
//atomic_fetch_add(&rm->total_requests, 1);
#endif
}
@ -37,12 +37,12 @@ static inline void
http_route_total_increment(struct http_route_total *rm, int status_code)
{
#ifdef HTTP_ROUTE_TOTAL_COUNTERS
if (status_code >= 200 && status_code <= 299) {
/*if (status_code >= 200 && status_code <= 299) {
atomic_fetch_add(&rm->total_2XX, 1);
} else if (status_code >= 400 && status_code <= 499) {
atomic_fetch_add(&rm->total_4XX, 1);
} else if (status_code >= 500 && status_code <= 599) {
atomic_fetch_add(&rm->total_5XX, 1);
}
}*/
#endif
}

@ -197,8 +197,6 @@ http_session_set_response_header(struct http_session *session, int status_code)
rc = fprintf(session->response_header.handle, HTTP_RESPONSE_CONTENT_LENGTH,
session->response_body.size);
assert(rc > 0);
rc = fputs(HTTP_RESPONSE_200_OK, session->response_header.handle);
assert(rc != EOF);
}
rc = fputs(HTTP_RESPONSE_TERMINATOR, session->response_header.handle);

@ -39,12 +39,12 @@ static inline void
http_total_increment_response(int status_code)
{
#ifdef HTTP_TOTAL_COUNTERS
if (status_code >= 200 && status_code <= 299) {
/*if (status_code >= 200 && status_code <= 299) {
atomic_fetch_add(&http_total_2XX, 1);
} else if (status_code >= 400 && status_code <= 499) {
atomic_fetch_add(&http_total_4XX, 1);
} else if (status_code >= 500 && status_code <= 599) {
atomic_fetch_add(&http_total_5XX, 1);
}
}*/
#endif
}

@ -8,6 +8,9 @@
#include "runtime.h"
extern uint64_t total_held[1024];
extern uint64_t longest_held[1024];
extern thread_local int thread_id;
/* A linked list of nodes */
struct lock_wrapper {
uint64_t longest_held;
@ -73,9 +76,14 @@ lock_unlock(lock_t *self, lock_node_t *node)
ck_spinlock_mcs_unlock(&self->lock, &node->node);
uint64_t now = __getcycles();
assert(node->time_locked < now);
assert(node->time_locked <= now);
uint64_t duration = now - node->time_locked;
node->time_locked = 0;
if (unlikely(duration > self->longest_held)) { self->longest_held = duration; }
self->total_held += duration;
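/* Mirror the per-lock totals into the per-thread arrays (indexed by thread_id) so they can be reported at shutdown */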
if (unlikely(duration > longest_held[thread_id])) {
longest_held[thread_id] = duration;
}
total_held[thread_id] += duration;
}

@ -60,7 +60,7 @@ static inline void
module_acquire(struct module *module)
{
assert(module->reference_count < UINT32_MAX);
atomic_fetch_add(&module->reference_count, 1);
//atomic_fetch_add(&module->reference_count, 1);
return;
}
@ -158,8 +158,8 @@ module_entrypoint(struct module *module)
static inline void
module_release(struct module *module)
{
assert(module->reference_count > 0);
atomic_fetch_sub(&module->reference_count, 1);
//assert(module->reference_count > 0);
//atomic_fetch_sub(&module->reference_count, 1);
return;
}

@ -55,6 +55,6 @@ static inline void
sandbox_process_scheduler_updates(struct sandbox *sandbox)
{
if (tenant_is_paid(sandbox->tenant)) {
atomic_fetch_sub(&sandbox->tenant->remaining_budget, sandbox->last_state_duration);
//atomic_fetch_sub(&sandbox->tenant->remaining_budget, sandbox->last_state_duration);
}
}

@ -48,6 +48,7 @@ sandbox_perf_log_print_entry(struct sandbox *sandbox)
runtime_processor_speed_MHz);
}
static inline void
sandbox_perf_log_init()
{

@ -34,7 +34,7 @@ sandbox_set_as_initialized(struct sandbox *sandbox, sandbox_state_t last_state)
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
assert(now >= sandbox->timestamp_of.last_state_change);
sandbox->last_state_duration = now - sandbox->timestamp_of.last_state_change;
sandbox->duration_of_state[last_state] += sandbox->last_state_duration;
sandbox->timestamp_of.last_state_change = now;

@ -43,7 +43,7 @@ sandbox_set_as_returned(struct sandbox *sandbox, sandbox_state_t last_state)
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
assert(now >= sandbox->timestamp_of.last_state_change);
sandbox->last_state_duration = now - sandbox->timestamp_of.last_state_change;
sandbox->duration_of_state[last_state] += sandbox->last_state_duration;
sandbox->timestamp_of.last_state_change = now;

@ -39,7 +39,7 @@ sandbox_set_as_running_sys(struct sandbox *sandbox, sandbox_state_t last_state)
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
assert(now >= sandbox->timestamp_of.last_state_change);
sandbox->last_state_duration = now - sandbox->timestamp_of.last_state_change;
sandbox->duration_of_state[last_state] += sandbox->last_state_duration;
sandbox->timestamp_of.last_state_change = now;

@ -35,7 +35,7 @@ sandbox_set_as_running_user(struct sandbox *sandbox, sandbox_state_t last_state)
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
assert(now >= sandbox->timestamp_of.last_state_change);
sandbox->last_state_duration = now - sandbox->timestamp_of.last_state_change;
sandbox->duration_of_state[last_state] += sandbox->last_state_duration;
sandbox->timestamp_of.last_state_change = now;

@ -48,7 +48,7 @@ static inline void
sandbox_state_totals_increment(sandbox_state_t state)
{
#ifdef SANDBOX_STATE_TOTALS
atomic_fetch_add(&sandbox_state_totals[state], 1);
//atomic_fetch_add(&sandbox_state_totals[state], 1);
#endif
}
@ -56,7 +56,7 @@ static inline void
sandbox_state_totals_decrement(sandbox_state_t state)
{
#ifdef SANDBOX_STATE_TOTALS
if (atomic_load(&sandbox_state_totals[state]) == 0) panic("Underflow of %s\n", sandbox_state_stringify(state));
atomic_fetch_sub(&sandbox_state_totals[state], 1);
//if (atomic_load(&sandbox_state_totals[state]) == 0) panic("Underflow of %s\n", sandbox_state_stringify(state));
//atomic_fetch_sub(&sandbox_state_totals[state], 1);
#endif
}

@ -15,5 +15,6 @@ sandbox_total_initialize()
static inline uint64_t
sandbox_total_postfix_increment()
{
return atomic_fetch_add(&sandbox_total, 1) + 1;
//return atomic_fetch_add(&sandbox_total, 1) + 1;
return 1;
}

@ -31,7 +31,7 @@ software_interrupt_mask_signal(int signal)
sigset_t set;
int return_code;
assert(signal == SIGALRM || signal == SIGUSR1 || signal == SIGFPE || signal == SIGSEGV);
assert(signal == SIGALRM || signal == SIGUSR1 || signal == SIGFPE || signal == SIGSEGV || signal == SIGINT);
/* all threads created by the calling thread will have signal blocked */
sigemptyset(&set);
sigaddset(&set, signal);
@ -55,7 +55,7 @@ software_interrupt_unmask_signal(int signal)
sigset_t set;
int return_code;
assert(signal == SIGALRM || signal == SIGUSR1 || signal == SIGFPE || signal == SIGSEGV);
assert(signal == SIGALRM || signal == SIGUSR1 || signal == SIGFPE || signal == SIGSEGV || signal == SIGINT);
/* all threads created by the calling thread will have signal unblocked */
sigemptyset(&set);
sigaddset(&set, signal);

@ -56,7 +56,7 @@ static inline void
software_interrupt_counts_deferred_sigalrm_replay_increment()
{
#ifdef LOG_SOFTWARE_INTERRUPT_COUNTS
atomic_fetch_add(&software_interrupt_counts_deferred_sigalrm_replay[worker_thread_idx], 1);
//atomic_fetch_add(&software_interrupt_counts_deferred_sigalrm_replay[worker_thread_idx], 1);
#endif
}
@ -64,7 +64,7 @@ static inline void
software_interrupt_counts_sigalrm_kernel_increment()
{
#ifdef LOG_SOFTWARE_INTERRUPT_COUNTS
atomic_fetch_add(&software_interrupt_counts_sigalrm_kernel[worker_thread_idx], 1);
//atomic_fetch_add(&software_interrupt_counts_sigalrm_kernel[worker_thread_idx], 1);
#endif
}
@ -72,7 +72,7 @@ static inline void
software_interrupt_counts_sigalrm_thread_increment()
{
#ifdef LOG_SOFTWARE_INTERRUPT_COUNTS
atomic_fetch_add(&software_interrupt_counts_sigalrm_thread[worker_thread_idx], 1);
//atomic_fetch_add(&software_interrupt_counts_sigalrm_thread[worker_thread_idx], 1);
#endif
}
@ -80,7 +80,7 @@ static inline void
software_interrupt_counts_sigfpe_increment()
{
#ifdef LOG_SOFTWARE_INTERRUPT_COUNTS
atomic_fetch_add(&software_interrupt_counts_sigfpe[worker_thread_idx], 1);
//atomic_fetch_add(&software_interrupt_counts_sigfpe[worker_thread_idx], 1);
#endif
}
@ -88,7 +88,7 @@ static inline void
software_interrupt_counts_sigsegv_increment()
{
#ifdef LOG_SOFTWARE_INTERRUPT_COUNTS
atomic_fetch_add(&software_interrupt_counts_sigsegv[worker_thread_idx], 1);
//atomic_fetch_add(&software_interrupt_counts_sigsegv[worker_thread_idx], 1);
#endif
}
@ -96,7 +96,7 @@ static inline void
software_interrupt_counts_sigusr_increment()
{
#ifdef LOG_SOFTWARE_INTERRUPT_COUNTS
atomic_fetch_add(&software_interrupt_counts_sigusr[worker_thread_idx], 1);
//atomic_fetch_add(&software_interrupt_counts_sigusr[worker_thread_idx], 1);
#endif
}

@ -40,7 +40,7 @@ admissions_control_add(uint64_t admissions_estimate)
{
#ifdef ADMISSIONS_CONTROL
assert(admissions_estimate > 0);
atomic_fetch_add(&admissions_control_admitted, admissions_estimate);
//atomic_fetch_add(&admissions_control_admitted, admissions_estimate);
#ifdef LOG_ADMISSIONS_CONTROL
debuglog("Runtime Admitted: %lu / %lu\n", admissions_control_admitted, admissions_control_capacity);

@ -13,6 +13,9 @@
#include "tenant_functions.h"
#include "http_session_perf_log.h"
extern thread_local int thread_id;
extern bool first_request_comming;
time_t t_start;
static void listener_thread_unregister_http_session(struct http_session *http);
static void panic_on_epoll_error(struct epoll_event *evt);
@ -191,6 +194,11 @@ on_client_request_arrival(int client_socket, const struct sockaddr *client_addre
static void
on_client_request_receiving(struct http_session *session)
{
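/* Record the arrival time of the first request so the SIGINT handler can compute elapsed seconds for throughput */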
if (first_request_comming == false){
t_start = time(NULL);
first_request_comming = true;
}
/* Read HTTP request */
int rc = http_session_receive_request(session, (void_star_cb)listener_thread_register_http_session);
if (likely(rc == 0)) {
@ -251,6 +259,8 @@ on_client_request_received(struct http_session *session)
return;
}
//struct timeval t_start,t_end;
//gettimeofday(&t_start, NULL);
/* If the global request scheduler is full, return a 429 to the client */
if (unlikely(global_request_scheduler_add(sandbox) == NULL)) {
debuglog("Failed to add sandbox to global queue\n");
@ -259,6 +269,11 @@ on_client_request_received(struct http_session *session)
http_session_set_response_header(session, 429);
on_client_response_header_sending(session);
}
//gettimeofday(&t_end, NULL);
//long cost = t_end.tv_sec * 1000000 + t_end.tv_usec - t_start.tv_sec * 1000000 - t_start.tv_usec;
//printf("%ld ", cost);
}
static void
@ -401,6 +416,7 @@ on_client_socket_epoll_event(struct epoll_event *evt)
noreturn void *
listener_thread_main(void *dummy)
{
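/* The listener thread uses slot 200 of the per-thread lock accounting arrays (total_held/longest_held) */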
thread_id = 200;
struct epoll_event epoll_events[RUNTIME_MAX_EPOLL_EVENTS];
metrics_server_init();

@ -7,6 +7,7 @@
#include "local_runqueue.h"
static struct local_runqueue_config local_runqueue;
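/* Per-worker count of sandboxes added to this worker's local runqueue; reported when the runtime handles SIGINT */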
thread_local uint32_t total_local_requests = 0;
#ifdef LOG_LOCAL_RUNQUEUE
thread_local uint32_t local_runqueue_count = 0;
@ -27,6 +28,7 @@ void
local_runqueue_add(struct sandbox *sandbox)
{
assert(local_runqueue.add_fn != NULL);
total_local_requests++;
#ifdef LOG_LOCAL_RUNQUEUE
local_runqueue_count++;
#endif

@ -12,6 +12,10 @@
#include "sandbox_functions.h"
#include "runtime.h"
extern thread_local int thread_id;
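/* Per-thread lock hold-time accounting, indexed by thread_id (workers use their worker index; the listener uses slot 200) */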
uint64_t total_held[1024] = {0};
uint64_t longest_held[1024] = {0};
thread_local static struct priority_queue *local_runqueue_minheap;
/**

@ -42,6 +42,8 @@ bool runtime_preemption_enabled = true;
uint32_t runtime_quantum_us = 5000; /* 5ms */
uint64_t runtime_boot_timestamp;
pid_t runtime_pid = 0;
thread_local int thread_id = -1;
bool first_request_comming = false;
/**
* Returns instructions on use of CLI if used incorrectly
@ -448,7 +450,8 @@ main(int argc, char **argv)
printf("Runtime Environment:\n");
runtime_processor_speed_MHz = runtime_get_processor_speed_MHz();
//runtime_processor_speed_MHz = runtime_get_processor_speed_MHz();
runtime_processor_speed_MHz = 1500;
if (unlikely(runtime_processor_speed_MHz == 0)) panic("Failed to detect processor speed\n");
int heading_length = 30;

@ -9,6 +9,7 @@
#include <threads.h>
#include <unistd.h>
#include <ucontext.h>
#include <inttypes.h>
#include "arch/context.h"
#include "current_sandbox.h"
@ -25,6 +26,9 @@
#include "software_interrupt.h"
#include "software_interrupt_counts.h"
extern uint64_t total_held[1024];
extern uint64_t longest_held[1024];
thread_local _Atomic volatile sig_atomic_t handler_depth = 0;
thread_local _Atomic volatile sig_atomic_t deferred_sigalrm = 0;
@ -33,7 +37,9 @@ thread_local _Atomic volatile sig_atomic_t deferred_sigalrm = 0;
**************************************/
extern pthread_t *runtime_worker_threads;
extern thread_local uint32_t total_local_requests;
extern time_t t_start;
extern thread_local int worker_thread_idx;
/**************************
* Private Static Inlines *
*************************/
@ -75,6 +81,33 @@ propagate_sigalrm(siginfo_t *signal_info)
}
}
/**
* A POSIX signal is delivered to only one thread.
* This function broadcasts SIGINT to all other worker threads and to the listener thread
*/
static inline void
sigint_propagate_workers_listener(siginfo_t *signal_info)
{
/* Signal was sent directly by the kernel or from user space, so forward it to the other threads */
if (signal_info->si_code == SI_KERNEL || signal_info->si_code == SI_USER) {
for (int i = 0; i < runtime_worker_threads_count; i++) {
if (pthread_self() == runtime_worker_threads[i]) continue;
/* All threads should have been initialized */
assert(runtime_worker_threads[i] != 0);
pthread_kill(runtime_worker_threads[i], SIGINT);
}
/* send to listener thread */
if (pthread_self() != listener_thread_id) {
pthread_kill(listener_thread_id, SIGINT);
}
} else {
/* Signal forwarded from another thread. Just confirm it resulted from pthread_kill */
assert(signal_info->si_code == SI_TKILL);
}
}
static inline bool
worker_thread_is_running_cooperative_scheduler(void)
{
@ -105,7 +138,7 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
/* Signals should not nest */
assert(handler_depth == 0);
atomic_fetch_add(&handler_depth, 1);
//atomic_fetch_add(&handler_depth, 1);
ucontext_t *interrupted_context = (ucontext_t *)interrupted_context_raw;
struct sandbox *current_sandbox = current_sandbox_get();
@ -139,7 +172,7 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
global_timeout_queue_process_promotions();
}
propagate_sigalrm(signal_info);
atomic_fetch_add(&deferred_sigalrm, 1);
//atomic_fetch_add(&deferred_sigalrm, 1);
}
break;
@ -164,7 +197,7 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
software_interrupt_counts_sigfpe_increment();
if (likely(current_sandbox && current_sandbox->state == SANDBOX_RUNNING_USER)) {
atomic_fetch_sub(&handler_depth, 1);
//atomic_fetch_sub(&handler_depth, 1);
current_sandbox_trap(WASM_TRAP_ILLEGAL_ARITHMETIC_OPERATION);
} else {
panic("Runtime SIGFPE\n");
@ -176,7 +209,7 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
software_interrupt_counts_sigsegv_increment();
if (likely(current_sandbox && current_sandbox->state == SANDBOX_RUNNING_USER)) {
atomic_fetch_sub(&handler_depth, 1);
//atomic_fetch_sub(&handler_depth, 1);
current_sandbox_trap(WASM_TRAP_OUT_OF_BOUNDS_LINEAR_MEMORY);
} else {
panic("Runtime SIGSEGV\n");
@ -184,6 +217,23 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
break;
}
case SIGINT: {
/* Stop the alarm timer first */
software_interrupt_disarm_timer();
sigint_propagate_workers_listener(signal_info);
/* calculate the throughput */
time_t t_end = time(NULL);
double seconds = difftime(t_end, t_start);
//double throughput = atomic_load(&sandbox_state_totals[SANDBOX_COMPLETE]) / seconds;
double throughput = total_local_requests / seconds;
uint32_t total_sandboxes_error = atomic_load(&sandbox_state_totals[SANDBOX_ERROR]);
printf("throughput is %f error request is %u global total request %u worker %d total requests is %u "
"worker total_held %" PRIu64 " longest_held %" PRIu64 " listener total_held %" PRIu64 " longest_held %" PRIu64 "\n",
throughput, total_sandboxes_error, atomic_load(&sandbox_state_totals[SANDBOX_COMPLETE]), worker_thread_idx,
total_local_requests, total_held[worker_thread_idx], longest_held[worker_thread_idx], total_held[200],
longest_held[200]);
fflush(stdout);
pthread_exit(0);
}
default: {
const char *signal_name = strsignal(signal_type);
switch (signal_info->si_code) {
@ -200,7 +250,7 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
}
}
atomic_fetch_sub(&handler_depth, 1);
//atomic_fetch_sub(&handler_depth, 1);
return;
}
@ -268,9 +318,10 @@ software_interrupt_initialize(void)
sigaddset(&signal_action.sa_mask, SIGUSR1);
sigaddset(&signal_action.sa_mask, SIGFPE);
sigaddset(&signal_action.sa_mask, SIGSEGV);
sigaddset(&signal_action.sa_mask, SIGINT);
const int supported_signals[] = { SIGALRM, SIGUSR1, SIGFPE, SIGSEGV };
const size_t supported_signals_len = 4;
const int supported_signals[] = { SIGALRM, SIGUSR1, SIGFPE, SIGSEGV, SIGINT };
const size_t supported_signals_len = 5;
for (int i = 0; i < supported_signals_len; i++) {
int signal = supported_signals[i];

@ -22,6 +22,7 @@
* Worker Thread State *
**************************/
extern thread_local int thread_id;
/* context of the runtime thread before running sandboxes or to resume its "main". */
thread_local struct arch_context worker_thread_base_context;
@ -47,6 +48,7 @@ worker_thread_main(void *argument)
/* Index was passed via argument */
worker_thread_idx = *(int *)argument;
thread_id = worker_thread_idx;
/* Set my priority */
// runtime_set_pthread_prio(pthread_self(), 2);
@ -65,6 +67,9 @@ worker_thread_main(void *argument)
software_interrupt_unmask_signal(SIGFPE);
software_interrupt_unmask_signal(SIGSEGV);
/* Unmask SIGINT signals */
software_interrupt_unmask_signal(SIGINT);
/* Unmask signals, unless the runtime has disabled preemption */
if (runtime_preemption_enabled) {
software_interrupt_unmask_signal(SIGALRM);

@ -0,0 +1,21 @@
#!/bin/bash
# Executes the runtime in GDB
# Substitutes the absolute path from the container with a path relatively derived from the location of this script
# This allows debugging outside of the Docker container
# Also disables pagination and stopping on SIGUSR1
declare project_path="$(
cd "$(dirname "$0")/../.."
pwd
)"
path=$(pwd)
echo "$project_path"
cd "$project_path/runtime/bin" || exit 1
#export SLEDGE_DISABLE_PREEMPTION=true
#export SLEDGE_SANDBOX_PERF_LOG=$path/srsf.log
export LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH"
gdb --eval-command="handle SIGUSR1 nostop" \
--eval-command="set pagination off" \
--eval-command="set substitute-path /sledge/runtime $project_path/runtime" \
--eval-command="run ../tests/fib.json" \
./sledgert

@ -0,0 +1,19 @@
[
{
"name": "gwu",
"port": 10030,
"replenishment-period-us": 0,
"max-budget-us": 0,
"routes": [
{
"route": "/fib",
"path": "empty.wasm.so",
"admissions-percentile": 70,
"expected-execution-us": 4000,
"relative-deadline-us": 16000,
"http-resp-content-type": "text/plain"
}
]
}
]

@ -0,0 +1,19 @@
[
{
"name": "gwu",
"port": 10030,
"replenishment-period-us": 0,
"max-budget-us": 0,
"routes": [
{
"route": "/fib",
"path": "fibonacci.wasm.so",
"admissions-percentile": 70,
"expected-execution-us": 4000,
"relative-deadline-us": 16000,
"http-resp-content-type": "text/plain"
}
]
}
]

@ -0,0 +1,6 @@
#!/bin/bash
pid=$(ps -ef | grep "sledgert" | grep -v grep | awk '{print $2}')
echo "$pid"
sudo kill -2 $pid
sudo kill -9 $pid

@ -0,0 +1,46 @@
import re
import os
import sys
from collections import defaultdict
#get all file names which contain key_str
def file_name(file_dir, key_str):
throughput_table = defaultdict(list)
for root, dirs, files in os.walk(file_dir):
if root != os.getcwd():
continue
for file_i in files:
if file_i.find(key_str) >= 0:
segs = file_i.split('-')
if len(segs) < 3:
continue
#print(file_i)
cores_num = segs[0]
concurrency = segs[2].split(".")[0]
#print("core:", cores_num, " concurrency:", concurrency)
get_values(cores_num, concurrency, file_i, throughput_table)
#file_table[key].append(file_i)
s_result = sorted(throughput_table.items())
for i in range(len(s_result)):
print(s_result[i])
def get_values(core, concurrency, file_name, throughput_table):
cmd='grep "Requests/sec:" %s | awk \'{print $2}\'' % file_name
#cmd='python3 ~/sledge-serverless-framework/runtime/tests/meet_deadline_percentage.py %s 50' % file_name
rt=os.popen(cmd).read().strip()
#print(file_name, rt)
throughput_table[int(core)].append(rt)
if __name__ == "__main__":
import json
argv = sys.argv[1:]
if len(argv) < 1:
print("usage:", sys.argv[0], "<filename keyword>")
sys.exit()
file_name(os.getcwd(), argv[0])
#for key, value in files_tables.items():
# get_values(key, value, miss_deadline_rate, total_latency, running_times, preemption_count, total_miss_deadline_rate)

@ -0,0 +1,65 @@
import re
import os
import sys
from collections import defaultdict
#get all file names which contain key_str
def file_name(file_dir, key_str):
throughput_table = defaultdict(list)
errors_table = defaultdict(list)
for root, dirs, files in os.walk(file_dir):
if root != os.getcwd():
continue
for file_i in files:
if file_i.find(key_str) >= 0:
segs = file_i.split('-')
if len(segs) < 3:
continue
#print(file_i)
cores_num = segs[1]
concurrency = segs[3].split(".")[0]
print("core:", cores_num, " concurrency:", concurrency)
get_values(cores_num, concurrency, file_i, throughput_table, errors_table)
#file_table[key].append(file_i)
s_result = sorted(throughput_table.items())
#for i in range(len(s_result)):
# print(s_result[i], "errors request:", errors_table[s_result[i][0]])
for i in range(len(s_result)):
print(int(float(s_result[i][1][0])), end=" ")
print()
#sys.exit()
def get_values(core, concurrency, file_name, throughput_table, errors_table):
print("parse file:", file_name)
fo = open(file_name, "r+")
total_throughput = 0
for line in fo:
line = line.strip()
if "throughput is" in line:
i_th = float(line.split(" ")[2])
total_throughput += i_th
throughput_table[int(core)].append(total_throughput)
#cmd2='grep "throughput is" %s | awk \'{print $7}\'' % file_name
#rt2=os.popen(cmd2).read().strip()
#if len(rt2) != 0:
# errors = rt2.splitlines()[0]
# errors_table[int(core)].append(int(errors))
#else:
# errors_table[int(core)].append(0)
#print(file_name, rt2)
if __name__ == "__main__":
import json
argv = sys.argv[1:]
if len(argv) < 1:
print("usage:", sys.argv[0], "<filename keyword>")
sys.exit()
file_name(os.getcwd(), argv[0])
#for key, value in files_tables.items():
# get_values(key, value, miss_deadline_rate, total_latency, running_times, preemption_count, total_miss_deadline_rate)

@ -0,0 +1,66 @@
import re
import os
import sys
from collections import defaultdict
#get all file names which contain key_str
def file_name(file_dir, key_str):
throughput_table = defaultdict(list)
errors_table = defaultdict(list)
for root, dirs, files in os.walk(file_dir):
if root != os.getcwd():
continue
for file_i in files:
if file_i.find(key_str) >= 0:
segs = file_i.split('-')
if len(segs) < 3:
continue
#print(file_i)
cores_num = segs[1]
concurrency = segs[3].split(".")[0]
print("core:", cores_num, " concurrency:", concurrency)
get_values(cores_num, concurrency, file_i, throughput_table, errors_table)
#file_table[key].append(file_i)
s_result = sorted(throughput_table.items())
#for i in range(len(s_result)):
# print(s_result[i], "errors request:", errors_table[s_result[i][0]])
for i in range(len(s_result)):
print(int(float(s_result[i][1][0])), end=" ")
print()
#sys.exit()
def get_values(core, concurrency, file_name, throughput_table, errors_table):
print("parse file:", file_name)
fo = open(file_name, "r+")
total_throughput = 0
for line in fo:
line = line.strip()
if "throughput is" in line:
i_th = float(line.split(" ")[2].split(",")[0])
#total_throughput += i_th
throughput_table[int(core)].append(i_th)
break
#cmd2='grep "throughput is" %s | awk \'{print $7}\'' % file_name
#rt2=os.popen(cmd2).read().strip()
#if len(rt2) != 0:
# errors = rt2.splitlines()[0]
# errors_table[int(core)].append(int(errors))
#else:
# errors_table[int(core)].append(0)
#print(file_name, rt2)
if __name__ == "__main__":
import json
argv = sys.argv[1:]
if len(argv) < 1:
print("usage:", sys.argv[0], "<filename keyword>")
sys.exit()
file_name(os.getcwd(), argv[0])
#for key, value in files_tables.items():
# get_values(key, value, miss_deadline_rate, total_latency, running_times, preemption_count, total_miss_deadline_rate)

@ -0,0 +1,41 @@
#!/bin/bash
function usage {
echo "$0 [cpu cores]"
exit 1
}
if [ $# != 1 ] ; then
usage
exit 1;
fi
core_num=$1
declare project_path="$(
cd "$(dirname "$0")/../.."
pwd
)"
echo $project_path
path=`pwd`
#export SLEDGE_DISABLE_PREEMPTION=true
#export SLEDGE_SIGALRM_HANDLER=TRIAGED
export SLEDGE_NWORKERS=$core_num
#export SLEDGE_SCHEDULER=EDF
#export SLEDGE_SANDBOX_PERF_LOG=/home/lyuxiaosu/sledge-serverless-framework/runtime/tests/server.log
#echo $SLEDGE_SANDBOX_PERF_LOG
cd $project_path/runtime/bin
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_fibonacci.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_big_fibonacci.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_armcifar10.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_png2bmp.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_image_processing.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/mulitple_linear_chain.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing3.json
LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/fib.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/empty.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_fibonacci.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_sodresize.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_sodresize.json

@ -0,0 +1,37 @@
#!/bin/bash
function usage {
echo "$0 [concurrency] [fib number]"
exit 1
}
if [ $# != 2 ] ; then
usage
exit 1;
fi
#cores_list=(1 2 4 10 20 40 50 60 77)
#cores_list=(50 60)
#cores_list=(1 2 4 6 8 10 20 30 40 50 60 70 77)
concurrency=$1
fib_num=$2
echo "fib num is $fib_num"
cores_list=(1 2 4 6 8 10 12 14 16 18 20 24 28 32 36 40 44 48 52 56 60 64 68 72 77)
#cores_list=(1)
ulimit -n 655350
./kill_sledge.sh
for(( i=0;i<${#cores_list[@]};i++ )) do
hey_log=${cores_list[i]}"-$fib_num-$concurrency.log" #8-38-100
server_log="server-"${cores_list[i]}"-$fib_num-$concurrency.log"
#./start.sh ${cores_list[i]} >/dev/null 2>&1 &
./start.sh ${cores_list[i]} > $server_log 2>&1 &
echo "sledge start with worker core ${cores_list[i]}"
taskset --cpu-list 80-159 hey -disable-compression -disable-keepalive -disable-redirects -z "60"s -c "$concurrency" -m POST -d "$fib_num" "http://127.0.0.1:10030/fib" > $hey_log
#taskset --cpu-list 80-159 hey -disable-compression -disable-keepalive -disable-redirects -z "60"s -c "$concurrency" "http://127.0.0.1:10030/fib" > $hey_log
./kill_sledge.sh
done
folder_name="$fib_num""_c$concurrency"
mkdir $folder_name
mv *.log $folder_name

@ -0,0 +1,9 @@
#!/bin/bash
#concurrency_list=(50 100 200 300 400 1000)
concurrency_list=(200 300 400 1000)
for(( i=0;i<${#concurrency_list[@]};i++ )) do
./test.sh ${concurrency_list[i]} 15
done