Compare commits

...

5 Commits

@ -10,7 +10,6 @@ if [[ $ARCH = "x86_64" ]]; then
elif [[ $ARCH = "aarch64" ]]; then
SHFMT_URL=https://github.com/patrickvane/shfmt/releases/download/master/shfmt_linux_arm
echo "ARM64 support is still a work in progress!"
exit 1
else
echo "This script only supports x86_64 and aarch64"
exit 1
@ -64,8 +63,6 @@ wget $SHFMT_URL -O shfmt && chmod +x shfmt && sudo mv shfmt /usr/local/bin/shfmt
sudo ./install_llvm.sh $LLVM_VERSION
curl -sS -L -O $WASI_SDK_URL && sudo dpkg -i wasi-sdk_12.0_amd64.deb && rm -f wasi-sdk_12.0_amd64.deb
if [ -z "${WASI_SDK_PATH}" ]; then
export WASI_SDK_PATH=/opt/wasi-sdk
echo "export WASI_SDK_PATH=/opt/wasi-sdk" >> ~/.bashrc

@ -94,7 +94,7 @@ BINARY_NAME=sledgert
# This flag tracks the total number of sandboxes in the various states
# It is useful to debug if sandboxes are "getting caught" in a particular state
# CFLAGS += -DSANDBOX_STATE_TOTALS
CFLAGS += -DSANDBOX_STATE_TOTALS
# This flag enables a per-worker atomic count of sandbox's local runqueue count in thread local storage
# Useful to debug if sandboxes are "getting caught" or "leaking" while in a local runqueue

@ -4,7 +4,6 @@
#include <assert.h>
#include "arch/common.h"
#include "current_sandbox.h"
#define ARCH_SIG_JMP_OFF 0x100 /* Based on code generated! */
@ -47,24 +46,20 @@ arch_context_switch(struct arch_context *a, struct arch_context *b)
* Assumption: In the case of a slow context switch, the caller
* set current_sandbox to the sandbox containing the target context
*/
if (b->variant == ARCH_CONTEXT_VARIANT_SLOW) {
/*if (b->variant == ARCH_CONTEXT_VARIANT_SLOW) {
struct sandbox *current = current_sandbox_get();
assert(current != NULL && b == &current->ctxt);
}
}*/
#endif
/* if both a and b are NULL, there is no state change */
assert(a != NULL || b != NULL);
assert(a != NULL && b != NULL);
/* Assumption: The caller does not switch to itself */
assert(a != b);
/* Set any NULLs to worker_thread_base_context to resume execution of main */
if (a == NULL) a = &worker_thread_base_context;
if (b == NULL) b = &worker_thread_base_context;
/* A Transition {Unused, Running} -> Fast */
assert(a->variant == ARCH_CONTEXT_VARIANT_UNUSED || a->variant == ARCH_CONTEXT_VARIANT_RUNNING);
assert(a->variant == ARCH_CONTEXT_VARIANT_RUNNING);
/* B Transition {Fast, Slow} -> Running */
assert(b->variant == ARCH_CONTEXT_VARIANT_FAST || b->variant == ARCH_CONTEXT_VARIANT_SLOW);
@ -87,6 +82,8 @@ arch_context_switch(struct arch_context *a, struct arch_context *b)
"ldr x1, [%[bv]]\n\t"
"sub x1, x1, #2\n\t"
"cbz x1, slow%=\n\t"
"mov x3, #3\n\t"
"str x3, [%[bv]]\n\t" /* b->variant = ARCH_CONTEXT_VARIANT_RUNNING; */
"ldr x0, [%[b]]\n\t"
"ldr x1, [%[b], 8]\n\t"
"mov sp, x0\n\t"
@ -95,8 +92,6 @@ arch_context_switch(struct arch_context *a, struct arch_context *b)
"br %[slowpath]\n\t"
".align 8\n\t"
"reset%=:\n\t"
"mov x1, #3\n\t"
"str x1, [%[bv]]\n\t"
".align 8\n\t"
"exit%=:\n\t"
:
@ -109,6 +104,37 @@ arch_context_switch(struct arch_context *a, struct arch_context *b)
return 0;
}
/**
* Load a new sandbox that preempted an existing sandbox, restoring only the
* instruction pointer and stack pointer registers.
* @param active_context - the context of the current worker thread
* @param sandbox_context - the context that we want to restore
*/
static inline void
arch_context_restore_fast(mcontext_t *active_context, struct arch_context *sandbox_context)
{
assert(active_context != NULL);
assert(sandbox_context != NULL);
/* Assumption: Base Context is only ever used by arch_context_switch */
assert(sandbox_context != &worker_thread_base_context);
assert(sandbox_context->regs[UREG_SP]);
assert(sandbox_context->regs[UREG_IP]);
/* Transitioning from Fast -> Running */
assert(sandbox_context->variant == ARCH_CONTEXT_VARIANT_FAST);
sandbox_context->variant = ARCH_CONTEXT_VARIANT_RUNNING;
//active_context->gregs[REG_RSP] = sandbox_context->regs[UREG_SP];
//active_context->gregs[REG_RIP] = sandbox_context->regs[UREG_IP];
active_context->sp = sandbox_context->regs[UREG_SP];
active_context->pc = sandbox_context->regs[UREG_IP];
}
#else
#warning "Neither AARCH64 nor aarch64 was defined, but aarch64/context.h was included!"
#endif
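
Note on the aarch64 fast restore added above: it mirrors the existing x86_64 helper by writing the saved stack pointer and program counter straight into the interrupted thread's mcontext. A minimal sketch of how such a helper is typically driven from a signal handler follows; the wrapper name and calling context are assumptions, not part of this diff.

#include <ucontext.h>

#include "arch/context.h"

/* Sketch only: the wrapper name and surrounding scheduler logic are assumed,
 * not part of this commit. */
static inline void
example_preempt_restore(void *interrupted_context_raw, struct arch_context *next)
{
        ucontext_t *interrupted_context = (ucontext_t *)interrupted_context_raw;

        /* Overwriting sp/pc in the interrupted mcontext makes the return from
         * the signal handler resume the chosen sandbox directly. */
        arch_context_restore_fast(&interrupted_context->uc_mcontext, next);
}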

@ -29,7 +29,7 @@ static inline void
http_route_total_increment_request(struct http_route_total *rm)
{
#ifdef HTTP_ROUTE_TOTAL_COUNTERS
atomic_fetch_add(&rm->total_requests, 1);
//atomic_fetch_add(&rm->total_requests, 1);
#endif
}
@ -37,12 +37,12 @@ static inline void
http_route_total_increment(struct http_route_total *rm, int status_code)
{
#ifdef HTTP_ROUTE_TOTAL_COUNTERS
if (status_code >= 200 && status_code <= 299) {
/*if (status_code >= 200 && status_code <= 299) {
atomic_fetch_add(&rm->total_2XX, 1);
} else if (status_code >= 400 && status_code <= 499) {
atomic_fetch_add(&rm->total_4XX, 1);
} else if (status_code >= 500 && status_code <= 599) {
atomic_fetch_add(&rm->total_5XX, 1);
}
}*/
#endif
}

@ -177,7 +177,7 @@ http_session_set_response_header(struct http_session *session, int status_code)
{
assert(session != NULL);
assert(status_code >= 200 && status_code <= 599);
http_total_increment_response(status_code);
//http_total_increment_response(status_code);
/* We might not have actually matched a route */
if (likely(session->route != NULL)) { http_route_total_increment(&session->route->metrics, status_code); }
@ -197,8 +197,6 @@ http_session_set_response_header(struct http_session *session, int status_code)
rc = fprintf(session->response_header.handle, HTTP_RESPONSE_CONTENT_LENGTH,
session->response_body.size);
assert(rc > 0);
rc = fputs(HTTP_RESPONSE_200_OK, session->response_header.handle);
assert(rc != EOF);
}
rc = fputs(HTTP_RESPONSE_TERMINATOR, session->response_header.handle);

@ -6,18 +6,21 @@
/* Returns pointer back if successful, null otherwise */
typedef void (*local_runqueue_add_fn_t)(struct sandbox *);
typedef void (*local_runqueue_add_fn_t_idx)(int index, struct sandbox *);
typedef bool (*local_runqueue_is_empty_fn_t)(void);
typedef void (*local_runqueue_delete_fn_t)(struct sandbox *sandbox);
typedef struct sandbox *(*local_runqueue_get_next_fn_t)();
struct local_runqueue_config {
local_runqueue_add_fn_t add_fn;
local_runqueue_add_fn_t_idx add_fn_idx;
local_runqueue_is_empty_fn_t is_empty_fn;
local_runqueue_delete_fn_t delete_fn;
local_runqueue_get_next_fn_t get_next_fn;
};
void local_runqueue_add(struct sandbox *);
void local_runqueue_add_index(int index, struct sandbox *);
void local_runqueue_delete(struct sandbox *);
bool local_runqueue_is_empty();
struct sandbox *local_runqueue_get_next();
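
The header now exposes an index-addressed enqueue next to the existing per-worker one. A small sketch of the two call paths follows; the wrapper function and its arguments are illustrative only, and the cross-thread case is the one the listener-thread changes later in this diff rely on.

#include <stdbool.h>

#include "local_runqueue.h"

/* Sketch only: the two enqueue paths this header now exposes. */
void
example_enqueue(struct sandbox *sandbox, int target_worker, bool cross_thread)
{
        if (cross_thread) {
                /* Any thread (e.g. the listener) can target a specific worker's queue */
                local_runqueue_add_index(target_worker, sandbox);
        } else {
                /* A worker enqueues onto its own thread-local queue */
                local_runqueue_add(sandbox);
        }
}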

@ -7,7 +7,10 @@
#include "arch/getcycles.h"
#include "runtime.h"
extern uint64_t total_held[1024];
extern uint64_t longest_held[1024];
extern thread_local int thread_id;
/* A linked list of nodes */
struct lock_wrapper {
uint64_t longest_held;
@ -73,9 +76,15 @@ lock_unlock(lock_t *self, lock_node_t *node)
ck_spinlock_mcs_unlock(&self->lock, &node->node);
uint64_t now = __getcycles();
assert(node->time_locked < now);
assert(node->time_locked <= now);
uint64_t duration = now - node->time_locked;
node->time_locked = 0;
if (unlikely(duration > self->longest_held)) { self->longest_held = duration; }
if (unlikely(duration > self->longest_held)) {
self->longest_held = duration;
}
self->total_held += duration;
if (unlikely(duration > longest_held[thread_id])) {
longest_held[thread_id] = duration;
}
total_held[thread_id] += duration;
}
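
Besides the per-lock counters, lock_unlock now also accumulates hold times into the global total_held/longest_held arrays indexed by thread_id. A short sketch of reading those counters at shutdown follows (the SIGINT handler later in this diff prints exactly these values); the reporting function is hypothetical.

#include <inttypes.h>
#include <stdio.h>

extern uint64_t total_held[1024];
extern uint64_t longest_held[1024];

/* Sketch only: report the per-thread counters accumulated in lock_unlock above. */
static void
example_report_lock_contention(int thread_id)
{
        printf("thread %d: total cycles holding locks %" PRIu64 ", longest single hold %" PRIu64 "\n",
               thread_id, total_held[thread_id], longest_held[thread_id]);
}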

@ -60,7 +60,7 @@ static inline void
module_acquire(struct module *module)
{
assert(module->reference_count < UINT32_MAX);
atomic_fetch_add(&module->reference_count, 1);
//atomic_fetch_add(&module->reference_count, 1);
return;
}
@ -158,8 +158,8 @@ module_entrypoint(struct module *module)
static inline void
module_release(struct module *module)
{
assert(module->reference_count > 0);
atomic_fetch_sub(&module->reference_count, 1);
//assert(module->reference_count > 0);
//atomic_fetch_sub(&module->reference_count, 1);
return;
}

@ -26,7 +26,7 @@
#define RUNTIME_MAX_EPOLL_EVENTS 128
#define RUNTIME_MAX_TENANT_COUNT 32
#define RUNTIME_RELATIVE_DEADLINE_US_MAX 3600000000 /* One Hour. Fits in uint32_t */
#define RUNTIME_RUNQUEUE_SIZE 256 /* Minimum guaranteed size. Might grow! */
#define RUNTIME_RUNQUEUE_SIZE 10240000 /* Minimum guaranteed size. Might grow! */
#define RUNTIME_TENANT_QUEUE_SIZE 4096
enum RUNTIME_SIGALRM_HANDLER

@ -55,6 +55,6 @@ static inline void
sandbox_process_scheduler_updates(struct sandbox *sandbox)
{
if (tenant_is_paid(sandbox->tenant)) {
atomic_fetch_sub(&sandbox->tenant->remaining_budget, sandbox->last_state_duration);
//atomic_fetch_sub(&sandbox->tenant->remaining_budget, sandbox->last_state_duration);
}
}

@ -48,6 +48,7 @@ sandbox_perf_log_print_entry(struct sandbox *sandbox)
runtime_processor_speed_MHz);
}
static inline void
sandbox_perf_log_init()
{

@ -3,6 +3,7 @@
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <inttypes.h>
#include "arch/context.h"
#include "current_sandbox.h"
@ -34,7 +35,7 @@ sandbox_set_as_initialized(struct sandbox *sandbox, sandbox_state_t last_state)
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
assert(now >= sandbox->timestamp_of.last_state_change);
sandbox->last_state_duration = now - sandbox->timestamp_of.last_state_change;
sandbox->duration_of_state[last_state] += sandbox->last_state_duration;
sandbox->timestamp_of.last_state_change = now;

@ -43,7 +43,7 @@ sandbox_set_as_returned(struct sandbox *sandbox, sandbox_state_t last_state)
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
assert(now >= sandbox->timestamp_of.last_state_change);
sandbox->last_state_duration = now - sandbox->timestamp_of.last_state_change;
sandbox->duration_of_state[last_state] += sandbox->last_state_duration;
sandbox->timestamp_of.last_state_change = now;

@ -2,6 +2,7 @@
#include <assert.h>
#include <stdint.h>
#include <inttypes.h>
#include "arch/getcycles.h"
#include "local_runqueue.h"
@ -31,7 +32,7 @@ sandbox_set_as_runnable(struct sandbox *sandbox, sandbox_state_t last_state)
switch (last_state) {
case SANDBOX_INITIALIZED: {
sandbox->timestamp_of.dispatched = now;
local_runqueue_add(sandbox);
//local_runqueue_add(sandbox);
break;
}
case SANDBOX_ASLEEP: {
@ -45,7 +46,7 @@ sandbox_set_as_runnable(struct sandbox *sandbox, sandbox_state_t last_state)
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
assert(now >= sandbox->timestamp_of.last_state_change);
sandbox->last_state_duration = now - sandbox->timestamp_of.last_state_change;
sandbox->duration_of_state[last_state] += sandbox->last_state_duration;
sandbox->timestamp_of.last_state_change = now;
@ -58,7 +59,6 @@ sandbox_set_as_runnable(struct sandbox *sandbox, sandbox_state_t last_state)
sandbox_state_transition_to_hook(sandbox, SANDBOX_RUNNABLE);
}
static inline void
sandbox_wakeup(struct sandbox *sandbox)
{

@ -39,7 +39,7 @@ sandbox_set_as_running_sys(struct sandbox *sandbox, sandbox_state_t last_state)
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
assert(now >= sandbox->timestamp_of.last_state_change);
sandbox->last_state_duration = now - sandbox->timestamp_of.last_state_change;
sandbox->duration_of_state[last_state] += sandbox->last_state_duration;
sandbox->timestamp_of.last_state_change = now;

@ -35,7 +35,7 @@ sandbox_set_as_running_user(struct sandbox *sandbox, sandbox_state_t last_state)
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
assert(now >= sandbox->timestamp_of.last_state_change);
sandbox->last_state_duration = now - sandbox->timestamp_of.last_state_change;
sandbox->duration_of_state[last_state] += sandbox->last_state_duration;
sandbox->timestamp_of.last_state_change = now;

@ -48,7 +48,7 @@ static inline void
sandbox_state_totals_increment(sandbox_state_t state)
{
#ifdef SANDBOX_STATE_TOTALS
atomic_fetch_add(&sandbox_state_totals[state], 1);
//atomic_fetch_add(&sandbox_state_totals[state], 1);
#endif
}
@ -56,7 +56,7 @@ static inline void
sandbox_state_totals_decrement(sandbox_state_t state)
{
#ifdef SANDBOX_STATE_TOTALS
if (atomic_load(&sandbox_state_totals[state]) == 0) panic("Underflow of %s\n", sandbox_state_stringify(state));
atomic_fetch_sub(&sandbox_state_totals[state], 1);
//if (atomic_load(&sandbox_state_totals[state]) == 0) panic("Underflow of %s\n", sandbox_state_stringify(state));
//atomic_fetch_sub(&sandbox_state_totals[state], 1);
#endif
}

@ -15,5 +15,6 @@ sandbox_total_initialize()
static inline uint64_t
sandbox_total_postfix_increment()
{
return atomic_fetch_add(&sandbox_total, 1) + 1;
//return atomic_fetch_add(&sandbox_total, 1) + 1;
return 1;
}

@ -114,11 +114,11 @@ scheduler_edf_get_next()
uint64_t local_deadline = local == NULL ? UINT64_MAX : local->absolute_deadline;
struct sandbox *global = NULL;
uint64_t global_deadline = global_request_scheduler_peek();
//uint64_t global_deadline = global_request_scheduler_peek();
/* Try to pull and allocate from the global queue if earlier
* This will be placed at the head of the local runqueue */
if (global_deadline < local_deadline) {
/*if (global_deadline < local_deadline) {
if (global_request_scheduler_remove_if_earlier(&global, local_deadline) == 0) {
assert(global != NULL);
assert(global->absolute_deadline < local_deadline);
@ -126,10 +126,16 @@ scheduler_edf_get_next()
assert(global->state == SANDBOX_INITIALIZED);
sandbox_set_as_runnable(global, SANDBOX_INITIALIZED);
}
}*/
if (local != NULL) {
if (local->state == SANDBOX_INITIALIZED) {
sandbox_prepare_execution_environment(local);
assert(local->state == SANDBOX_INITIALIZED);
sandbox_set_as_runnable(local, SANDBOX_INITIALIZED);
}
}
/* Return what is at the head of the local runqueue or NULL if empty */
return local_runqueue_get_next();
return local;
}
static inline struct sandbox *

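With the global-queue pull commented out, scheduler_edf_get_next effectively reduces to the local-only flow below. This is a consolidated sketch for readability, assuming the surrounding scheduler.h context; the committed code is the hunk above.

/* Sketch only: the local-only flow that remains once the commented-out
 * global pull is gone. */
static inline struct sandbox *
example_edf_get_next(void)
{
        struct sandbox *local = local_runqueue_get_next();

        if (local != NULL && local->state == SANDBOX_INITIALIZED) {
                sandbox_prepare_execution_environment(local);
                sandbox_set_as_runnable(local, SANDBOX_INITIALIZED);
        }

        /* Head of the local runqueue, or NULL if it is empty */
        return local;
}
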
@ -31,7 +31,7 @@ software_interrupt_mask_signal(int signal)
sigset_t set;
int return_code;
assert(signal == SIGALRM || signal == SIGUSR1 || signal == SIGFPE || signal == SIGSEGV);
assert(signal == SIGALRM || signal == SIGUSR1 || signal == SIGFPE || signal == SIGSEGV || signal == SIGINT);
/* all threads created by the calling thread will have signal blocked */
sigemptyset(&set);
sigaddset(&set, signal);
@ -55,7 +55,7 @@ software_interrupt_unmask_signal(int signal)
sigset_t set;
int return_code;
assert(signal == SIGALRM || signal == SIGUSR1 || signal == SIGFPE || signal == SIGSEGV);
assert(signal == SIGALRM || signal == SIGUSR1 || signal == SIGFPE || signal == SIGSEGV || signal == SIGINT);
/* all threads created by the calling thread will have signal unblocked */
sigemptyset(&set);
sigaddset(&set, signal);

@ -56,7 +56,7 @@ static inline void
software_interrupt_counts_deferred_sigalrm_replay_increment()
{
#ifdef LOG_SOFTWARE_INTERRUPT_COUNTS
atomic_fetch_add(&software_interrupt_counts_deferred_sigalrm_replay[worker_thread_idx], 1);
//atomic_fetch_add(&software_interrupt_counts_deferred_sigalrm_replay[worker_thread_idx], 1);
#endif
}
@ -64,7 +64,7 @@ static inline void
software_interrupt_counts_sigalrm_kernel_increment()
{
#ifdef LOG_SOFTWARE_INTERRUPT_COUNTS
atomic_fetch_add(&software_interrupt_counts_sigalrm_kernel[worker_thread_idx], 1);
//atomic_fetch_add(&software_interrupt_counts_sigalrm_kernel[worker_thread_idx], 1);
#endif
}
@ -72,7 +72,7 @@ static inline void
software_interrupt_counts_sigalrm_thread_increment()
{
#ifdef LOG_SOFTWARE_INTERRUPT_COUNTS
atomic_fetch_add(&software_interrupt_counts_sigalrm_thread[worker_thread_idx], 1);
//atomic_fetch_add(&software_interrupt_counts_sigalrm_thread[worker_thread_idx], 1);
#endif
}
@ -80,7 +80,7 @@ static inline void
software_interrupt_counts_sigfpe_increment()
{
#ifdef LOG_SOFTWARE_INTERRUPT_COUNTS
atomic_fetch_add(&software_interrupt_counts_sigfpe[worker_thread_idx], 1);
//atomic_fetch_add(&software_interrupt_counts_sigfpe[worker_thread_idx], 1);
#endif
}
@ -88,7 +88,7 @@ static inline void
software_interrupt_counts_sigsegv_increment()
{
#ifdef LOG_SOFTWARE_INTERRUPT_COUNTS
atomic_fetch_add(&software_interrupt_counts_sigsegv[worker_thread_idx], 1);
//atomic_fetch_add(&software_interrupt_counts_sigsegv[worker_thread_idx], 1);
#endif
}
@ -96,7 +96,7 @@ static inline void
software_interrupt_counts_sigusr_increment()
{
#ifdef LOG_SOFTWARE_INTERRUPT_COUNTS
atomic_fetch_add(&software_interrupt_counts_sigusr[worker_thread_idx], 1);
//atomic_fetch_add(&software_interrupt_counts_sigusr[worker_thread_idx], 1);
#endif
}

@ -40,7 +40,7 @@ admissions_control_add(uint64_t admissions_estimate)
{
#ifdef ADMISSIONS_CONTROL
assert(admissions_estimate > 0);
atomic_fetch_add(&admissions_control_admitted, admissions_estimate);
//atomic_fetch_add(&admissions_control_admitted, admissions_estimate);
#ifdef LOG_ADMISSIONS_CONTROL
debuglog("Runtime Admitted: %lu / %lu\n", admissions_control_admitted, admissions_control_capacity);

@ -12,7 +12,14 @@
#include "tenant.h"
#include "tenant_functions.h"
#include "http_session_perf_log.h"
#include "sandbox_set_as_runnable.h"
extern thread_local int thread_id;
extern bool first_request_comming;
time_t t_start;
struct priority_queue* worker_queues[1024];
extern uint32_t runtime_worker_threads_count;
int rr_index = 0;
static void listener_thread_unregister_http_session(struct http_session *http);
static void panic_on_epoll_error(struct epoll_event *evt);
@ -55,7 +62,6 @@ listener_thread_initialize(void)
assert(ret == 0);
ret = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cs);
assert(ret == 0);
printf("\tListener core thread: %lx\n", listener_thread_id);
}
@ -191,6 +197,11 @@ on_client_request_arrival(int client_socket, const struct sockaddr *client_addre
static void
on_client_request_receiving(struct http_session *session)
{
if (first_request_comming == false){
t_start = time(NULL);
first_request_comming = true;
}
/* Read HTTP request */
int rc = http_session_receive_request(session, (void_star_cb)listener_thread_register_http_session);
if (likely(rc == 0)) {
@ -252,13 +263,24 @@ on_client_request_received(struct http_session *session)
}
/* If the global request scheduler is full, return a 429 to the client */
if (unlikely(global_request_scheduler_add(sandbox) == NULL)) {
/*if (unlikely(global_request_scheduler_add(sandbox) == NULL)) {
debuglog("Failed to add sandbox to global queue\n");
sandbox_free(sandbox);
session->state = HTTP_SESSION_EXECUTION_COMPLETE;
http_session_set_response_header(session, 429);
on_client_response_header_sending(session);
}*/
if (rr_index == runtime_worker_threads_count) {
rr_index = 0;
}
//struct timeval t_start,t_end;
//gettimeofday(&t_start, NULL);
local_runqueue_add_index(rr_index, sandbox);
//gettimeofday(&t_end, NULL);
//long cost = t_end.tv_sec * 1000000 + t_end.tv_usec - t_start.tv_sec * 1000000 - t_start.tv_usec;
//printf("%ld ", cost);
rr_index++;
}
static void
@ -401,6 +423,7 @@ on_client_socket_epoll_event(struct epoll_event *evt)
noreturn void *
listener_thread_main(void *dummy)
{
thread_id = 200;
struct epoll_event epoll_events[RUNTIME_MAX_EPOLL_EVENTS];
metrics_server_init();
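
on_client_request_received now skips the global request scheduler and spreads sandboxes over the workers round-robin via local_runqueue_add_index. The sketch below is an equivalent formulation of that dispatch using a modulo wrap instead of an explicit reset; it is illustrative, not the committed code.

#include <stdint.h>

#include "local_runqueue.h"

extern uint32_t runtime_worker_threads_count;

/* Sketch only: equivalent round-robin dispatch with a modulo wrap. */
static int example_rr_cursor = 0;

static void
example_round_robin_dispatch(struct sandbox *sandbox)
{
        local_runqueue_add_index(example_rr_cursor, sandbox);
        example_rr_cursor = (example_rr_cursor + 1) % runtime_worker_threads_count;
}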

@ -8,6 +8,7 @@
static struct local_runqueue_config local_runqueue;
thread_local uint32_t total_local_requests = 0;
#ifdef LOG_LOCAL_RUNQUEUE
thread_local uint32_t local_runqueue_count = 0;
#endif
@ -33,6 +34,16 @@ local_runqueue_add(struct sandbox *sandbox)
return local_runqueue.add_fn(sandbox);
}
void
local_runqueue_add_index(int index, struct sandbox *sandbox)
{
assert(local_runqueue.add_fn_idx != NULL);
#ifdef LOG_LOCAL_RUNQUEUE
local_runqueue_count++;
#endif
return local_runqueue.add_fn_idx(index, sandbox);
}
/**
* Delete a sandbox from the run queue
* @param sandbox to delete
@ -41,6 +52,7 @@ void
local_runqueue_delete(struct sandbox *sandbox)
{
assert(local_runqueue.delete_fn != NULL);
total_local_requests++;
#ifdef LOG_LOCAL_RUNQUEUE
local_runqueue_count--;
#endif

@ -12,6 +12,12 @@
#include "sandbox_functions.h"
#include "runtime.h"
extern thread_local int thread_id;
uint64_t total_held[1024] = {0};
uint64_t longest_held[1024] = {0};
extern struct priority_queue* worker_queues[1024];
extern thread_local int worker_thread_idx;
thread_local static struct priority_queue *local_runqueue_minheap;
/**
@ -21,7 +27,8 @@ thread_local static struct priority_queue *local_runqueue_minheap;
bool
local_runqueue_minheap_is_empty()
{
return priority_queue_length_nolock(local_runqueue_minheap) == 0;
//return priority_queue_length_nolock(local_runqueue_minheap) == 0;
return priority_queue_length(local_runqueue_minheap) == 0;
}
/**
@ -32,16 +39,40 @@ local_runqueue_minheap_is_empty()
void
local_runqueue_minheap_add(struct sandbox *sandbox)
{
int return_code = priority_queue_enqueue_nolock(local_runqueue_minheap, sandbox);
/*int return_code = priority_queue_enqueue_nolock(local_runqueue_minheap, sandbox);
if (unlikely(return_code == -ENOSPC)) {
struct priority_queue *temp = priority_queue_grow_nolock(local_runqueue_minheap);
if (unlikely(temp == NULL)) panic("Failed to grow local runqueue\n");
local_runqueue_minheap = temp;
return_code = priority_queue_enqueue_nolock(local_runqueue_minheap, sandbox);
if (unlikely(return_code == -ENOSPC)) panic("Thread Runqueue is full!\n");
}*/
int return_code = priority_queue_enqueue(local_runqueue_minheap, sandbox);
if (return_code != 0) {
if (return_code == -ENOSPC) {
panic("Thread Runqueue is full!\n");
} else {
printf("add request to local queue failed, exit\n");
exit(0);
}
}
}
void
local_runqueue_minheap_add_index(int index, struct sandbox *sandbox)
{
int return_code = priority_queue_enqueue(worker_queues[index], sandbox);
if (return_code != 0) {
if (return_code == -ENOSPC) {
panic("Thread Runqueue is full!\n");
} else {
printf("add request to local queue failed, exit\n");
exit(0);
}
}
}
/**
* Deletes a sandbox from the runqueue
* @param sandbox to delete
@ -51,7 +82,8 @@ local_runqueue_minheap_delete(struct sandbox *sandbox)
{
assert(sandbox != NULL);
int rc = priority_queue_delete_nolock(local_runqueue_minheap, sandbox);
//int rc = priority_queue_delete_nolock(local_runqueue_minheap, sandbox);
int rc = priority_queue_delete(local_runqueue_minheap, sandbox);
if (rc == -1) panic("Tried to delete sandbox %lu from runqueue, but was not present\n", sandbox->id);
}
@ -67,7 +99,8 @@ local_runqueue_minheap_get_next()
{
/* Get the deadline of the sandbox at the head of the local request queue */
struct sandbox *next = NULL;
int rc = priority_queue_top_nolock(local_runqueue_minheap, (void **)&next);
//int rc = priority_queue_top_nolock(local_runqueue_minheap, (void **)&next);
int rc = priority_queue_top(local_runqueue_minheap, (void **)&next);
if (rc == -ENOENT) return NULL;
@ -81,10 +114,12 @@ void
local_runqueue_minheap_initialize()
{
/* Initialize local state */
local_runqueue_minheap = priority_queue_initialize(RUNTIME_RUNQUEUE_SIZE, false, sandbox_get_priority);
local_runqueue_minheap = priority_queue_initialize(RUNTIME_RUNQUEUE_SIZE, true, sandbox_get_priority);
worker_queues[worker_thread_idx] = local_runqueue_minheap;
/* Register Function Pointers for Abstract Scheduling API */
struct local_runqueue_config config = { .add_fn = local_runqueue_minheap_add,
.add_fn_idx = local_runqueue_minheap_add_index,
.is_empty_fn = local_runqueue_minheap_is_empty,
.delete_fn = local_runqueue_minheap_delete,
.get_next_fn = local_runqueue_minheap_get_next };
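
The minheap runqueue switches from the _nolock priority-queue calls to the locking variants and publishes each worker's heap in worker_queues[], because the listener thread now enqueues into these heaps from outside the worker. A sketch of that cross-thread enqueue follows; the helper is hypothetical and assumes the runtime's priority_queue and panic headers.

#include "panic.h"
#include "priority_queue.h"

struct sandbox; /* opaque here; only the pointer is passed through */

extern struct priority_queue *worker_queues[1024];

/* Sketch only: enqueue from the listener thread onto a worker's heap. The
 * worker concurrently calls priority_queue_top()/priority_queue_delete() on
 * the same heap, which is why it must be created with use_lock = true. */
static void
example_cross_thread_enqueue(int worker_idx, struct sandbox *sandbox)
{
        int rc = priority_queue_enqueue(worker_queues[worker_idx], sandbox);
        if (rc != 0) panic("failed to enqueue on worker %d's runqueue (rc %d)\n", worker_idx, rc);
}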

@ -43,6 +43,9 @@ uint32_t runtime_quantum_us = 5000; /* 5ms */
uint64_t runtime_boot_timestamp;
pid_t runtime_pid = 0;
thread_local int thread_id = -1;
bool first_request_comming = false;
/**
* Returns instructions on use of CLI if used incorrectly
* @param cmd - The command the user entered
@ -448,7 +451,8 @@ main(int argc, char **argv)
printf("Runtime Environment:\n");
runtime_processor_speed_MHz = runtime_get_processor_speed_MHz();
//runtime_processor_speed_MHz = runtime_get_processor_speed_MHz();
runtime_processor_speed_MHz = 1500;
if (unlikely(runtime_processor_speed_MHz == 0)) panic("Failed to detect processor speed\n");
int heading_length = 30;

@ -9,6 +9,7 @@
#include <threads.h>
#include <unistd.h>
#include <ucontext.h>
#include <inttypes.h>
#include "arch/context.h"
#include "current_sandbox.h"
@ -25,14 +26,24 @@
#include "software_interrupt.h"
#include "software_interrupt_counts.h"
#ifdef SANDBOX_STATE_TOTALS
extern _Atomic uint32_t sandbox_state_totals[SANDBOX_STATE_COUNT];
#endif
extern uint64_t total_held[1024];
extern uint64_t longest_held[1024];
thread_local _Atomic volatile sig_atomic_t handler_depth = 0;
thread_local _Atomic volatile sig_atomic_t deferred_sigalrm = 0;
/***************************************
* Externs
**************************************/
extern thread_local uint32_t total_local_requests;
extern pthread_t *runtime_worker_threads;
extern time_t t_start;
extern thread_local int worker_thread_idx;
/**************************
* Private Static Inlines *
@ -74,6 +85,31 @@ propagate_sigalrm(siginfo_t *signal_info)
assert(signal_info->si_code == SI_TKILL);
}
}
/**
* A POSIX signal is delivered to only one thread.
* This function broadcasts SIGINT to the other worker threads and the listener thread
*/
static inline void
sigint_propagate_workers_listener(siginfo_t *signal_info)
{
/* Signal was sent directly by the kernel or from user space, so forward to other threads */
if (signal_info->si_code == SI_KERNEL || signal_info->si_code == SI_USER) {
for (int i = 0; i < runtime_worker_threads_count; i++) {
if (pthread_self() == runtime_worker_threads[i]) continue;
/* All threads should have been initialized */
assert(runtime_worker_threads[i] != 0);
pthread_kill(runtime_worker_threads[i], SIGINT);
}
/* send to listener thread */
if (pthread_self() != listener_thread_id) {
pthread_kill(listener_thread_id, SIGINT);
}
} else {
/* Signal forwarded from another thread. Just confirm it resulted from pthread_kill */
assert(signal_info->si_code == SI_TKILL);
}
}
static inline bool
worker_thread_is_running_cooperative_scheduler(void)
@ -105,7 +141,7 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
/* Signals should not nest */
assert(handler_depth == 0);
atomic_fetch_add(&handler_depth, 1);
//atomic_fetch_add(&handler_depth, 1);
ucontext_t *interrupted_context = (ucontext_t *)interrupted_context_raw;
struct sandbox *current_sandbox = current_sandbox_get();
@ -139,7 +175,7 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
global_timeout_queue_process_promotions();
}
propagate_sigalrm(signal_info);
atomic_fetch_add(&deferred_sigalrm, 1);
//atomic_fetch_add(&deferred_sigalrm, 1);
}
break;
@ -164,7 +200,7 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
software_interrupt_counts_sigfpe_increment();
if (likely(current_sandbox && current_sandbox->state == SANDBOX_RUNNING_USER)) {
atomic_fetch_sub(&handler_depth, 1);
//atomic_fetch_sub(&handler_depth, 1);
current_sandbox_trap(WASM_TRAP_ILLEGAL_ARITHMETIC_OPERATION);
} else {
panic("Runtime SIGFPE\n");
@ -176,7 +212,7 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
software_interrupt_counts_sigsegv_increment();
if (likely(current_sandbox && current_sandbox->state == SANDBOX_RUNNING_USER)) {
atomic_fetch_sub(&handler_depth, 1);
//atomic_fetch_sub(&handler_depth, 1);
current_sandbox_trap(WASM_TRAP_OUT_OF_BOUNDS_LINEAR_MEMORY);
} else {
panic("Runtime SIGSEGV\n");
@ -184,6 +220,22 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
break;
}
case SIGINT: {
/* Stop the alarm timer first */
software_interrupt_disarm_timer();
sigint_propagate_workers_listener(signal_info);
/* calculate the throughput */
time_t t_end = time(NULL);
double seconds = difftime(t_end, t_start);
//double throughput = atomic_load(&sandbox_state_totals[SANDBOX_COMPLETE]) / seconds;
double throughput = total_local_requests / seconds;
uint32_t total_sandboxes_error = atomic_load(&sandbox_state_totals[SANDBOX_ERROR]);
printf("throughput is %f error request is %u global total request %d worker %d total requests is %u worker total_held %"PRIu64" longest_held %"PRIu64" listener total_held %"PRIu64" longest_held %"PRIu64"\n",
throughput, total_sandboxes_error, atomic_load(&sandbox_state_totals[SANDBOX_COMPLETE]), worker_thread_idx, total_local_requests, total_held[worker_thread_idx], longest_held[worker_thread_idx], total_held[200], longest_held[200]);
fflush(stdout);
pthread_exit(0);
}
default: {
const char *signal_name = strsignal(signal_type);
switch (signal_info->si_code) {
@ -200,7 +252,7 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
}
}
atomic_fetch_sub(&handler_depth, 1);
//atomic_fetch_sub(&handler_depth, 1);
return;
}
@ -268,9 +320,10 @@ software_interrupt_initialize(void)
sigaddset(&signal_action.sa_mask, SIGUSR1);
sigaddset(&signal_action.sa_mask, SIGFPE);
sigaddset(&signal_action.sa_mask, SIGSEGV);
sigaddset(&signal_action.sa_mask, SIGINT);
const int supported_signals[] = { SIGALRM, SIGUSR1, SIGFPE, SIGSEGV };
const size_t supported_signals_len = 4;
const int supported_signals[] = { SIGALRM, SIGUSR1, SIGFPE, SIGSEGV, SIGINT};
const size_t supported_signals_len = 5;
for (int i = 0; i < supported_signals_len; i++) {
int signal = supported_signals[i];

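The SIGINT path added above disarms the timer, forwards the signal to the remaining workers and the listener, prints one throughput line per worker, and exits the thread. The throughput figure is requests completed on that worker divided by wall-clock seconds since the first request reached the listener (t_start); a small sketch of that arithmetic follows, with a hypothetical helper name.

#include <stdint.h>
#include <time.h>

/* Sketch only: the arithmetic behind the throughput line printed above. */
static double
example_worker_throughput(uint32_t requests_completed, time_t first_request_time)
{
        double seconds = difftime(time(NULL), first_request_time);
        return seconds > 0.0 ? requests_completed / seconds : 0.0;
}
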
@ -18,6 +18,7 @@
#include "tenant_functions.h"
#include "priority_queue.h"
extern thread_local int thread_id;
/***************************
* Worker Thread State *
**************************/
@ -47,7 +48,7 @@ worker_thread_main(void *argument)
/* Index was passed via argument */
worker_thread_idx = *(int *)argument;
thread_id = worker_thread_idx;
/* Set my priority */
// runtime_set_pthread_prio(pthread_self(), 2);
pthread_setschedprio(pthread_self(), -20);
@ -65,6 +66,9 @@ worker_thread_main(void *argument)
software_interrupt_unmask_signal(SIGFPE);
software_interrupt_unmask_signal(SIGSEGV);
/* Unmask SIGINT signals */
software_interrupt_unmask_signal(SIGINT);
/* Unmask signals, unless the runtime has disabled preemption */
if (runtime_preemption_enabled) {
software_interrupt_unmask_signal(SIGALRM);

@ -0,0 +1,19 @@
[
{
"name": "gwu",
"port": 10030,
"replenishment-period-us": 0,
"max-budget-us": 0,
"routes": [
{
"route": "/fib",
"path": "empty.wasm.so",
"admissions-percentile": 70,
"expected-execution-us": 4000,
"relative-deadline-us": 16000,
"http-resp-content-type": "text/plain"
}
]
}
]

@ -0,0 +1,19 @@
[
{
"name": "gwu",
"port": 10030,
"replenishment-period-us": 0,
"max-budget-us": 0,
"routes": [
{
"route": "/fib",
"path": "fibonacci.wasm.so",
"admissions-percentile": 70,
"expected-execution-us": 4000,
"relative-deadline-us": 16000,
"http-resp-content-type": "text/plain"
}
]
}
]

@ -0,0 +1,5 @@
#!/bin/bash
pid=`ps -ef|grep "sledgert"|grep -v grep |awk '{print $2}'`
echo $pid
sudo kill -2 $pid

@ -0,0 +1,49 @@
import re
import os
import sys
from collections import defaultdict
#get all file names which contain key_str
def file_name(file_dir, key_str):
throughput_table = defaultdict(list)
for root, dirs, files in os.walk(file_dir):
if root != os.getcwd():
continue
for file_i in files:
if file_i.find(key_str) >= 0:
segs = file_i.split('-')
if len(segs) < 3:
continue
#print(file_i)
cores_num = segs[0]
concurrency = segs[2].split(".")[0]
#print("core:", cores_num, " concurrency:", concurrency)
get_values(cores_num, concurrency, file_i, throughput_table)
#file_table[key].append(file_i)
s_result = sorted(throughput_table.items())
for i in range(len(s_result)):
print(s_result[i])
for i in range(len(s_result)):
print(int(float(((s_result[i][1][0])))),end=" ")
print();
def get_values(core, concurrency, file_name, throughput_table):
cmd='grep "Requests/sec:" %s | awk \'{print $2}\'' % file_name
#cmd='python3 ~/sledge-serverless-framework/runtime/tests/meet_deadline_percentage.py %s 50' % file_name
rt=os.popen(cmd).read().strip()
#print(file_name, rt)
throughput_table[int(core)].append(rt)
if __name__ == "__main__":
import json
argv = sys.argv[1:]
if len(argv) < 1:
print("usage ", sys.argv[0], " file containing key word")
sys.exit()
file_name(os.getcwd(), argv[0])
#for key, value in files_tables.items():
# get_values(key, value, miss_deadline_rate, total_latency, running_times, preemption_count, total_miss_deadline_rate)

@ -0,0 +1,65 @@
import re
import os
import sys
from collections import defaultdict
#get all file names which contain key_str
def file_name(file_dir, key_str):
throughput_table = defaultdict(list)
errors_table = defaultdict(list)
for root, dirs, files in os.walk(file_dir):
if root != os.getcwd():
continue
for file_i in files:
if file_i.find(key_str) >= 0:
segs = file_i.split('-')
if len(segs) < 3:
continue
#print(file_i)
cores_num = segs[1]
concurrency = segs[3].split(".")[0]
print("core:", cores_num, " concurrency:", concurrency)
get_values(cores_num, concurrency, file_i, throughput_table, errors_table)
#file_table[key].append(file_i)
s_result = sorted(throughput_table.items())
#for i in range(len(s_result)):
# print(s_result[i], "errors request:", errors_table[s_result[i][0]])
for i in range(len(s_result)):
print(int(float(((s_result[i][1][0])))),end=" ")
print();
#sys.exit()
def get_values(core, concurrency, file_name, throughput_table, errors_table):
print("parse file:", file_name)
fo = open(file_name, "r+")
total_throughput = 0
for line in fo:
line = line.strip()
if "throughput is" in line:
i_th = float(line.split(" ")[2])
total_throughput += i_th
throughput_table[int(core)].append(total_throughput)
#cmd2='grep "throughput is" %s | awk \'{print $7}\'' % file_name
#rt2=os.popen(cmd2).read().strip()
#if len(rt2) != 0:
# errors = rt2.splitlines()[0]
# errors_table[int(core)].append(int(errors))
#else:
# errors_table[int(core)].append(0)
#print(file_name, rt2)
if __name__ == "__main__":
import json
argv = sys.argv[1:]
if len(argv) < 1:
print("usage ", sys.argv[0], " file containing key word")
sys.exit()
file_name(os.getcwd(), argv[0])
#for key, value in files_tables.items():
# get_values(key, value, miss_deadline_rate, total_latency, running_times, preemption_count, total_miss_deadline_rate)

@ -0,0 +1,58 @@
import json
import re
import os
import sys
from collections import defaultdict
#get all file names which contain key_str
def file_name(file_dir, key_str):
cost_table = defaultdict(list)
for root, dirs, files in os.walk(file_dir):
if root != os.getcwd():
continue
for file_i in files:
if file_i.find(key_str) >= 0:
segs = file_i.split('_')
if len(segs) < 2:
print("less than 2 segs", file_i)
continue
#print(file_i)
name = segs[0]
#print("core:", cores_num, " concurrency:", concurrency)
get_values(name, file_i, cost_table)
#file_table[key].append(file_i)
#s_result = sorted(throughput_table.items())
#for i in range(len(s_result)):
# print(s_result[i])
#for i in range(len(s_result)):
# print(int(float(((s_result[i][1][0])))),end=" ")
#print();
for key, value in cost_table.items():
print(key, len(value))
js = json.dumps(cost_table)
f = open("cost.txt", 'w')
f.write(js)
f.close()
def get_values(name, file_name, cost_table):
fo = open(file_name, "r+")
for line in fo:
line = line.strip()
if line[0].isdigit():
str_list = line.split(" ")
for i in range(len(str_list)):
cost_table[name].append(int(str_list[i]))
if __name__ == "__main__":
import json
argv = sys.argv[1:]
if len(argv) < 1:
print("usage ", sys.argv[0], " file containing key word")
sys.exit()
file_name(os.getcwd(), argv[0])
#for key, value in files_tables.items():
# get_values(key, value, miss_deadline_rate, total_latency, running_times, preemption_count, total_miss_deadline_rate)

@ -0,0 +1 @@
sudo cpupower frequency-set -g performance

@ -0,0 +1,42 @@
#!/bin/bash
function usage {
echo "$0 [cpu cores]"
exit 1
}
if [ $# != 1 ] ; then
usage
exit 1;
fi
core_num=$1
declare project_path="$(
cd "$(dirname "$0")/../.."
pwd
)"
echo $project_path
path=`pwd`
#export SLEDGE_DISABLE_PREEMPTION=true
#export SLEDGE_SIGALRM_HANDLER=TRIAGED
export SLEDGE_NWORKERS=$core_num
#export SLEDGE_SCHEDULER=EDF
#export SLEDGE_SANDBOX_PERF_LOG=$path/$output
#echo $SLEDGE_SANDBOX_PERF_LOG
cd $project_path/runtime/bin
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_fibonacci.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_big_fibonacci.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_armcifar10.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_png2bmp.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_image_processing.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/mulitple_linear_chain.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing3.json
LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/fib.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/empty.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_fibonacci.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_sodresize.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_sodresize.json

@ -0,0 +1,38 @@
#!/bin/bash
function usage {
echo "$0 [concurrency] [fib number]"
exit 1
}
if [ $# != 2 ] ; then
usage
exit 1;
fi
#cores_list=(1 2 4 10 20 40 50 60 77)
#cores_list=(50 60)
#cores_list=(1 2 4 6 8 10 20 30 40 50 60 70 77)
concurrency=$1
fib_num=$2
echo "fib num is $fib_num"
cores_list=(1 2 4 6 8 10 12 14 16 18 20 24 28 32 36 40 44 48 52 56 60 64 68 72 77)
#cores_list=(6 8 12 14 16 18 24 28 32 36 77)
#cores_list=(77)
ulimit -n 1000000
./kill_sledge.sh
for(( i=0;i<${#cores_list[@]};i++ )) do
hey_log=${cores_list[i]}"-$fib_num-$concurrency.log" #8-38-100
loadtest_log=${cores_list[i]}"-$fib_num-$concurrency.log"
server_log="server-"${cores_list[i]}"-$fib_num-$concurrency.log"
./start.sh ${cores_list[i]} > $server_log 2>&1 &
echo "sledge start with worker core ${cores_list[i]}"
taskset --cpu-list 80-159 hey -disable-compression -disable-keepalive -disable-redirects -z "60"s -c "$concurrency" -m POST -d "$fib_num" "http://127.0.0.1:10030/fib" > $hey_log
#taskset --cpu-list 80-159 hey -disable-compression -disable-keepalive -disable-redirects -z "60"s -c "$concurrency" "http://127.0.0.1:10030/fib" > $hey_log
./kill_sledge.sh
done
folder_name="$fib_num""_c$concurrency"
mkdir $folder_name
mv *.log $folder_name

@ -0,0 +1,9 @@
#!/bin/bash
#concurrency_list=(50 100 200 300 400 1000)
concurrency_list=(200 300 400 1000)
for(( i=0;i<${#concurrency_list[@]};i++ )) do
./test.sh ${concurrency_list[i]} 15
done