Merge branch 'linear_chain_srsf' of https://github.com/lyuxiaosu/sledge-serverless-framework into linear_chain_srsf

main
xiaosuGW 3 years ago
commit 4c9877371b

@ -22,3 +22,5 @@ void local_runqueue_delete(struct sandbox *);
bool local_runqueue_is_empty(); bool local_runqueue_is_empty();
struct sandbox *local_runqueue_get_next(); struct sandbox *local_runqueue_get_next();
void local_runqueue_initialize(struct local_runqueue_config *config); void local_runqueue_initialize(struct local_runqueue_config *config);
void local_workload_add(struct sandbox *sandbox);
void local_workload_complete(struct sandbox *sandbox);

@ -142,7 +142,7 @@ static inline uint64_t
perf_window_get_percentile(struct perf_window *self, int percentile, int precomputed_index) perf_window_get_percentile(struct perf_window *self, int percentile, int precomputed_index)
{ {
assert(self != NULL); assert(self != NULL);
assert(percentile >= 50 && percentile <= 99); //assert(percentile >= 50 && percentile <= 99);
int size = self->count; int size = self->count;
//assert(size > 0); //assert(size > 0);
if (size == 0) { if (size == 0) {
@ -184,12 +184,13 @@ perf_window_print(struct perf_window *self)
float nighty_p = self->by_duration[PERF_WINDOW_BUFFER_SIZE * 90 / 100].execution_time/1000.0; float nighty_p = self->by_duration[PERF_WINDOW_BUFFER_SIZE * 90 / 100].execution_time/1000.0;
float nighty_night_p = self->by_duration[PERF_WINDOW_BUFFER_SIZE * 99 / 100].execution_time/1000.0; float nighty_night_p = self->by_duration[PERF_WINDOW_BUFFER_SIZE * 99 / 100].execution_time/1000.0;
mem_log("module %s perf window:\n", self->name); /*mem_log("module %s perf window:\n", self->name);
for (int i = 0; i < PERF_WINDOW_BUFFER_SIZE; i++) { for (int i = 0; i < PERF_WINDOW_BUFFER_SIZE; i++) {
sum += self->by_duration[i].execution_time; sum += self->by_duration[i].execution_time;
mem_log("%f,", self->by_duration[i].execution_time/1000.0); mem_log("%f,", self->by_duration[i].execution_time/1000.0);
} }
mem_log("\n"); mem_log("\n");
*/
float avg = (sum/(float)PERF_WINDOW_BUFFER_SIZE)/1000.0; float avg = (sum/(float)PERF_WINDOW_BUFFER_SIZE)/1000.0;
mem_log("min:%f,max:%f,fifty_p:%f,seventy_p:%f,eighty_p:%f,nighty_p:%f,nighty_night_p:%f,avg:%f\n", min,max,fifty_p,seventy_p,eighty_p,nighty_p,nighty_night_p, avg); mem_log("min:%f,max:%f,fifty_p:%f,seventy_p:%f,eighty_p:%f,nighty_p:%f,nighty_night_p:%f,avg:%f\n", min,max,fifty_p,seventy_p,eighty_p,nighty_p,nighty_night_p, avg);
} }

@ -9,6 +9,7 @@
#include "sandbox_request.h" #include "sandbox_request.h"
#include "memlogging.h" #include "memlogging.h"
extern __thread int worker_thread_idx;
/*************************** /***************************
* Public API * * Public API *
**************************/ **************************/
@ -110,11 +111,8 @@ sandbox_get_srsf_priority(void *element)
{ {
struct sandbox *sandbox = (struct sandbox *)element; struct sandbox *sandbox = (struct sandbox *)element;
uint64_t now = __getcycles(); uint64_t now = __getcycles();
int64_t remaining_slack = sandbox->remaining_slack - (now - sandbox->last_update_timestamp); uint64_t remaining_slack = sandbox->remaining_slack - (now - sandbox->last_update_timestamp);
if (remaining_slack < 0) { return remaining_slack;
return 0;
}
return remaining_slack;
}; };
/** /**
@ -218,24 +216,24 @@ sandbox_mem_print_perf(struct sandbox *sandbox)
/* If the log was not defined by an environment variable, early out */ /* If the log was not defined by an environment variable, early out */
if (runtime_sandbox_perf_log == NULL) return; if (runtime_sandbox_perf_log == NULL) return;
uint32_t total_time_us = sandbox->total_time / runtime_processor_speed_MHz; uint64_t total_time_us = sandbox->total_time / runtime_processor_speed_MHz;
uint32_t queued_us = (sandbox->allocation_timestamp - sandbox->enqueue_timestamp) uint64_t queued_us = (sandbox->allocation_timestamp - sandbox->enqueue_timestamp)
/ runtime_processor_speed_MHz; / runtime_processor_speed_MHz;
uint32_t initializing_us = sandbox->initializing_duration / runtime_processor_speed_MHz; uint64_t initializing_us = sandbox->initializing_duration / runtime_processor_speed_MHz;
uint32_t runnable_us = sandbox->runnable_duration / runtime_processor_speed_MHz; uint64_t runnable_us = sandbox->runnable_duration / runtime_processor_speed_MHz;
uint32_t running_us = sandbox->running_duration / runtime_processor_speed_MHz; uint64_t running_us = sandbox->running_duration / runtime_processor_speed_MHz;
uint32_t blocked_us = sandbox->blocked_duration / runtime_processor_speed_MHz; uint64_t blocked_us = sandbox->blocked_duration / runtime_processor_speed_MHz;
uint32_t returned_us = sandbox->returned_duration / runtime_processor_speed_MHz; uint64_t returned_us = sandbox->returned_duration / runtime_processor_speed_MHz;
if (sandbox->module->next_module == NULL) { if (sandbox->module->next_module == NULL) {
uint32_t total_time = (sandbox->completion_timestamp - sandbox->request_arrival_timestamp) / runtime_processor_speed_MHz; uint64_t total_time = (sandbox->completion_timestamp - sandbox->request_arrival_timestamp) / runtime_processor_speed_MHz;
bool miss_deadline = sandbox->completion_timestamp > sandbox->absolute_deadline ? true : false; bool miss_deadline = sandbox->completion_timestamp > sandbox->absolute_deadline ? true : false;
uint32_t delayed_us = (sandbox->completion_timestamp - sandbox->absolute_deadline) uint64_t delayed_us = (sandbox->completion_timestamp - sandbox->absolute_deadline)
/ runtime_processor_speed_MHz; / runtime_processor_speed_MHz;
if (miss_deadline) { if (miss_deadline) {
mem_log("%lu miss deadline, delayed %u us, actual cost %u module name %s\n", sandbox->id, delayed_us, total_time, sandbox->module->name); mem_log("%u miss deadline, delayed %lu us, actual cost %lu module name %s\n", sandbox->id, delayed_us, total_time, sandbox->module->name);
} else { } else {
mem_log("%lu meet deadline, actual cost %u module name %s\n", sandbox->id, total_time, sandbox->module->name); mem_log("%u meet deadline, actual cost %lu module name %s\n", sandbox->id, total_time, sandbox->module->name);
} }
} }
@ -244,7 +242,7 @@ sandbox_mem_print_perf(struct sandbox *sandbox)
* becomes more intelligent, then peak linear memory size needs to be tracked * becomes more intelligent, then peak linear memory size needs to be tracked
* separately from current linear memory size. * separately from current linear memory size.
*/ */
mem_log("%lu,%s():%d,%s,%u,%u,%u,%u,%u,%u,%u,%u,%u\n", sandbox->id, mem_log("%d,%u,%s():%d,%s,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%u\n", worker_thread_idx, sandbox->id,
sandbox->module->name, sandbox->module->port, sandbox_state_stringify(sandbox->state), sandbox->module->name, sandbox->module->port, sandbox_state_stringify(sandbox->state),
sandbox->module->relative_deadline_us, total_time_us, queued_us, initializing_us, runnable_us, sandbox->module->relative_deadline_us, total_time_us, queued_us, initializing_us, runnable_us,
running_us, blocked_us, returned_us, sandbox->linear_memory_size); running_us, blocked_us, returned_us, sandbox->linear_memory_size);

@ -16,6 +16,8 @@
#include "sandbox_types.h" #include "sandbox_types.h"
#include "scheduler.h" #include "scheduler.h"
extern uint64_t system_start_timestamp;
/** /**
* Receive and Parse the Request for the current sandbox * Receive and Parse the Request for the current sandbox
* @return 0 if message parsing complete, -1 on error * @return 0 if message parsing complete, -1 on error
@ -44,6 +46,9 @@ sandbox_receive_request(struct sandbox *sandbox)
if (recved < 0) { if (recved < 0) {
if (errno == EAGAIN) { if (errno == EAGAIN) {
//uint64_t block_time = __getcycles() - system_start_timestamp;
//mem_log("time %lu blocked, request id:%d name %s obj=%p remaining slack %lu\n", block_time,
// sandbox->id, sandbox->module->name, sandbox, sandbox->remaining_slack);
scheduler_block(); scheduler_block();
continue; continue;
} else { } else {

@ -51,7 +51,7 @@ sandbox_set_as_complete(struct sandbox *sandbox, sandbox_state_t last_state)
runtime_sandbox_total_decrement(last_state); runtime_sandbox_total_decrement(last_state);
/* Admissions Control Post Processing */ /* Admissions Control Post Processing */
admissions_info_update(&sandbox->module->admissions_info, sandbox->total_time / runtime_processor_speed_MHz); admissions_info_update(&sandbox->module->admissions_info, sandbox->running_duration);
admissions_control_subtract(sandbox->admissions_estimate); admissions_control_subtract(sandbox->admissions_estimate);
perf_window_print(&sandbox->module->admissions_info.perf_window); perf_window_print(&sandbox->module->admissions_info.perf_window);

@ -41,6 +41,7 @@ sandbox_set_as_error(struct sandbox *sandbox, sandbox_state_t last_state)
case SANDBOX_RUNNING: { case SANDBOX_RUNNING: {
sandbox->running_duration += duration_of_last_state; sandbox->running_duration += duration_of_last_state;
local_runqueue_delete(sandbox); local_runqueue_delete(sandbox);
local_workload_complete(sandbox);
break; break;
} }
default: { default: {

@ -51,6 +51,8 @@ sandbox_set_as_initialized(struct sandbox *sandbox, struct sandbox_request *sand
/* Copy the socket descriptor, address, and arguments of the client invocation */ /* Copy the socket descriptor, address, and arguments of the client invocation */
sandbox->absolute_deadline = sandbox_request->absolute_deadline; sandbox->absolute_deadline = sandbox_request->absolute_deadline;
sandbox->remaining_slack = sandbox_request->remaining_slack;
sandbox->last_update_timestamp = sandbox_request->last_update_timestamp;
sandbox->arguments = (void *)sandbox_request->arguments; sandbox->arguments = (void *)sandbox_request->arguments;
sandbox->client_socket_descriptor = sandbox_request->socket_descriptor; sandbox->client_socket_descriptor = sandbox_request->socket_descriptor;
memcpy(&sandbox->client_address, &sandbox_request->socket_address, sizeof(struct sockaddr)); memcpy(&sandbox->client_address, &sandbox_request->socket_address, sizeof(struct sockaddr));

@ -35,6 +35,7 @@ sandbox_set_as_returned(struct sandbox *sandbox, sandbox_state_t last_state)
sandbox->total_time = now - sandbox->enqueue_timestamp; sandbox->total_time = now - sandbox->enqueue_timestamp;
sandbox->running_duration += duration_of_last_state; sandbox->running_duration += duration_of_last_state;
local_runqueue_delete(sandbox); local_runqueue_delete(sandbox);
local_workload_complete(sandbox);
sandbox_free_linear_memory(sandbox); sandbox_free_linear_memory(sandbox);
break; break;
} }

@ -33,6 +33,7 @@ sandbox_set_as_runnable(struct sandbox *sandbox, sandbox_state_t last_state)
case SANDBOX_INITIALIZED: { case SANDBOX_INITIALIZED: {
sandbox->initializing_duration += duration_of_last_state; sandbox->initializing_duration += duration_of_last_state;
local_runqueue_add(sandbox); local_runqueue_add(sandbox);
local_workload_add(sandbox);
break; break;
} }
case SANDBOX_BLOCKED: { case SANDBOX_BLOCKED: {

@ -3,10 +3,12 @@
#include <assert.h> #include <assert.h>
#include <stdint.h> #include <stdint.h>
#include "memlogging.h"
#include "arch/getcycles.h" #include "arch/getcycles.h"
#include "panic.h" #include "panic.h"
#include "sandbox_types.h" #include "sandbox_types.h"
extern uint64_t system_start_timestamp;
static inline void static inline void
sandbox_set_as_running(struct sandbox *sandbox, sandbox_state_t last_state) sandbox_set_as_running(struct sandbox *sandbox, sandbox_state_t last_state)
{ {
@ -19,11 +21,16 @@ sandbox_set_as_running(struct sandbox *sandbox, sandbox_state_t last_state)
switch (last_state) { switch (last_state) {
case SANDBOX_RUNNABLE: { case SANDBOX_RUNNABLE: {
sandbox->remaining_slack -= now - sandbox->last_update_timestamp; //uint64_t start_execution = now - system_start_timestamp;
//uint64_t last = sandbox->last_update_timestamp;
//uint64_t last_rs = sandbox->remaining_slack;
sandbox->remaining_slack -= (now - sandbox->last_update_timestamp);
sandbox->last_update_timestamp = now; sandbox->last_update_timestamp = now;
sandbox->runnable_duration += duration_of_last_state; sandbox->runnable_duration += duration_of_last_state;
current_sandbox_set(sandbox); current_sandbox_set(sandbox);
runtime_worker_threads_deadline[worker_thread_idx] = sandbox->absolute_deadline; runtime_worker_threads_deadline[worker_thread_idx] = sandbox->absolute_deadline;
//mem_log("time %lu sandbox starts running, request id:%d name %s obj=%p remaining slack %lu, last_rs %lu now %lu last %lu \n", start_execution,
// sandbox->id, sandbox->module->name, sandbox, sandbox->remaining_slack, last_rs, now, last);
/* Does not handle context switch because the caller knows if we need to use fast or slow switched */ /* Does not handle context switch because the caller knows if we need to use fast or slow switched */
break; break;
} }
@ -33,11 +40,11 @@ sandbox_set_as_running(struct sandbox *sandbox, sandbox_state_t last_state)
} }
} }
sandbox->last_state_change_timestamp = now;
sandbox->state = SANDBOX_RUNNING;
/* State Change Bookkeeping */ /* State Change Bookkeeping */
sandbox_state_log_transition(sandbox->id, last_state, SANDBOX_RUNNING); sandbox_state_log_transition(sandbox->id, last_state, SANDBOX_RUNNING);
runtime_sandbox_total_increment(SANDBOX_RUNNING); runtime_sandbox_total_increment(SANDBOX_RUNNING);
runtime_sandbox_total_decrement(last_state); runtime_sandbox_total_decrement(last_state);
sandbox->last_state_change_timestamp = now;
sandbox->state = SANDBOX_RUNNING;
} }

@ -33,6 +33,7 @@ enum SCHEDULER
extern enum SCHEDULER scheduler; extern enum SCHEDULER scheduler;
extern _Atomic uint32_t scheduling_counter; extern _Atomic uint32_t scheduling_counter;
extern uint64_t system_start_timestamp;
static inline struct sandbox * static inline struct sandbox *
scheduler_edf_get_next() scheduler_edf_get_next()
@ -82,6 +83,11 @@ scheduler_srsf_get_next()
* This will be placed at the head of the local runqueue */ * This will be placed at the head of the local runqueue */
if (global_remaining_slack < local_remaining_slack) { if (global_remaining_slack < local_remaining_slack) {
if (global_request_scheduler_remove_if_earlier(&request, local_remaining_slack) == 0) { if (global_request_scheduler_remove_if_earlier(&request, local_remaining_slack) == 0) {
//uint64_t pop_time = __getcycles() - system_start_timestamp;
//mem_log("time %lu remove from GQ, request id:%d name %s remaining slack %lu\n", pop_time,
// request->id, request->module->name, request->remaining_slack);
assert(request != NULL); assert(request != NULL);
struct sandbox *global = sandbox_allocate(request); struct sandbox *global = sandbox_allocate(request);
if (!global) goto err_allocate; if (!global) goto err_allocate;
@ -198,17 +204,24 @@ static inline void
scheduler_preempt(ucontext_t *user_context) scheduler_preempt(ucontext_t *user_context)
{ {
assert(user_context != NULL); assert(user_context != NULL);
/* Process epoll to make sure that all runnable jobs are considered for execution */
worker_thread_execute_epoll_loop();
struct sandbox *current = current_sandbox_get(); struct sandbox *current = current_sandbox_get();
assert(current != NULL); assert(current != NULL);
assert(current->state == SANDBOX_RUNNING); assert(current->state == SANDBOX_RUNNING);
/* This is for better state-change bookkeeping */
uint64_t now = __getcycles();
uint64_t duration_of_last_state = now - current->last_state_change_timestamp;
current->running_duration += duration_of_last_state;
/* Process epoll to make sure that all runnable jobs are considered for execution */
worker_thread_execute_epoll_loop();
struct sandbox *next = scheduler_get_next(); struct sandbox *next = scheduler_get_next();
assert(next != NULL); assert(next != NULL);
/* This is for better state-change bookkeeping */
current->last_state_change_timestamp = __getcycles();
/* If current equals next, no switch is necessary, so resume execution */ /* If current equals next, no switch is necessary, so resume execution */
if (current == next) return; if (current == next) return;
@ -224,6 +237,7 @@ scheduler_preempt(ucontext_t *user_context)
/* Update current_sandbox to the next sandbox */ /* Update current_sandbox to the next sandbox */
assert(next->state == SANDBOX_RUNNABLE); assert(next->state == SANDBOX_RUNNABLE);
//printf("scheduler_preempt...\n");
sandbox_set_as_running(next, SANDBOX_RUNNABLE); sandbox_set_as_running(next, SANDBOX_RUNNABLE);
switch (next->ctxt.variant) { switch (next->ctxt.variant) {
@ -314,6 +328,7 @@ scheduler_switch_to(struct sandbox *next_sandbox)
} }
scheduler_log_sandbox_switch(current_sandbox, next_sandbox); scheduler_log_sandbox_switch(current_sandbox, next_sandbox);
//printf("scheduler_switch_to...\n");
sandbox_set_as_running(next_sandbox, next_sandbox->state); sandbox_set_as_running(next_sandbox, next_sandbox->state);
arch_context_switch(current_context, next_context); arch_context_switch(current_context, next_context);
} }

@ -14,7 +14,7 @@ admissions_info_initialize(struct admissions_info *self, char* module_name, int
{ {
assert(self != NULL); assert(self != NULL);
perf_window_initialize(&self->perf_window, module_name); perf_window_initialize(&self->perf_window, module_name);
if (unlikely(percentile < 50 || percentile > 99)) panic("Invalid admissions percentile"); //if (unlikely(percentile < 50 || percentile > 99)) panic("Invalid admissions percentile");
self->percentile = percentile; self->percentile = percentile;
self->control_index = PERF_WINDOW_BUFFER_SIZE * percentile / 100; self->control_index = PERF_WINDOW_BUFFER_SIZE * percentile / 100;
#ifdef ADMISSIONS_CONTROL #ifdef ADMISSIONS_CONTROL
@ -45,7 +45,7 @@ admission_info_get_percentile(struct admissions_info *self)
/* /*
* Adds an execution value to the perf window and calculates and caches and updated estimate * Adds an execution value to the perf window and calculates and caches and updated estimate
* @param self * @param self
* @param execution_duration * @param execution_duration in cycles
*/ */
void void
admissions_info_update(struct admissions_info *self, uint64_t execution_duration) admissions_info_update(struct admissions_info *self, uint64_t execution_duration)

@ -9,6 +9,8 @@
#include "module.h" #include "module.h"
#include "software_interrupt.h" #include "software_interrupt.h"
extern uint64_t system_start_timestamp;
__thread struct sandbox *worker_thread_current_sandbox = NULL; __thread struct sandbox *worker_thread_current_sandbox = NULL;
__thread struct sandbox_context_cache local_sandbox_context_cache = { __thread struct sandbox_context_cache local_sandbox_context_cache = {
@ -109,8 +111,13 @@ current_sandbox_start(void)
current_sandbox_disable_preemption(sandbox); current_sandbox_disable_preemption(sandbox);
sandbox->completion_timestamp = __getcycles(); sandbox->completion_timestamp = __getcycles();
/* Function code execution failed, terminate the request */
if (next_module != NULL) { if (sandbox->return_value < 0) {
/* TODO: Simply using goto err is not ideal because it does not print out the response message of the function code.
* Should return 400 and the err message in the http response body.
*/
goto err;
} else if (next_module != NULL) {
/* Generate a new request, copy the current sandbox's output to the next request's buffer, and put it to the global queue */ /* Generate a new request, copy the current sandbox's output to the next request's buffer, and put it to the global queue */
ssize_t output_length = sandbox->request_response_data_length - sandbox->request_length; ssize_t output_length = sandbox->request_response_data_length - sandbox->request_length;
char * pre_func_output = (char *)malloc(output_length); char * pre_func_output = (char *)malloc(output_length);
@ -121,6 +128,9 @@ current_sandbox_start(void)
memcpy(pre_func_output, sandbox->request_response_data + sandbox->request_length, output_length); memcpy(pre_func_output, sandbox->request_response_data + sandbox->request_length, output_length);
uint64_t enqueue_timestamp = __getcycles(); uint64_t enqueue_timestamp = __getcycles();
//uint64_t current_rs = enqueue_timestamp - system_start_timestamp;
//mem_log("time %lu request id:%d executing, name:%s remaining slack %lu\n", current_rs,
// sandbox->id, sandbox->module->name, sandbox->remaining_slack);
struct sandbox_request *sandbox_request = struct sandbox_request *sandbox_request =
sandbox_request_allocate(next_module, false, sandbox->request_length, sandbox_request_allocate(next_module, false, sandbox->request_length,
next_module->name, sandbox->client_socket_descriptor, next_module->name, sandbox->client_socket_descriptor,

@ -1,6 +1,8 @@
#include "global_request_scheduler.h" #include "global_request_scheduler.h"
#include "memlogging.h"
#include "panic.h" #include "panic.h"
extern uint64_t system_start_timestamp;
/* Default uninitialized implementations of the polymorphic interface */ /* Default uninitialized implementations of the polymorphic interface */
__attribute__((noreturn)) static struct sandbox_request * __attribute__((noreturn)) static struct sandbox_request *
uninitialized_add(void *arg) uninitialized_add(void *arg)
@ -45,6 +47,11 @@ global_request_scheduler_initialize(struct global_request_scheduler_config *conf
struct sandbox_request * struct sandbox_request *
global_request_scheduler_add(struct sandbox_request *sandbox_request) global_request_scheduler_add(struct sandbox_request *sandbox_request)
{ {
uint64_t now = __getcycles();
uint64_t arrive_time = now - system_start_timestamp;
//mem_log("time %lu request id:%d arrived, name:%s remaining_slack %lu now %lu, sys_start %lu \n",
// arrive_time, sandbox_request->id, sandbox_request->module->name,
// sandbox_request->remaining_slack, now, system_start_timestamp);
assert(sandbox_request != NULL); assert(sandbox_request != NULL);
return global_request_scheduler.add_fn(sandbox_request); return global_request_scheduler.add_fn(sandbox_request);
} }

@ -76,10 +76,7 @@ sandbox_request_get_priority_srsf_fn(void *element)
{ {
struct sandbox_request *sandbox_request = (struct sandbox_request *)element; struct sandbox_request *sandbox_request = (struct sandbox_request *)element;
uint64_t now = __getcycles(); uint64_t now = __getcycles();
int64_t remaining_slack = sandbox_request->remaining_slack - (now - sandbox_request->last_update_timestamp); uint64_t remaining_slack = sandbox_request->remaining_slack - (now - sandbox_request->last_update_timestamp);
if (remaining_slack < 0) {
return 0;
}
return remaining_slack; return remaining_slack;
}; };

@ -8,6 +8,7 @@
#include "listener_thread.h" #include "listener_thread.h"
#include "runtime.h" #include "runtime.h"
extern uint64_t system_start_timestamp;
/* /*
* Descriptor of the epoll instance used to monitor the socket descriptors of registered * Descriptor of the epoll instance used to monitor the socket descriptors of registered
* serverless modules. The listener cores listens for incoming client requests through this. * serverless modules. The listener cores listens for incoming client requests through this.
@ -176,13 +177,14 @@ listener_thread_main(void *dummy)
} }
/* get total estimated execution time */ /* get total estimated execution time */
uint64_t estimated_execution_time = admission_info_get_percentile(&module->admissions_info); uint64_t estimated_execution_time = admission_info_get_percentile(&module->admissions_info);
struct module * next_module = module->next_module; struct module * next_module = module;
while(next_module) { while(next_module) {
estimated_execution_time += admission_info_get_percentile(&next_module->admissions_info); estimated_execution_time += admission_info_get_percentile(&next_module->admissions_info);
next_module = next_module->next_module; next_module = next_module->next_module;
} }
uint64_t remaining_slack = module->relative_deadline - estimated_execution_time; /* Adding system start timestamp to avoid negative remaining slack in the following update. They are all cycles */
uint64_t remaining_slack = system_start_timestamp + module->relative_deadline - estimated_execution_time;
/* Allocate a Sandbox Request */ /* Allocate a Sandbox Request */
struct sandbox_request *sandbox_request = struct sandbox_request *sandbox_request =

@ -2,15 +2,24 @@
#ifdef LOG_LOCAL_RUNQUEUE #ifdef LOG_LOCAL_RUNQUEUE
#include <stdint.h> #include <stdint.h>
#endif #endif
#include "memlogging.h"
#include "local_runqueue.h" #include "local_runqueue.h"
#include "admissions_control.h"
extern __thread int worker_thread_idx;
extern uint32_t runtime_processor_speed_MHz;
extern uint64_t system_start_timestamp;
static struct local_runqueue_config local_runqueue; static struct local_runqueue_config local_runqueue;
#ifdef LOG_LOCAL_RUNQUEUE #ifdef LOG_LOCAL_RUNQUEUE
__thread uint32_t local_runqueue_count = 0; __thread uint32_t local_runqueue_count = 0;
#endif #endif
__thread uint32_t local_workload_count = 0;
__thread uint32_t local_total_workload_count = 0;
/* The sum of requests count * requests' execution time */
__thread uint64_t local_realtime_workload_us = 0;
__thread uint64_t local_total_realtime_workload_us = 0;
/* Initializes a concrete implementation of the sandbox request scheduler interface */ /* Initializes a concrete implementation of the sandbox request scheduler interface */
void void
local_runqueue_initialize(struct local_runqueue_config *config) local_runqueue_initialize(struct local_runqueue_config *config)
@ -67,3 +76,40 @@ local_runqueue_get_next()
assert(local_runqueue.get_next_fn != NULL); assert(local_runqueue.get_next_fn != NULL);
return local_runqueue.get_next_fn(); return local_runqueue.get_next_fn();
}; };
/**
 * Record that a new request has been assigned to this worker thread.
 * Increments both the instantaneous and the cumulative workload counters,
 * then charges the module's estimated execution time — converted from
 * cycles to microseconds via runtime_processor_speed_MHz — to the
 * real-time workload accumulators, and logs the new totals.
 */
void
local_workload_add(struct sandbox *sandbox)
{
	assert(sandbox);

	/* Timestamp is relative to runtime startup, in cycles */
	uint64_t timestamp = __getcycles() - system_start_timestamp;

	/* Estimated cost of this request in microseconds (percentile estimate is in cycles).
	 * NOTE(review): assumes runtime_processor_speed_MHz was initialized to a nonzero
	 * value before any workload is added — confirm against runtime startup. */
	uint64_t estimated_cost_us = admission_info_get_percentile(&sandbox->module->admissions_info)
	                             / runtime_processor_speed_MHz;

	local_workload_count++;
	local_total_workload_count++;
	local_realtime_workload_us += estimated_cost_us;
	local_total_realtime_workload_us += estimated_cost_us;

	mem_log("time %lu thread %d workload %u total workload %u real-time workload(us) %lu total real-time workload %lu\n",
	        timestamp, worker_thread_idx, local_workload_count, local_total_workload_count, local_realtime_workload_us,
	        local_total_realtime_workload_us);
}
/**
 * Record that a request has completed on this worker thread.
 * Decrements the instantaneous workload counter and subtracts the module's
 * estimated execution time (cycles converted to microseconds) from the
 * real-time workload accumulator, then logs the new totals.
 *
 * Both unsigned counters are clamped at zero: the original guarded
 * local_realtime_workload_us against underflow (the percentile estimate
 * may have changed between add and complete) but left local_workload_count
 * free to wrap if a complete ever arrives without a matching add. Apply
 * the same clamp discipline to both.
 */
void
local_workload_complete(struct sandbox *sandbox)
{
	assert(sandbox);

	/* Timestamp is relative to runtime startup, in cycles */
	uint64_t timestamp = __getcycles() - system_start_timestamp;

	/* Clamp rather than wrap: a uint32_t underflow here would report a
	 * bogus workload of ~4 billion for the rest of the thread's life */
	if (local_workload_count > 0) local_workload_count--;

	uint64_t estimated_cost_us = admission_info_get_percentile(&sandbox->module->admissions_info)
	                             / runtime_processor_speed_MHz;
	if (local_realtime_workload_us < estimated_cost_us) {
		local_realtime_workload_us = 0;
	} else {
		local_realtime_workload_us -= estimated_cost_us;
	}

	mem_log("time %lu thread %d workload %u total workload %u real-time workload(us) %lu total real-time workload %lu\n",
	        timestamp, worker_thread_idx, local_workload_count, local_total_workload_count, local_realtime_workload_us,
	        local_total_realtime_workload_us);
}

@ -30,7 +30,7 @@ uint32_t runtime_first_worker_processor = 1;
uint32_t runtime_processor_speed_MHz = 0; uint32_t runtime_processor_speed_MHz = 0;
uint32_t runtime_total_online_processors = 0; uint32_t runtime_total_online_processors = 0;
uint32_t runtime_worker_threads_count = 0; uint32_t runtime_worker_threads_count = 0;
uint64_t system_start_timestamp = 0;
FILE *runtime_sandbox_perf_log = NULL; FILE *runtime_sandbox_perf_log = NULL;
@ -226,7 +226,7 @@ runtime_configure()
printf("\tSandbox Performance Log: %s\n", runtime_sandbox_perf_log_path); printf("\tSandbox Performance Log: %s\n", runtime_sandbox_perf_log_path);
runtime_sandbox_perf_log = fopen(runtime_sandbox_perf_log_path, "w"); runtime_sandbox_perf_log = fopen(runtime_sandbox_perf_log_path, "w");
if (runtime_sandbox_perf_log == NULL) { perror("sandbox perf log"); } if (runtime_sandbox_perf_log == NULL) { perror("sandbox perf log"); }
fprintf(runtime_sandbox_perf_log, "id,function,state,deadline,actual,queued,initializing,runnable," fprintf(runtime_sandbox_perf_log, "threadid,id,function,state,deadline,actual,queued,initializing,runnable,"
"running,blocked,returned,memory\n"); "running,blocked,returned,memory\n");
} else { } else {
printf("\tSandbox Performance Log: Disabled\n"); printf("\tSandbox Performance Log: Disabled\n");
@ -341,6 +341,7 @@ main(int argc, char **argv)
exit(-1); exit(-1);
} }
system_start_timestamp = __getcycles();
printf("Starting the Sledge runtime\n"); printf("Starting the Sledge runtime\n");
log_compiletime_config(); log_compiletime_config();

@ -40,6 +40,7 @@ __thread _Atomic volatile sig_atomic_t software_interrupt_signal_depth
_Atomic volatile sig_atomic_t software_interrupt_deferred_sigalrm_max[RUNTIME_WORKER_THREAD_CORE_COUNT] = { 0 }; _Atomic volatile sig_atomic_t software_interrupt_deferred_sigalrm_max[RUNTIME_WORKER_THREAD_CORE_COUNT] = { 0 };
extern pthread_t listener_thread_id;
void void
software_interrupt_deferred_sigalrm_max_print() software_interrupt_deferred_sigalrm_max_print()
{ {
@ -106,10 +107,10 @@ sigalrm_propagate_workers(siginfo_t *signal_info)
* This function broadcasts the sigint signal to all other worker threads * This function broadcasts the sigint signal to all other worker threads
*/ */
static inline void static inline void
sigint_propagate_workers(siginfo_t *signal_info) sigint_propagate_workers_listener(siginfo_t *signal_info)
{ {
/* Signal was sent directly by the kernel, so forward to other threads */ /* Signal was sent directly by the kernel or user space, so forward to other threads */
if (signal_info->si_code == SI_KERNEL) { if (signal_info->si_code == SI_KERNEL || signal_info->si_code == SI_USER) {
for (int i = 0; i < runtime_worker_threads_count; i++) { for (int i = 0; i < runtime_worker_threads_count; i++) {
if (pthread_self() == runtime_worker_threads[i]) continue; if (pthread_self() == runtime_worker_threads[i]) continue;
@ -117,6 +118,10 @@ sigint_propagate_workers(siginfo_t *signal_info)
assert(runtime_worker_threads[i] != 0); assert(runtime_worker_threads[i] != 0);
pthread_kill(runtime_worker_threads[i], SIGINT); pthread_kill(runtime_worker_threads[i], SIGINT);
} }
/* send to listener thread */
if (pthread_self() != listener_thread_id) {
pthread_kill(listener_thread_id, SIGINT);
}
} else { } else {
/* Signal forwarded from another thread. Just confirm it resulted from pthread_kill */ /* Signal forwarded from another thread. Just confirm it resulted from pthread_kill */
assert(signal_info->si_code == SI_TKILL); assert(signal_info->si_code == SI_TKILL);
@ -173,7 +178,7 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
atomic_fetch_add(&software_interrupt_deferred_sigalrm, 1); atomic_fetch_add(&software_interrupt_deferred_sigalrm, 1);
} else { } else {
/* A worker thread received a SIGALRM while running a preemptable sandbox, so preempt */ /* A worker thread received a SIGALRM while running a preemptable sandbox, so preempt */
assert(current_sandbox->state == SANDBOX_RUNNING); //assert(current_sandbox->state == SANDBOX_RUNNING);
scheduler_preempt(user_context); scheduler_preempt(user_context);
} }
goto done; goto done;
@ -181,19 +186,20 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
case SIGUSR1: { case SIGUSR1: {
assert(current_sandbox); assert(current_sandbox);
assert(current_sandbox->ctxt.variant == ARCH_CONTEXT_VARIANT_SLOW); assert(current_sandbox->ctxt.variant == ARCH_CONTEXT_VARIANT_SLOW);
atomic_fetch_add(&software_interrupt_SIGUSR_count, 1); atomic_fetch_add(&software_interrupt_SIGUSR_count, 1);
#ifdef LOG_PREEMPTION #ifdef LOG_PREEMPTION
debuglog("Total SIGUSR1 Received: %d\n", software_interrupt_SIGUSR_count); debuglog("Total SIGUSR1 Received: %d\n", software_interrupt_SIGUSR_count);
debuglog("Restoring sandbox: %lu, Stack %llu\n", current_sandbox->id, debuglog("Restoring sandbox: %lu, Stack %llu\n", current_sandbox->id,
current_sandbox->ctxt.mctx.gregs[REG_RSP]); current_sandbox->ctxt.mctx.gregs[REG_RSP]);
#endif #endif
uint64_t now = __getcycles();
current_sandbox->last_state_change_timestamp = now;
arch_mcontext_restore(&user_context->uc_mcontext, &current_sandbox->ctxt); arch_mcontext_restore(&user_context->uc_mcontext, &current_sandbox->ctxt);
goto done; goto done;
} }
case SIGINT: { case SIGINT: {
/* Only the thread that receives SIGINT from the kernel will broadcast SIGINT to other worker threads */ /* Only the thread that receives SIGINT from the kernel or user space will broadcast SIGINT to other worker threads */
sigint_propagate_workers(signal_info); sigint_propagate_workers_listener(signal_info);
dump_log_to_file(); dump_log_to_file();
/* terminate itself */ /* terminate itself */
pthread_exit(0); pthread_exit(0);

@ -30,6 +30,7 @@ __thread int worker_thread_epoll_file_descriptor;
__thread int worker_thread_idx; __thread int worker_thread_idx;
extern FILE *runtime_sandbox_perf_log; extern FILE *runtime_sandbox_perf_log;
extern uint64_t system_start_timestamp;
/*********************** /***********************
* Worker Thread Logic * * Worker Thread Logic *
**********************/ **********************/
@ -81,8 +82,14 @@ worker_thread_main(void *argument)
worker_thread_execute_epoll_loop(); worker_thread_execute_epoll_loop();
/* Switch to a sandbox if one is ready to run */ /* Switch to a sandbox if one is ready to run */
next_sandbox = scheduler_get_next(); next_sandbox = scheduler_get_next();
if (next_sandbox != NULL) { scheduler_switch_to(next_sandbox); } if (next_sandbox != NULL) {
//uint64_t start_execution = __getcycles() - system_start_timestamp;
//mem_log("time %lu pop from GQ, request id:%d name %s obj=%p remaining slack %lu last_update_time %lu \n", start_execution,
// next_sandbox->id, next_sandbox->module->name, next_sandbox, next_sandbox->remaining_slack, next_sandbox->last_update_timestamp);
scheduler_switch_to(next_sandbox);
}
/* Clear the completion queue */ /* Clear the completion queue */
local_completion_queue_free(); local_completion_queue_free();

@ -11,10 +11,12 @@ declare project_path="$(
path=`pwd` path=`pwd`
echo $project_path echo $project_path
cd $project_path/runtime/bin cd $project_path/runtime/bin
export SLEDGE_DISABLE_PREEMPTION=true export SLEDGE_SCHEDULER=SRSF
#export SLEDGE_DISABLE_PREEMPTION=true
export SLEDGE_SANDBOX_PERF_LOG=$path/srsf.log
export LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" export LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH"
gdb --eval-command="handle SIGUSR1 nostop" \ gdb --eval-command="handle SIGUSR1 nostop" \
--eval-command="set pagination off" \ --eval-command="set pagination off" \
--eval-command="set substitute-path /sledge/runtime $project_path/runtime" \ --eval-command="set substitute-path /sledge/runtime $project_path/runtime" \
# --eval-command="run ../tests/test_armcifar10.json" \ --eval-command="run ../tests/test_multiple_image_processing2.json" \
./sledgert ./sledgert

@ -0,0 +1,6 @@
#!/bin/bash
pid=`ps -ef|grep "sledgert"|grep -v grep |awk '{print $2}'`
echo $pid
kill -2 $pid

@ -1,13 +1,43 @@
import sys import sys
import json import json
from collections import defaultdict from collections import defaultdict
import numpy as np
def def_value(): def def_value():
return 0 return 0
def count_miss_or_meet_deadline_requests(file_dir): def count_miss_or_meet_deadline_requests(file_dir, percentage):
#### get execution time
running_time_dict = defaultdict(def_value)
queuing_times_dict = defaultdict(def_value)
total_times_dict = defaultdict(def_value)
runnable_times_dict = defaultdict(def_value)
blocked_times_dict = defaultdict(def_value)
initializing_times_dict = defaultdict(def_value)
execution_times_dict = defaultdict(def_value)
running_times = defaultdict(list)
queuing_times = defaultdict(list)
total_times = defaultdict(list)
runnable_times = defaultdict(list)
blocked_times = defaultdict(list)
initializing_times = defaultdict(list)
execution_times = defaultdict(list)
####
request_counter = defaultdict(def_value) request_counter = defaultdict(def_value)
total_time_dist = defaultdict(list) total_time_dist = defaultdict(list)
total_workload_dist = defaultdict(def_value)
total_real_time_workload_dist = defaultdict(def_value)
real_time_workload_times_dist = defaultdict(list)
real_time_workload_workloads_dist = defaultdict(list)
real_time_workload_requests_dist = defaultdict(list)
min_time = sys.maxsize
# list[0] is meet deadline number, list[1] is miss deadline number
delays_dict = defaultdict(list)
max_latency_dist = defaultdict(def_value)
total_deadline = 0
miss_deadline_dist = defaultdict(def_value)
meet_deadline_dist = defaultdict(def_value)
meet_deadline = 0 meet_deadline = 0
miss_deadline = 0 miss_deadline = 0
max_sc = 0 max_sc = 0
@ -18,19 +48,61 @@ def count_miss_or_meet_deadline_requests(file_dir):
meet_deadline += 1 meet_deadline += 1
name = line.split(" ")[8] name = line.split(" ")[8]
request_counter[name] += 1 request_counter[name] += 1
total_time = line.split(" ")[5] total_time = int(line.split(" ")[5])
total_time_dist[name].append(total_time) total_time_dist[name].append(total_time)
if total_time > max_latency_dist[name]:
max_latency_dist[name] = total_time
meet_deadline_dist[name] += 1
if "miss deadline" in line: if "miss deadline" in line:
miss_deadline += 1 miss_deadline += 1
name = line.split(" ")[11] name = line.split(" ")[11]
total_time = line.split(" ")[8] total_time = int(line.split(" ")[8])
if total_time > max_latency_dist[name]:
max_latency_dist[name] = total_time
delay = int(line.split(" ")[4])
func = line.split(" ")[11]
delays_dict[func].append(delay)
request_counter[name] += 1 request_counter[name] += 1
total_time_dist[name].append(total_time) total_time_dist[name].append(total_time)
miss_deadline_dist[name] += 1
#print("name:", name) #print("name:", name)
if "scheduling count" in line: if "scheduling count" in line:
s_c = int(line.split(" ")[3]) s_c = int(line.split(" ")[3])
if max_sc < s_c: if max_sc < s_c:
max_sc = s_c max_sc = s_c
if "total workload" in line:
thread = line.split(" ")[3]
time = line.split(" ")[1]
if min_time > int(time):
min_time = int(time)
real_time_workload = line.split(" ")[11]
total_workload = int(line.split(" ")[8])
total_real_time_workload = int(line.split(" ")[15])
real_time_request = line.split(" ")[5]
real_time_workload_times_dist[thread].append(int(time))
real_time_workload_workloads_dist[thread].append(int(real_time_workload))
real_time_workload_requests_dist[thread].append(int(real_time_request))
if total_workload_dist[thread] < total_workload:
total_workload_dist[thread] = total_workload
if total_real_time_workload_dist[thread] < total_real_time_workload:
total_real_time_workload_dist[thread] = total_real_time_workload
### calculate the execution time
if "memory" in line or "total_time" in line or "min" in line or "miss" in line or "meet" in line or "time " in line or "scheduling count" in line:
continue
t = line.split(",")
id = t[1]
func_idx = t[2][-9]
joined_key = id + "_" + func_idx
running_time_dict[joined_key] += int(t[9])
queuing_times_dict[joined_key] += int(t[6])
total_times_dict[joined_key] += int(t[5])
runnable_times_dict[joined_key] += int(t[8])
blocked_times_dict[joined_key] += int(t[10])
initializing_times_dict[joined_key] += int(t[7])
###
miss_deadline_percentage = (miss_deadline * 100) / (miss_deadline + meet_deadline) miss_deadline_percentage = (miss_deadline * 100) / (miss_deadline + meet_deadline)
print("meet deadline num:", meet_deadline) print("meet deadline num:", meet_deadline)
print("miss deadline num:", miss_deadline) print("miss deadline num:", miss_deadline)
@ -38,17 +110,122 @@ def count_miss_or_meet_deadline_requests(file_dir):
print("miss deadline percentage:", miss_deadline_percentage) print("miss deadline percentage:", miss_deadline_percentage)
print("scheduling counter:", max_sc) print("scheduling counter:", max_sc)
func_name_dict = {
"cifar10_1": "105k",
"cifar10_2": "305k",
"cifar10_3": "5k",
"cifar10_4": "40k",
"resize": "resize",
"fibonacci": "fibonacci"
}
func_name_with_id = {
"1": "105k",
"2": "305k",
"3": "5k",
"4": "40k"
}
### get execution time
for key,value in running_time_dict.items():
func_idx = key.split("_")[1]
running_times[func_idx].append(value)
for key,value in queuing_times_dict.items():
func_idx = key.split("_")[1]
queuing_times[func_idx].append(value)
for key,value in runnable_times_dict.items():
func_idx = key.split("_")[1]
runnable_times[func_idx].append(value)
for key,value in blocked_times_dict.items():
func_idx = key.split("_")[1]
blocked_times[func_idx].append(value)
for key,value in initializing_times_dict.items():
func_idx = key.split("_")[1]
initializing_times[func_idx].append(value)
for key,value in total_times_dict.items():
func_idx = key.split("_")[1]
total_times[func_idx].append(value)
for key,value in request_counter.items(): for key,value in request_counter.items():
print(key + ":" + str(value)) print(func_name_dict[key], ":", str(value), "proportion:", (100*value)/(meet_deadline + miss_deadline))
for key,value in total_time_dist.items():
a = np.array(value)
p = np.percentile(a, int(percentage))
print(func_name_dict[key] + " " + percentage + " percentage is:" + str(p) + " mean is:" + str(np.mean(value)) + " max latency is:" + str(max_latency_dist[key]))
for key,value in meet_deadline_dist.items():
miss_value = miss_deadline_dist[key]
total_request = miss_value + value
miss_rate = (miss_value * 100) / total_request
print(func_name_dict[key] + " miss deadline rate:" + str(miss_rate) + " miss count is:" + str(miss_value) + " total request:" + str(total_request))
for key,value in real_time_workload_times_dist.items():
real_time_workload_times_dist[key] = [x - min_time for x in value]
for key,value in running_times.items():
#print("function:", key, func_name_with_id[key], key)
print("function:", key)
print(np.median(total_times[key]), np.median(running_times[key]), np.median(queuing_times[key]), np.median(runnable_times[key]), np.median(blocked_times[key]), np.median(initializing_times[key]))
for key, value in delays_dict.items():
new_value = [i/1000 for i in value]
p99 = np.percentile(new_value, 99)
print("function:", key, " delays:", p99)
total_workload = 0
with open("total_workload.txt", "w") as f:
for key,value in total_workload_dist.items():
total_workload += value
#print("thread " + key + " total workload:" + str(value))
pair = [key + " "]
pair.append(str(value))
f.writelines(pair)
f.write("\n")
print("total workload is:", total_workload)
with open("total_real_time_workload.txt", "w") as f:
for key,value in total_real_time_workload_dist.items():
#print("thread " + key + " total real time workload:" + str(value))
pair = [key + " "]
pair.append(str(value))
f.writelines(pair)
f.write("\n")
js = json.dumps(total_time_dist) js = json.dumps(total_time_dist)
f = open("total_time.txt", 'w') f = open("total_time.txt", 'w')
f.write(js) f.write(js)
f.close() f.close()
#for key,value in total_time_dist.items(): js2 = json.dumps(real_time_workload_times_dist)
# print(key + ":", value) f2 = open("real_workload_times.txt", 'w')
f2.write(js2)
f2.close()
js3 = json.dumps(real_time_workload_workloads_dist)
f3 = open("real_workload_workloads.txt", 'w')
f3.write(js3)
f3.close()
js4 = json.dumps(real_time_workload_requests_dist)
f4 = open("real_workload_requests.txt", 'w')
f4.write(js4)
f4.close()
js5 = json.dumps(running_times)
f5 = open("running_time.txt", 'w')
f5.write(js5)
f5.close()
js6 = json.dumps(total_times)
f6 = open("total_time2.txt", 'w')
f6.write(js6)
f6.close()
js7 = json.dumps(delays_dict)
f7 = open("delays.txt", 'w')
f7.write(js7)
f7.close()
for key,value in total_time_dist.items():
print(key + ": time list length is ", len(value))
if __name__ == "__main__": if __name__ == "__main__":
argv = sys.argv[1:] argv = sys.argv[1:]
if len(argv) < 1: if len(argv) < 1:
print("usage ", sys.argv[0], " file dir") print("usage ", sys.argv[0], " file dir" " percentage")
sys.exit() sys.exit()
count_miss_or_meet_deadline_requests(argv[0]) count_miss_or_meet_deadline_requests(argv[0], argv[1])

@ -26,20 +26,20 @@ def parse_file(file_dir):
fo = open(file_dir, "r+") fo = open(file_dir, "r+")
next(fo) next(fo)
for line in fo: for line in fo:
if "module " in line: #if "module " in line:
#jump two lines #jump two lines
next(fo) # next(fo)
continue # continue
if "min" in line or "miss" in line or "meet" in line: if "total_time" in line or "min" in line or "miss" in line or "meet" in line or "time " in line or "scheduling count" in line:
continue continue
t = line.split(",") t = line.split(",")
id = t[0] id = t[1]
running_time_dict[id] += int(t[8]) running_time_dict[id] += int(t[9])
queuing_times_dict[id] += int(t[5]) queuing_times_dict[id] += int(t[6])
total_times_dict[id] += int(t[4]) total_times_dict[id] += int(t[5])
runnable_times_dict[id] += int(t[7]) runnable_times_dict[id] += int(t[8])
blocked_times_dict[id] += int(t[9]) blocked_times_dict[id] += int(t[10])
initializing_times_dict[id] += int(t[6]) initializing_times_dict[id] += int(t[7])
#execution_times_dict[id] += int(t[11]) #execution_times_dict[id] += int(t[11])
for key,value in running_time_dict.items(): for key,value in running_time_dict.items():
running_times.append(value) running_times.append(value)

@ -31,16 +31,16 @@ def parse_file(file_dir):
next(fo) next(fo)
for line in fo: for line in fo:
t = line.split(",") t = line.split(",")
id = t[0] id = t[1]
ids.append(id) ids.append(id)
running_time_dict[id].append(int(t[8])) running_time_dict[id].append(int(t[9]))
queuing_times_dict[id].append(int(t[5])) queuing_times_dict[id].append(int(t[6]))
total_times_dict[id].append(int(t[4])) total_times_dict[id].append(int(t[5]))
runnable_times_dict[id].append(int(t[7])) runnable_times_dict[id].append(int(t[8]))
blocked_times_dict[id].append(int(t[9])) blocked_times_dict[id].append(int(t[10]))
initializing_times_dict[id].append(int(t[6])) initializing_times_dict[id].append(int(t[7]))
#execution_times_dict[id].append(int(t[11])) #execution_times_dict[id].append(int(t[11]))
real_total_times_dict[id].append(int(t[11])) real_total_times_dict[id].append(int(t[12]))
print(running_time_dict[0]) print(running_time_dict[0])
print("request-id,sandbox-id,completion,blocked,running,queuing,init\n") print("request-id,sandbox-id,completion,blocked,running,queuing,init\n")
list_len = len(running_time_dict[0]) list_len = len(running_time_dict[0])

@ -18,7 +18,9 @@ declare project_path="$(
echo $project_path echo $project_path
path=`pwd` path=`pwd`
#export SLEDGE_DISABLE_PREEMPTION=true #export SLEDGE_DISABLE_PREEMPTION=true
export SLEDGE_SCHEDULER=SRTF export SLEDGE_SCHEDULER=SRSF
#export SLEDGE_NWORKERS=1
#export SLEDGE_SCHEDULER=EDF
export SLEDGE_SANDBOX_PERF_LOG=$path/$output export SLEDGE_SANDBOX_PERF_LOG=$path/$output
echo $SLEDGE_SANDBOX_PERF_LOG echo $SLEDGE_SANDBOX_PERF_LOG
cd $project_path/runtime/bin cd $project_path/runtime/bin
@ -27,7 +29,9 @@ cd $project_path/runtime/bin
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_armcifar10.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_armcifar10.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_png2bmp.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_png2bmp.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_image_processing.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_image_processing.json
LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/mulitple_linear_chain.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/mulitple_linear_chain.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing.json
LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing3.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_fibonacci.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_fibonacci.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_sodresize.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_sodresize.json
#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_sodresize.json #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_sodresize.json

@ -0,0 +1,18 @@
#!/bin/bash
function usage {
echo "$0 [vmstat file] [pidstat file]"
exit 1
}
if [ $# != 2 ] ; then
usage
exit 1;
fi
vmstat_file=$1
pidstat_file=$2
sledge_pid=`ps -ef|grep "sledgert"|grep -v grep |awk '{print $2}'`
vmstat 1 > $vmstat_file 2>&1 &
pidstat -w 1 150 -p $sledge_pid > $pidstat_file 2>&1 &

@ -0,0 +1,6 @@
#!/bin/bash
pid=`ps -ef|grep "vmstat"|grep -v grep |awk '{print $2}'`
echo $pid
kill -2 $pid

@ -3,7 +3,7 @@
"name": "resize1", "name": "resize1",
"path": "resize_wasm.so", "path": "resize_wasm.so",
"port": 10000, "port": 10000,
"relative-deadline-us": 55474, "relative-deadline-us": 273199,
"argsize": 1, "argsize": 1,
"http-req-headers": [], "http-req-headers": [],
"http-req-content-type": "image/jpeg", "http-req-content-type": "image/jpeg",
@ -17,7 +17,7 @@
"name": "png2bmp1", "name": "png2bmp1",
"path": "C-Image-Manip_wasm.so", "path": "C-Image-Manip_wasm.so",
"port": 10001, "port": 10001,
"relative-deadline-us": 55474, "relative-deadline-us": 273199,
"argsize": 1, "argsize": 1,
"http-req-headers": [], "http-req-headers": [],
"http-req-content-type": "image/png", "http-req-content-type": "image/png",
@ -31,7 +31,7 @@
"name": "cifar10_1", "name": "cifar10_1",
"path": "cifar10_wasm.so", "path": "cifar10_wasm.so",
"port": 10002, "port": 10002,
"relative-deadline-us": 55474, "relative-deadline-us": 273199,
"argsize": 1, "argsize": 1,
"http-req-headers": [], "http-req-headers": [],
"http-req-content-type": "image/bmp", "http-req-content-type": "image/bmp",
@ -46,7 +46,7 @@
"name": "resize2", "name": "resize2",
"path": "resize_wasm.so", "path": "resize_wasm.so",
"port": 10003, "port": 10003,
"relative-deadline-us": 133722, "relative-deadline-us": 663177,
"argsize": 1, "argsize": 1,
"http-req-headers": [], "http-req-headers": [],
"http-req-content-type": "image/jpeg", "http-req-content-type": "image/jpeg",
@ -60,7 +60,7 @@
"name": "png2bmp2", "name": "png2bmp2",
"path": "C-Image-Manip_wasm.so", "path": "C-Image-Manip_wasm.so",
"port": 10004, "port": 10004,
"relative-deadline-us": 133722, "relative-deadline-us": 663177,
"argsize": 1, "argsize": 1,
"http-req-headers": [], "http-req-headers": [],
"http-req-content-type": "image/png", "http-req-content-type": "image/png",
@ -74,7 +74,7 @@
"name": "cifar10_2", "name": "cifar10_2",
"path": "cifar10_wasm.so", "path": "cifar10_wasm.so",
"port": 10005, "port": 10005,
"relative-deadline-us": 133722, "relative-deadline-us": 663177,
"argsize": 1, "argsize": 1,
"http-req-headers": [], "http-req-headers": [],
"http-req-content-type": "image/bmp", "http-req-content-type": "image/bmp",
@ -89,7 +89,7 @@
"name": "resize3", "name": "resize3",
"path": "resize_wasm.so", "path": "resize_wasm.so",
"port": 10006, "port": 10006,
"relative-deadline-us": 14446, "relative-deadline-us": 61882,
"argsize": 1, "argsize": 1,
"http-req-headers": [], "http-req-headers": [],
"http-req-content-type": "image/jpeg", "http-req-content-type": "image/jpeg",
@ -103,7 +103,7 @@
"name": "png2bmp3", "name": "png2bmp3",
"path": "C-Image-Manip_wasm.so", "path": "C-Image-Manip_wasm.so",
"port": 10007, "port": 10007,
"relative-deadline-us": 14446, "relative-deadline-us": 61882,
"argsize": 1, "argsize": 1,
"http-req-headers": [], "http-req-headers": [],
"http-req-content-type": "image/png", "http-req-content-type": "image/png",
@ -117,7 +117,7 @@
"name": "cifar10_3", "name": "cifar10_3",
"path": "cifar10_wasm.so", "path": "cifar10_wasm.so",
"port": 10008, "port": 10008,
"relative-deadline-us": 14446, "relative-deadline-us": 61882,
"argsize": 1, "argsize": 1,
"http-req-headers": [], "http-req-headers": [],
"http-req-content-type": "image/bmp", "http-req-content-type": "image/bmp",
@ -132,7 +132,7 @@
"name": "resize4", "name": "resize4",
"path": "resize_wasm.so", "path": "resize_wasm.so",
"port": 10009, "port": 10009,
"relative-deadline-us": 34420, "relative-deadline-us": 173824,
"argsize": 1, "argsize": 1,
"http-req-headers": [], "http-req-headers": [],
"http-req-content-type": "image/jpeg", "http-req-content-type": "image/jpeg",
@ -146,7 +146,7 @@
"name": "png2bmp4", "name": "png2bmp4",
"path": "C-Image-Manip_wasm.so", "path": "C-Image-Manip_wasm.so",
"port": 10010, "port": 10010,
"relative-deadline-us": 34420, "relative-deadline-us": 173824,
"argsize": 1, "argsize": 1,
"http-req-headers": [], "http-req-headers": [],
"http-req-content-type": "image/png", "http-req-content-type": "image/png",
@ -160,7 +160,7 @@
"name": "cifar10_4", "name": "cifar10_4",
"path": "cifar10_wasm.so", "path": "cifar10_wasm.so",
"port": 10011, "port": 10011,
"relative-deadline-us": 34420, "relative-deadline-us": 173824,
"argsize": 1, "argsize": 1,
"http-req-headers": [], "http-req-headers": [],
"http-req-content-type": "image/bmp", "http-req-content-type": "image/bmp",

Loading…
Cancel
Save