mt-dbf
Emil Abbasov 1 year ago
parent 1c968ff407
commit a230ad986d

@ -157,7 +157,14 @@
"sledge_abi_symbols.h": "c",
"mutex": "c",
"lock.h": "c",
"route_latency.h": "c"
"route_latency.h": "c",
"wasm_globals.h": "c",
"*.bak": "c",
"sandbox_set_as_initialized.h": "c",
"sandbox_set_as_allocated.h": "c",
"sandbox_total.h": "c",
"system_error": "c",
"metrics_server.h": "c"
},
"files.exclude": {
"**/.git": true,

@ -35,7 +35,7 @@ libsledge.clean:
.PHONY: runtime
runtime:
make -C runtime
./rsync.sh
.PHONY: runtime.clean
runtime.clean:

@ -12,7 +12,7 @@ LDFLAGS=-shared -fPIC -Wl,--export-dynamic,--whole-archive -L../libsledge/dist/
# LDFLAGS=-flto -fvisibility=hidden
# Strips out calls to assert() and disables debuglog
CFLAGS+=-DNDEBUG
# CFLAGS+=-DNDEBUG
dist:
mkdir -p dist

@ -5,7 +5,7 @@ INCLUDES := -Iinclude/
CFLAGS := -fPIC -O3 -flto -ftls-model=initial-exec
# Strips out calls to assert() and disables debuglog
CFLAGS+=-DNDEBUG
# CFLAGS+=-DNDEBUG
# CFI Sanitizer
# CFLAGS+=-fvisibility=default -fsanitize=cfi

@ -28,6 +28,7 @@ struct sledge_abi__wasm_memory {
uint64_t capacity; /* Size backed by actual pages */
uint64_t max; /* Soft cap in bytes. Defaults to 4GB */
uint8_t *buffer; /* Backing heap allocation. Different lifetime because realloc might move this */
uint64_t id;
};
/* This structure is the runtime representation of the unique state of a module instance

@ -18,6 +18,7 @@ INLINE float
get_f32(uint32_t offset)
{
assert(sledge_abi__current_wasm_module_instance.memory.buffer != NULL);
assert(offset < sledge_abi__current_wasm_module_instance.memory.size);
return *(float *)&sledge_abi__current_wasm_module_instance.memory.buffer[offset];
}
@ -26,6 +27,7 @@ INLINE double
get_f64(uint32_t offset)
{
assert(sledge_abi__current_wasm_module_instance.memory.buffer != NULL);
assert(offset < sledge_abi__current_wasm_module_instance.memory.size);
return *(double *)&sledge_abi__current_wasm_module_instance.memory.buffer[offset];
}
@ -34,6 +36,7 @@ INLINE int8_t
get_i8(uint32_t offset)
{
assert(sledge_abi__current_wasm_module_instance.memory.buffer != NULL);
assert(offset < sledge_abi__current_wasm_module_instance.memory.size);
return *(int8_t *)&sledge_abi__current_wasm_module_instance.memory.buffer[offset];
}
@ -42,6 +45,7 @@ INLINE int16_t
get_i16(uint32_t offset)
{
assert(sledge_abi__current_wasm_module_instance.memory.buffer != NULL);
assert(offset < sledge_abi__current_wasm_module_instance.memory.size);
return *(int16_t *)&sledge_abi__current_wasm_module_instance.memory.buffer[offset];
}
@ -50,6 +54,7 @@ INLINE int32_t
get_i32(uint32_t offset)
{
assert(sledge_abi__current_wasm_module_instance.memory.buffer != NULL);
assert(offset < sledge_abi__current_wasm_module_instance.memory.size);
return *(int32_t *)&sledge_abi__current_wasm_module_instance.memory.buffer[offset];
}
@ -58,6 +63,7 @@ INLINE int64_t
get_i64(uint32_t offset)
{
assert(sledge_abi__current_wasm_module_instance.memory.buffer != NULL);
assert(offset < sledge_abi__current_wasm_module_instance.memory.size);
return *(int64_t *)&sledge_abi__current_wasm_module_instance.memory.buffer[offset];
}
@ -67,6 +73,7 @@ INLINE void
set_f32(uint32_t offset, float value)
{
assert(sledge_abi__current_wasm_module_instance.memory.buffer != NULL);
assert(offset < sledge_abi__current_wasm_module_instance.memory.size);
*(float *)&sledge_abi__current_wasm_module_instance.memory.buffer[offset] = value;
}
@ -75,6 +82,7 @@ INLINE void
set_f64(uint32_t offset, double value)
{
assert(sledge_abi__current_wasm_module_instance.memory.buffer != NULL);
assert(offset < sledge_abi__current_wasm_module_instance.memory.size);
*(double *)&sledge_abi__current_wasm_module_instance.memory.buffer[offset] = value;
}
@ -83,6 +91,7 @@ INLINE void
set_i8(uint32_t offset, int8_t value)
{
assert(sledge_abi__current_wasm_module_instance.memory.buffer != NULL);
assert(offset < sledge_abi__current_wasm_module_instance.memory.size);
*(int8_t *)&sledge_abi__current_wasm_module_instance.memory.buffer[offset] = value;
}
@ -91,6 +100,7 @@ INLINE void
set_i16(uint32_t offset, int16_t value)
{
assert(sledge_abi__current_wasm_module_instance.memory.buffer != NULL);
assert(offset < sledge_abi__current_wasm_module_instance.memory.size);
*(int16_t *)&sledge_abi__current_wasm_module_instance.memory.buffer[offset] = value;
}
@ -99,6 +109,7 @@ INLINE void
set_i32(uint32_t offset, int32_t value)
{
assert(sledge_abi__current_wasm_module_instance.memory.buffer != NULL);
assert(offset < sledge_abi__current_wasm_module_instance.memory.size);
*(int32_t *)&sledge_abi__current_wasm_module_instance.memory.buffer[offset] = value;
}
@ -107,6 +118,7 @@ INLINE void
set_i64(uint32_t offset, int64_t value)
{
assert(sledge_abi__current_wasm_module_instance.memory.buffer != NULL);
assert(offset < sledge_abi__current_wasm_module_instance.memory.size);
*(int64_t *)&sledge_abi__current_wasm_module_instance.memory.buffer[offset] = value;
}

@ -0,0 +1,27 @@
#!/bin/bash
# Sync the local ./tests and ./runtime trees to remote experiment machines.
# Only the uncommented rsync lines below are active; the commented ones are
# kept as templates for other CloudLab / lab-dell / CMU hosts.
# NOTE(review): usernames, IPs, and destination paths are machine-specific —
# update them before reusing this script on a different testbed.
# rsync -ru --progress --exclude={'res','out.txt','out.dat','err.dat'} ./tests/* emil@c220g2-011017.wisc.cloudlab.us:/users/emil/sledge-server/tests/
# rsync -ru --progress --exclude={'res','out.txt','out.dat','err.dat'} ./tests/* emil@c220g2-011016.wisc.cloudlab.us:/users/emil/sledge-client/tests/
# rsync -ru --progress --exclude={'thirdparty','res','err.dat','out*','*.log'} ./tests ./runtime emil@c220g2-011314.wisc.cloudlab.us:/users/emil/sledge-server/
# rsync -ru --progress --exclude={'res','err.dat','out*','*.log'} ./tests emil@c220g2-011323.wisc.cloudlab.us:/users/emil/sledge-client/
# Active targets: one server and one client machine (recursive, update-only; skips build/output artifacts)
rsync -ru --progress --exclude={'thirdparty','res-*','err.dat','out*','*.log','input*'} ./tests ./runtime emil@128.105.145.72:/users/emil/sledge-server/
rsync -ru --progress --exclude={'thirdparty','res-*','err.dat','out*','*.log','mt-juan/input-cnn','mt-emil/input-cnn'} ./tests ./runtime emil@128.105.145.71:/users/emil/sledge-client/
# rsync -ru --progress --exclude={'thirdparty','res-*','err.dat','out*','*.log','mt-juan/input-cnn','mt-emil/input-cnn'} ./tests ./runtime emil@128.105.145.70:/users/emil/sledge-client/
rsync -ru --progress --exclude={'thirdparty','res-*','err.dat','out*','*.log','input*'} ./tests ./runtime emil@128.105.145.70:/users/emil/sledge-server/
# rsync -ru --progress --exclude={'thirdparty','res-*','err.dat','out*','*.log'} ./tests ./runtime emil@128.105.145.132:/users/emil/sledge-client/
# If on a network where only 443 is allowed use this (after allowing port forwarding ssh to 443 on the server):
# rsync -ru -e 'ssh -p 443' --progress --exclude={'res','out.txt','out.dat','err.dat'} ./tests/* emil@server:/users/emil/sledge-server/tests/
# rsync -ru -e 'ssh -p 443' --progress --exclude={'res','out.txt','out.dat','err.dat'} ./tests/* emil@client:/users/emil/sledge-client/tests/
# lab-dell (don't forget to provide the private key in the config file inside .ssh folder)
# rsync -ru --progress --exclude={'thirdparty','res-*','err.dat','out*','*.log'} ./tests ./runtime lab@161.253.75.227:/home/lab/sledge-emil/
# CMU (don't forget to provide the private key in the config file inside .ssh folder)
# rsync -ru --progress --exclude={'thirdparty','res','err.dat','out*','*.log'} ./tests ./runtime gwu@arena0.andrew.cmu.edu:/home/gwu/sledge/
# esma
# rsync -ru --progress --exclude={'thirdparty','res-*','err.dat','out*','*.log'} ./tests emil@161.253.75.224:/home/emil/sledge-client/

@ -14,10 +14,10 @@ CFLAGS=-std=c18 -pthread
CFLAGS+=-D_GNU_SOURCE
# Release Flags
CFLAGS+=-O3 -flto
# CFLAGS+=-O3 -flto
# Debugging Flags
# CFLAGS+=-O0 -g3
CFLAGS+=-O0 -g3
# CFI Sanitizer
# CFLAGS+=-fvisibility=default -fsanitize=cfi
@ -43,6 +43,7 @@ CFLAGS += -DEXECUTION_HISTOGRAM
# It is recommended (not mandatory) to enable this flag along with the EXECUTION_HISTOGRAM flag:
# CFLAGS += -DADMISSIONS_CONTROL
# CFLAGS += -DTRAFFIC_CONTROL
# Debugging Flags
@ -62,6 +63,7 @@ CFLAGS += -DEXECUTION_HISTOGRAM
# Various Informational Logs for Debugging
# CFLAGS += -DLOG_EXECUTION_HISTOGRAM
# CFLAGS += -DLOG_ADMISSIONS_CONTROL
# CFLAGS += -DLOG_TRAFFIC_CONTROL
# CFLAGS += -DLOG_CONTEXT_SWITCHES
# CFLAGS += -DLOG_HTTP_PARSER
# CFLAGS += -DLOG_TENANT_LOADING
@ -74,7 +76,7 @@ CFLAGS += -DEXECUTION_HISTOGRAM
# This adds an array of sandbox states to all sandbox structs and appends states at each transition
# The history truncates when the number of elements equals SANDBOX_STATE_HISTORY_CAPACITY
# CFLAGS += -DLOG_STATE_CHANGES
CFLAGS += -DLOG_STATE_CHANGES
# This dumps per module *.csv files containing the cycle a sandbox has been in RUNNING when each
# page is allocated. This helps understand the relationship to memory allocation and execution time.

@ -56,7 +56,9 @@ arch_context_restore_fast(mcontext_t *active_context, struct arch_context *sandb
assert(sandbox_context != NULL);
/* Assumption: Base Context is only ever used by arch_context_switch */
assert(sandbox_context != &worker_thread_base_context);
// assert(sandbox_context != &worker_thread_base_context);
/* Assumption: Not switching to the same context */
assert(active_context != &sandbox_context->mctx);
assert(sandbox_context->regs[UREG_SP]);
assert(sandbox_context->regs[UREG_IP]);

@ -10,6 +10,10 @@
extern thread_local struct sandbox *worker_thread_current_sandbox;
void current_sandbox_start(void);
void current_sandbox_exit(void);
void interrupted_sandbox_exit(void);
int sandbox_validate_self_lifetime(struct sandbox *);
void sandbox_kill_self(struct sandbox *);
/**
* Getter for the current sandbox executing on this thread
@ -51,16 +55,27 @@ current_sandbox_set(struct sandbox *sandbox)
/* This is because the event core does not maintain core-assigned deadline */
if (!listener_thread_is_running()) runtime_worker_threads_deadline[worker_thread_idx] = UINT64_MAX;
} else {
assert(sandbox->state == SANDBOX_RUNNABLE || sandbox->state == SANDBOX_PREEMPTED);
// if(sandbox->state == SANDBOX_PREEMPTED && sandbox->original_owner_worker_idx != worker_thread_idx) printf("SAND_id: %lu, WASM_id: %lu, wrk: %d\n", sandbox->id, sledge_abi__current_wasm_module_instance.abi.memory.id, worker_thread_idx);
// else
// printf("Else SAND_id: %lu, WASM_id: %lu, wrk: %d, orig_wrk %d\n", sandbox->id, sledge_abi__current_wasm_module_instance.abi.memory.id, worker_thread_idx, sandbox->original_owner_worker_idx);
sledge_abi__current_wasm_module_instance.wasi_context = sandbox->wasi_context;
assert(sandbox->memory->abi.capacity > 0);
assert(sandbox->memory->abi.size > 0);
assert(sandbox->memory->abi.max > 0);
memcpy(&sledge_abi__current_wasm_module_instance.abi.memory, &sandbox->memory->abi,
sizeof(struct sledge_abi__wasm_memory));
assert(sledge_abi__current_wasm_module_instance.abi.memory.size == sandbox->memory->abi.size);
assert(sledge_abi__current_wasm_module_instance.abi.memory.id == sandbox->id);
sledge_abi__current_wasm_module_instance.abi.table = sandbox->module->indirect_table;
wasm_globals_update_if_used(&sandbox->globals, 0,
&sledge_abi__current_wasm_module_instance.abi.wasmg_0);
worker_thread_current_sandbox = sandbox;
if (!listener_thread_is_running())
runtime_worker_threads_deadline[worker_thread_idx] = sandbox->absolute_deadline;
}
barrier();
}
extern void current_sandbox_sleep();

@ -0,0 +1,75 @@
/* dbf.h — Demand-Based Function (DBF) interface.
 * Tracks per-deadline execution demand so the scheduler can test whether adding a
 * sandbox's demand still fits the available supply up to its absolute deadline.
 * Two interchangeable implementations are declared below (array- and list-based);
 * concrete behavior is plugged in via struct dbf_config function pointers. */
#pragma once
#include <stdlib.h>
#include "tenant.h"
#include "message.h"
/* Select the linked-list implementation (see dbf_list_* declarations below) */
#define DBF_USE_LINKEDLIST
// static const bool USING_AGGREGATED_GLOBAL_DBF = true;
/* Returns pointer back if successful, null otherwise */
// extern void *global_dbf;
/* Opaque DBF instances shared across the runtime (defined elsewhere):
 * one per virtual worker, plus a single global worker DBF */
extern void **global_virt_worker_dbfs;
extern void *global_worker_dbf;
/* One demand record: amount of execution demand due by abs_deadline,
 * linked into a deadline-ordered list (list implementation). */
struct demand_node {
struct ps_list list;     /* intrusive list hook */
uint64_t abs_deadline;   /* absolute deadline (cycles) this demand is due by */
uint64_t demand;         /* execution demand accounted to this deadline */
// uint64_t demand_sum;
// struct sandbox_metadata *sandbox_meta;
struct tenant *tenant;   /* owning tenant, for per-tenant accounting */
};
/* How dbf_try_update_demand should mutate the demand curve */
typedef enum dbf_update_mode
{
DBF_CHECK_AND_ADD_DEMAND, /* normal mode for adding new sandbox demands */
DBF_FORCE_ADD_NEW_SANDBOX_DEMAND, /* work-conservation mode*/
DBF_FORCE_ADD_MANUAL_DEMAND, /* work-conservation mode*/
DBF_REDUCE_EXISTING_DEMAND, /* normal mode for reducing existing sandbox demands */
// DBF_CHECK_EXISTING_SANDBOX_EXTRA_DEMAND, /* special case when a sandbox goes over its expected exec */
DBF_DELETE_EXISTING_DEMAND /* normal mode for removing existing sandbox demand */
} dbf_update_mode_t;
/* Implementation hooks; the void* first argument is the opaque DBF instance */
typedef int (*dbf_get_worker_idx_fn_t)(void *);
typedef uint64_t (*dbf_get_time_of_oversupply_fn_t)(void *);
typedef void (*dbf_print_fn_t)(void *, uint64_t);
typedef bool (*dbf_try_update_demand_fn_t)(void *, uint64_t, uint64_t, uint64_t, uint64_t, dbf_update_mode_t, void *, struct sandbox_metadata *sandbox_meta);
typedef uint64_t (*dbf_get_demand_overgone_its_supply_at_fn_t)(void *, uint64_t, uint64_t, uint64_t);
typedef void (*dbf_free_fn_t)(void *);
/* Dispatch table binding the generic dbf_* API to a concrete implementation */
struct dbf_config {
dbf_get_worker_idx_fn_t get_worker_idx_fn;
// dbf_get_max_relative_dl_fn_t get_max_relative_dl_fn;
dbf_get_time_of_oversupply_fn_t get_time_of_oversupply_fn;
dbf_print_fn_t print_fn;
// dbf_grow_fn_t grow_fn;
dbf_try_update_demand_fn_t try_update_demand_fn;
dbf_get_demand_overgone_its_supply_at_fn_t get_demand_overgone_its_supply_at_fn;
dbf_free_fn_t free_fn;
};
/* Generic API — forwards to the functions plugged in via dbf_plug_functions() */
int dbf_get_worker_idx(void *);
// uint64_t dbf_get_max_relative_dl(void *);
uint64_t dbf_get_time_of_oversupply(void *);
void dbf_print(void *, uint64_t);
// void *dbf_grow(void *, uint64_t);
bool dbf_try_update_demand(void *, uint64_t, uint64_t, uint64_t, uint64_t, dbf_update_mode_t, void *, struct sandbox_metadata *sandbox_meta);
uint64_t dbf_get_demand_overgone_its_supply_at(void *, uint64_t, uint64_t, uint64_t);
void dbf_free(void *);
void dbf_plug_functions(struct dbf_config *config);
/* Constructors for the two implementations and the generic front-end */
void *dbf_list_initialize(uint32_t, uint8_t, int, struct tenant *);
void *dbf_array_initialize(uint32_t, uint8_t, int, struct tenant *);
void *dbf_initialize(uint32_t num_of_workers, uint8_t reservation_percentile, int worker_idx, struct tenant *tenant);
/* List-implementation-specific entry points (used when DBF_USE_LINKEDLIST is defined) */
bool
dbf_list_try_add_new_demand(void *dbf_raw, uint64_t start_time, uint64_t abs_deadline, uint64_t adjustment, struct sandbox_metadata *sm);
void
dbf_list_force_add_extra_slack(void *dbf_raw, struct sandbox_metadata *sm, uint64_t adjustment);
void
dbf_list_reduce_demand(struct sandbox_metadata *sm, uint64_t adjustment, bool delete_node);

@ -3,6 +3,7 @@
#include <stdint.h>
#include "sandbox_types.h"
#include "sandbox_functions.h"
/* Returns pointer back if successful, null otherwise */
typedef struct sandbox *(*global_request_scheduler_add_fn_t)(struct sandbox *);
@ -18,8 +19,13 @@ struct global_request_scheduler_config {
};
void global_request_scheduler_initialize(struct global_request_scheduler_config *config);
struct sandbox *global_request_scheduler_add(struct sandbox *);
int global_request_scheduler_remove(struct sandbox **);
int global_request_scheduler_remove_if_earlier(struct sandbox **, uint64_t targed_deadline);
uint64_t global_request_scheduler_peek(void);
void global_request_scheduler_initialize(struct global_request_scheduler_config *config);
struct sandbox *global_request_scheduler_add(struct sandbox *);
int global_request_scheduler_remove(struct sandbox **);
int global_request_scheduler_remove_if_earlier(struct sandbox **, uint64_t targed_deadline);
uint64_t global_request_scheduler_peek(void);
void global_request_scheduler_update_highest_priority(const void *element);
struct sandbox_metadata global_request_scheduler_peek_metadata();
// struct sandbox_metadata global_request_scheduler_peek_metadata_must_lock(const uint64_t now, struct sandbox *);
void global_default_update_highest_priority(const void *element);
struct sandbox_metadata global_default_peek_metadata();

@ -0,0 +1,5 @@
/* global_request_scheduler_mtdbf.h — MT-DBF variant of the global request scheduler. */
#pragma once
#include "global_request_scheduler.h"
/* Installs the MT-DBF implementation into the global request scheduler
 * (presumably via global_request_scheduler_initialize's config — confirm in the .c file) */
void global_request_scheduler_mtdbf_initialize();

@ -36,6 +36,20 @@
"Connection: close\r\n"
#define HTTP_RESPONSE_404_NOT_FOUND_LENGTH 59
#define HTTP_RESPONSE_408_REQUEST_TIMEOUT \
"HTTP/1.1 408 Request Timeout\r\n" \
"Server: SLEdge\r\n" \
"Connection: close\r\n" \
"\r\n"
#define HTTP_RESPONSE_408_REQUEST_TIMEOUT_LENGTH 67
#define HTTP_RESPONSE_409_CONFLICT \
"HTTP/1.1 409 Conflict\r\n" \
"Server: SLEdge\r\n" \
"Connection: close\r\n" \
"\r\n"
#define HTTP_RESPONSE_409_CONFLICT_LENGTH 60
#define HTTP_RESPONSE_413_PAYLOAD_TOO_LARGE \
"HTTP/1.1 413 Payload Too Large\r\n" \
"Server: SLEdge\r\n" \
@ -70,6 +84,10 @@ http_header_build(int status_code)
return HTTP_RESPONSE_400_BAD_REQUEST;
case 404:
return HTTP_RESPONSE_404_NOT_FOUND;
case 408:
return HTTP_RESPONSE_408_REQUEST_TIMEOUT;
case 409:
return HTTP_RESPONSE_409_CONFLICT;
case 413:
return HTTP_RESPONSE_413_PAYLOAD_TOO_LARGE;
case 429:
@ -91,6 +109,10 @@ http_header_len(int status_code)
return HTTP_RESPONSE_400_BAD_REQUEST_LENGTH;
case 404:
return HTTP_RESPONSE_404_NOT_FOUND_LENGTH;
case 408:
return HTTP_RESPONSE_408_REQUEST_TIMEOUT_LENGTH;
case 409:
return HTTP_RESPONSE_409_CONFLICT_LENGTH;
case 413:
return HTTP_RESPONSE_413_PAYLOAD_TOO_LARGE_LENGTH;
case 429:

@ -49,10 +49,7 @@ http_router_add_route(http_router_t *router, struct route_config *config, struct
#endif
const uint64_t expected_execution = route.relative_deadline / 2;
#ifdef ADMISSIONS_CONTROL
/* Admissions Control setup */
route.execution_histogram.estimated_execution = expected_execution;
#endif
#ifdef EXECUTION_HISTOGRAM
/* Execution Histogram setup */

@ -5,10 +5,26 @@
#include "http_session.h"
#include "module.h"
#include "ck_ring.h"
#include "sandbox_state.h"
#include "dbf.h"
#define LISTENER_THREAD_CORE_ID 1
#define LISTENER_THREAD_CORE_ID 1
#define LISTENER_THREAD_RING_SIZE 10240 /* the actual size becomes 255 */
extern pthread_t listener_thread_id;
struct comm_with_worker {
ck_ring_t worker_ring;
struct message worker_ring_buffer[LISTENER_THREAD_RING_SIZE];
int worker_idx;
}; // __attribute__((aligned(CACHE_PAD))); ///// TODO: this necessary?
CK_RING_PROTOTYPE(message, message)
extern pthread_t listener_thread_id;
extern int listener_thread_epoll_file_descriptor;
extern struct comm_with_worker *comm_from_workers;
extern struct comm_with_worker *comm_from_workers_extra;
extern struct comm_with_worker *comm_to_workers;
void listener_thread_initialize(void);
noreturn void *listener_thread_main(void *dummy);
@ -23,3 +39,14 @@ listener_thread_is_running()
{
return pthread_self() == listener_thread_id;
}
/* Prepare one listener<->worker communication channel per worker thread:
 * initialize each channel's ring buffer and stamp it with its worker index. */
static inline void
comm_with_workers_init(struct comm_with_worker *comm_with_workers)
{
	assert(comm_with_workers);

	int i = 0;
	while (i < runtime_worker_threads_count) {
		struct comm_with_worker *channel = &comm_with_workers[i];
		ck_ring_init(&channel->worker_ring, LISTENER_THREAD_RING_SIZE);
		channel->worker_idx = i;
		i++;
	}
}

@ -17,8 +17,10 @@ struct local_runqueue_config {
local_runqueue_get_next_fn_t get_next_fn;
};
void local_runqueue_add(struct sandbox *);
void local_runqueue_delete(struct sandbox *);
bool local_runqueue_is_empty();
struct sandbox *local_runqueue_get_next();
void local_runqueue_initialize(struct local_runqueue_config *config);
void local_runqueue_add(struct sandbox *);
void local_runqueue_delete(struct sandbox *);
bool local_runqueue_is_empty();
struct sandbox *local_runqueue_get_next();
void local_runqueue_initialize(struct local_runqueue_config *config);
// void local_runqueue_update_highest_priority(const void *element);
// struct sandbox_metadata local_runqueue_peek_metadata();

@ -0,0 +1,7 @@
/* local_runqueue_mtdbf.h — MT-DBF variant of the per-worker local runqueue. */
#pragma once
#include "module.h"
/* Installs the MT-DBF implementation of the local runqueue for this worker */
void local_runqueue_mtdbf_initialize();
/* Current number of entries in the local runqueue */
size_t queue_length();

@ -0,0 +1,33 @@
/* message.h — messages exchanged between the listener thread and worker threads
 * (carried over the ck_ring buffers declared in listener_thread.h). */
#ifndef MESSAGE_H
#define MESSAGE_H
#include "runtime.h"
/* Message types. NOTE(review): CFW/CTW presumably mean "from worker" / "to worker"
 * (cf. comm_from_workers / comm_to_workers in listener_thread.h) — confirm. */
typedef enum
{
MESSAGE_CFW_PULLED_NEW_SANDBOX,
MESSAGE_CFW_REDUCE_DEMAND,
MESSAGE_CFW_DELETE_SANDBOX, /* normal mode for deleting new sandbox demands */
MESSAGE_CFW_EXTRA_DEMAND_REQUEST,
MESSAGE_CFW_WRITEBACK_PREEMPTION,
MESSAGE_CFW_WRITEBACK_OVERSHOOT,
MESSAGE_CTW_SHED_CURRENT_JOB
} message_type_t;
/* Payload for one ring-buffer message */
struct message {
uint64_t sandbox_id;             /* id of the sandbox this message concerns */
uint64_t adjustment;             /* demand adjustment amount (units: cycles — TODO confirm) */
uint64_t total_running_duration; /* accumulated running time of the sandbox */
uint64_t remaining_exec;         /* execution still expected for the sandbox */
uint64_t timestamp;              /* when the message was produced */
struct sandbox *sandbox;         /* sandbox pointer; lifetime must outlive message consumption */
struct sandbox_metadata *sandbox_meta;
message_type_t message_type;
int sender_worker_idx;           /* index of the worker that sent this message */
uint8_t state;                   /* sandbox state snapshot at send time */
bool exceeded_estimation;        /* true if the sandbox went over its expected exec */
}; // PAGE_ALIGNED;
#endif /* MESSAGE_H */

@ -1,5 +1,10 @@
#pragma once
// #include <string.h>
// #include <sys/socket.h>
// #include <sys/types.h>
// #include <netdb.h>
#include "current_wasm_module_instance.h"
#include "pool.h"
#include "sledge_abi_symbols.h"
@ -113,9 +118,11 @@ module_initialize_pools(struct module *module)
{
/* Create only a single pool for the preprocessing module, since it is executed only by the event core. */
const int n = module->type == APP_MODULE ? runtime_worker_threads_count : 1;
for (int i = 0; i < n; i++) {
wasm_memory_pool_init(&module->pools[i].memory, false);
wasm_stack_pool_init(&module->pools[i].stack, false);
for (int i = 0; i < 1; i++) {
// wasm_memory_pool_init(&module->pools[i].memory, false);
// wasm_stack_pool_init(&module->pools[i].stack, false);
wasm_memory_pool_init(&module->pools[i].memory, true);
wasm_stack_pool_init(&module->pools[i].stack, true);
}
}
@ -123,7 +130,7 @@ static inline void
module_deinitialize_pools(struct module *module)
{
const int n = module->type == APP_MODULE ? runtime_worker_threads_count : 1;
for (int i = 0; i < n; i++) {
for (int i = 0; i < 1; i++) {
wasm_memory_pool_deinit(&module->pools[i].memory);
wasm_stack_pool_deinit(&module->pools[i].stack);
}
@ -167,7 +174,8 @@ module_allocate_stack(struct module *module)
{
assert(module != NULL);
struct wasm_stack *stack = wasm_stack_pool_remove_nolock(&module->pools[worker_thread_idx].stack);
// struct wasm_stack *stack = wasm_stack_pool_remove_nolock(&module->pools[worker_thread_idx].stack);
struct wasm_stack *stack = wasm_stack_pool_remove(&module->pools[0].stack);
if (stack == NULL) {
stack = wasm_stack_alloc(module->stack_size);
@ -178,10 +186,17 @@ module_allocate_stack(struct module *module)
}
static inline void
module_free_stack(struct module *module, struct wasm_stack *stack)
module_free_stack(struct module *module, struct wasm_stack *stack, int orig_wrk_idx)
{
assert(orig_wrk_idx >= 0);
// if (module->pools[worker_thread_idx].stack.size == RUNTIME_WORKER_POOL_SIZE) {
// assert(0);
// wasm_stack_free(stack);
// return;
// }
wasm_stack_reinit(stack);
wasm_stack_pool_add_nolock(&module->pools[worker_thread_idx].stack, stack);
// wasm_stack_pool_add_nolock(&module->pools[worker_thread_idx].stack, stack);
wasm_stack_pool_add(&module->pools[0].stack, stack);
}
static inline struct wasm_memory *
@ -198,7 +213,8 @@ module_allocate_linear_memory(struct module *module)
assert(starting_bytes <= (uint64_t)UINT32_MAX + 1);
assert(max_bytes <= (uint64_t)UINT32_MAX + 1);
struct wasm_memory *linear_memory = wasm_memory_pool_remove_nolock(&module->pools[worker_thread_idx].memory);
// struct wasm_memory *linear_memory = wasm_memory_pool_remove_nolock(&module->pools[worker_thread_idx].memory);
struct wasm_memory *linear_memory = wasm_memory_pool_remove(&module->pools[0].memory);
if (linear_memory == NULL) {
linear_memory = wasm_memory_alloc(starting_bytes, max_bytes);
if (unlikely(linear_memory == NULL)) return NULL;
@ -207,9 +223,26 @@ module_allocate_linear_memory(struct module *module)
return linear_memory;
}
thread_local static uint16_t max_size = 0;
static inline void
module_free_linear_memory(struct module *module, struct wasm_memory *memory)
module_free_linear_memory(struct module *module, struct wasm_memory *memory, int orig_wrk_idx)
{
assert(orig_wrk_idx >= 0);
// if (module->pools[worker_thread_idx].memory.size == RUNTIME_WORKER_POOL_SIZE) {
// assert(0);
// wasm_memory_free(memory);
// return;
// }
wasm_memory_reinit(memory, module->abi.starting_pages * WASM_PAGE_SIZE);
wasm_memory_pool_add_nolock(&module->pools[worker_thread_idx].memory, memory);
// wasm_memory_pool_add_nolock(&module->pools[worker_thread_idx].memory, memory);
wasm_memory_pool_add(&module->pools[0].memory, memory);
// if (17 < module->pools[0].memory.size) {
// // max_size = module->pools[0].memory.size;
// printf("Module %s - [Worker]=Sizes\n", module->path);
// for (int i=0; i< 1; i++) {
// printf("%u ", module->pools[i].memory.size);
// }
// printf("\n\n");
// }
}

@ -12,6 +12,7 @@
bool use_lock; \
lock_t lock; \
struct ps_list_head list; \
uint16_t size; \
}; \
\
static inline bool STRUCT_NAME##_pool_is_empty(struct STRUCT_NAME##_pool *self) \
@ -26,6 +27,7 @@
ps_list_head_init(&self->list); \
self->use_lock = use_lock; \
if (use_lock) lock_init(&self->lock); \
self->size = 0; \
} \
\
static inline void STRUCT_NAME##_pool_deinit(struct STRUCT_NAME##_pool *self) \
@ -52,6 +54,8 @@
obj = ps_list_head_first_d(&self->list, struct STRUCT_NAME); \
assert(obj); \
ps_list_rem_d(obj); \
assert(self->size > 0); \
self->size--; \
\
return obj; \
} \
@ -79,6 +83,8 @@
assert(!self->use_lock || lock_is_locked(&self->lock)); \
\
ps_list_head_add_d(&self->list, obj); \
self->size++; \
/*assert(self->size <= RUNTIME_WORKER_POOL_SIZE); */ \
} \
\
static inline void STRUCT_NAME##_pool_add(struct STRUCT_NAME##_pool *self, struct STRUCT_NAME *obj) \

@ -18,16 +18,20 @@
* @returns priority (a uint64_t)
*/
typedef uint64_t (*priority_queue_get_priority_fn_t)(void *element);
typedef void (*priority_queue_update_priority_fn_t)(const void *element);
typedef void (*priority_queue_update_idx_fn_t)(void *element, size_t idx);
/* We assume that priority is expressed in terms of a 64 bit unsigned integral */
struct priority_queue {
priority_queue_get_priority_fn_t get_priority_fn;
bool use_lock;
lock_t lock;
uint64_t highest_priority;
size_t size;
size_t capacity;
void *items[];
priority_queue_get_priority_fn_t get_priority_fn;
priority_queue_update_priority_fn_t update_priority_fn;
priority_queue_update_idx_fn_t update_idx_fn;
bool use_lock;
lock_t lock;
uint64_t highest_priority;
size_t size;
size_t capacity;
void *items[];
};
/**
@ -47,6 +51,11 @@ static inline void
priority_queue_update_highest_priority(struct priority_queue *priority_queue, const uint64_t priority)
{
priority_queue->highest_priority = priority;
if (priority_queue->update_priority_fn) {
// priority_queue->update_priority_fn((priority_queue->size > 0) ? priority_queue->items[1] : NULL);
priority_queue->update_priority_fn(priority_queue->items[1]);
}
}
/**
@ -60,6 +69,7 @@ priority_queue_append(struct priority_queue *priority_queue, void *new_item)
{
assert(priority_queue != NULL);
assert(new_item != NULL);
// assert(priority_queue->update_idx_fn != NULL);
assert(!priority_queue->use_lock || lock_is_locked(&priority_queue->lock));
int rc;
@ -67,6 +77,9 @@ priority_queue_append(struct priority_queue *priority_queue, void *new_item)
if (unlikely(priority_queue->size > priority_queue->capacity)) panic("PQ overflow");
if (unlikely(priority_queue->size == priority_queue->capacity)) goto err_enospc;
priority_queue->items[++priority_queue->size] = new_item;
if (priority_queue->update_idx_fn) {
priority_queue->update_idx_fn(priority_queue->items[priority_queue->size], priority_queue->size);
}
rc = 0;
done:
@ -116,6 +129,10 @@ priority_queue_percolate_up(struct priority_queue *priority_queue)
void *temp = priority_queue->items[i / 2];
priority_queue->items[i / 2] = priority_queue->items[i];
priority_queue->items[i] = temp;
if (priority_queue->update_idx_fn) {
priority_queue->update_idx_fn(priority_queue->items[i], i);
priority_queue->update_idx_fn(priority_queue->items[i / 2], i / 2);
}
/* If percolated to highest priority, update highest priority */
if (i / 2 == 1)
priority_queue_update_highest_priority(priority_queue, priority_queue->get_priority_fn(
@ -168,7 +185,7 @@ priority_queue_percolate_down(struct priority_queue *priority_queue, int parent_
assert(priority_queue != NULL);
assert(priority_queue->get_priority_fn != NULL);
assert(!priority_queue->use_lock || lock_is_locked(&priority_queue->lock));
assert(!listener_thread_is_running());
// assert(!listener_thread_is_running());
bool update_highest_value = parent_index == 1;
@ -183,6 +200,11 @@ priority_queue_percolate_down(struct priority_queue *priority_queue, int parent_
void *temp = priority_queue->items[smallest_child_index];
priority_queue->items[smallest_child_index] = priority_queue->items[parent_index];
priority_queue->items[parent_index] = temp;
if (priority_queue->update_idx_fn) {
priority_queue->update_idx_fn(priority_queue->items[smallest_child_index],
smallest_child_index);
priority_queue->update_idx_fn(priority_queue->items[parent_index], parent_index);
}
parent_index = smallest_child_index;
left_child_index = 2 * parent_index;
@ -216,8 +238,8 @@ priority_queue_dequeue_if_earlier_nolock(struct priority_queue *priority_queue,
assert(priority_queue != NULL);
assert(dequeued_element != NULL);
assert(priority_queue->get_priority_fn != NULL);
assert(!listener_thread_is_running());
assert(!priority_queue->use_lock || lock_is_locked(&priority_queue->lock));
// assert(!listener_thread_is_running());
int return_code;
@ -225,9 +247,13 @@ priority_queue_dequeue_if_earlier_nolock(struct priority_queue *priority_queue,
if (priority_queue_is_empty(priority_queue) || priority_queue->highest_priority >= target_deadline)
goto err_enoent;
*dequeued_element = priority_queue->items[1];
*dequeued_element = priority_queue->items[1];
if (priority_queue->update_idx_fn) { priority_queue->update_idx_fn(*dequeued_element, 0); }
priority_queue->items[1] = priority_queue->items[priority_queue->size];
priority_queue->items[priority_queue->size--] = NULL;
if (priority_queue->update_idx_fn && priority_queue->items[1]) {
priority_queue->update_idx_fn(priority_queue->items[1], 1);
}
priority_queue_percolate_down(priority_queue, 1);
return_code = 0;
@ -287,6 +313,29 @@ priority_queue_initialize(size_t capacity, bool use_lock, priority_queue_get_pri
return priority_queue;
}
/**
 * Initializes a priority queue together with its index/priority update callbacks.
 * TODO: Unite with priority_queue_initialize
 * @param capacity the number of elements to store in the data structure
 * @param use_lock indicates that we want a concurrent data structure
 * @param get_priority_fn pointer to a function that returns the priority of an element
 * @param update_priority_fn optional callback invoked when the highest priority changes (may be NULL)
 * @param update_idx_fn callback invoked whenever an element's heap index changes (required)
 * @return priority queue, or NULL if the underlying allocation failed
 */
static inline struct priority_queue *
priority_queue_initialize_new(size_t capacity, bool use_lock, priority_queue_get_priority_fn_t get_priority_fn,
                              priority_queue_update_priority_fn_t update_priority_fn,
                              priority_queue_update_idx_fn_t update_idx_fn)
{
	assert(update_idx_fn != NULL);

	struct priority_queue *priority_queue = priority_queue_initialize(capacity, use_lock, get_priority_fn);
	/* Defensive: propagate an allocation failure instead of dereferencing NULL below */
	if (priority_queue == NULL) return NULL;

	priority_queue->update_priority_fn = update_priority_fn;
	priority_queue->update_idx_fn      = update_idx_fn;

	/* Seed the consumer's cached "highest priority" view with the empty-queue state.
	 * NOTE(review): assumes update_priority_fn tolerates a NULL element — confirm with callers. */
	if (update_priority_fn) priority_queue->update_priority_fn(NULL);

	return priority_queue;
}
/**
* Double capacity of priority queue
* Note: currently there is no equivalent call for PQs that are not thread-local and need to be locked because it is
@ -412,6 +461,10 @@ priority_queue_delete_nolock(struct priority_queue *priority_queue, void *value)
if (priority_queue->items[i] == value) {
priority_queue->items[i] = priority_queue->items[priority_queue->size];
priority_queue->items[priority_queue->size--] = NULL;
if (priority_queue->update_idx_fn) {
priority_queue->update_idx_fn(value, 0);
priority_queue->update_idx_fn(priority_queue->items[i], i);
}
priority_queue_percolate_down(priority_queue, i);
return 0;
}
@ -438,6 +491,52 @@ priority_queue_delete(struct priority_queue *priority_queue, void *value)
return rc;
}
/**
 * Deletes a known element from the heap by its index, without taking the lock
 * (caller must already hold it if the queue is lock-based).
 * The hole left at idx is filled with the last element, which is then percolated down.
 * @param priority_queue - the priority queue we want to delete from
 * @param value_to_remove - the element expected at items[idx]; used as a consistency check
 * @param idx - the index of the value we want to delete (1-based; heap root is 1)
 */
static inline void
priority_queue_delete_by_idx_nolock(struct priority_queue *priority_queue, const void *value_to_remove,
const size_t idx)
{
assert(priority_queue != NULL);
assert(idx <= priority_queue->size);
assert(idx >= 1);
assert(priority_queue->update_idx_fn);
/* MTDS/MTDBF schedulers are allowed to call this from the listener thread */
if (scheduler != SCHEDULER_MTDS && scheduler != SCHEDULER_MTDBF) assert(!listener_thread_is_running());
assert(!priority_queue->use_lock || lock_is_locked(&priority_queue->lock));
void *r = priority_queue->items[idx];
assert(r == value_to_remove);
/* Index 0 marks the element as no longer resident in the heap */
priority_queue->update_idx_fn(r, 0);
if (idx == priority_queue->size) {
/* Removing the last slot: no hole to fill; percolate keeps the root's
 * highest-priority bookkeeping consistent (idx is now past the new size) */
// priority_queue->size--;
priority_queue->items[priority_queue->size--] = NULL;
priority_queue_percolate_down(priority_queue, idx);
return;
}
/* Move the last element into the hole, fix its cached index, then restore heap order */
priority_queue->items[idx] = priority_queue->items[priority_queue->size];
priority_queue->items[priority_queue->size--] = NULL;
priority_queue->update_idx_fn(priority_queue->items[idx], idx);
priority_queue_percolate_down(priority_queue, idx);
}
/**
 * Locked wrapper around priority_queue_delete_by_idx_nolock().
 * @param priority_queue - the priority queue we want to delete from
 * @param value_to_remove - the element expected at that index (sanity-checked by the nolock variant)
 * @param idx - the 1-based heap index of the value we want to delete
 */
static inline void
priority_queue_delete_by_idx(struct priority_queue *priority_queue, const void *value_to_remove, const size_t idx)
{
	lock_node_t acquired_node = {};

	/* Serialize against other queue users, then delegate to the nolock variant */
	lock_lock(&priority_queue->lock, &acquired_node);
	priority_queue_delete_by_idx_nolock(priority_queue, value_to_remove, idx);
	lock_unlock(&priority_queue->lock, &acquired_node);
}
/**
* @param priority_queue - the priority queue we want to add to
* @param dequeued_element a pointer to set to the dequeued element

@ -183,7 +183,13 @@ ps_list_ll_rem(struct ps_list *l)
for (iter = ps_list_head_first((head), __typeof__(*iter), lname); !ps_list_is_head((head), iter, lname); \
(iter) = ps_list_next(iter, lname))
/* Reverse iteration without mutating the list: start at the tail element and
 * follow the prev links until the iterator wraps back around to the sentinel head. */
#define ps_list_foreach_rev(head, iter, lname)                                                                 \
	for (iter = ps_list_head_last((head), __typeof__(*iter), lname); !ps_list_is_head((head), iter, lname); \
	     (iter) = ps_list_prev(iter, lname))

/* Convenience forms bound to the default link-member name (PS_LIST_DEF_NAME) */
#define ps_list_foreach_d(head, iter)     ps_list_foreach(head, iter, PS_LIST_DEF_NAME)
#define ps_list_foreach_rev_d(head, iter) ps_list_foreach_rev(head, iter, PS_LIST_DEF_NAME)
/*
* Iteration where the current node can be ps_list_rem'ed.

@ -4,6 +4,9 @@
#include <stdint.h>
#include "execution_histogram.h"
// #include "admissions_info.h"
// #include "estimated_exec_info.h"
// #include "module.h"
#include "http_route_total.h"
#include "module.h"
#include "perf_window.h"

@ -5,6 +5,7 @@
#include <stdlib.h>
#include "admissions_control.h"
#include "traffic_control.h"
#include "runtime.h"
#include "scheduler_options.h"
@ -95,7 +96,7 @@ route_config_validate(struct route_config *config, bool *did_set)
return -1;
}
if (config->relative_deadline_us > (uint32_t)RUNTIME_RELATIVE_DEADLINE_US_MAX) {
if (config->relative_deadline_us == 0 || config->relative_deadline_us > (uint32_t)RUNTIME_RELATIVE_DEADLINE_US_MAX) {
fprintf(stderr, "Relative-deadline-us must be between 0 and %u, was %u\n",
(uint32_t)RUNTIME_RELATIVE_DEADLINE_US_MAX, config->relative_deadline_us);
return -1;

@ -24,10 +24,14 @@
#define RUNTIME_LOG_FILE "sledge.log"
#define RUNTIME_MAX_EPOLL_EVENTS 128
#define RUNTIME_MAX_TENANT_COUNT 32
#define RUNTIME_MAX_TENANT_COUNT 320
#define RUNTIME_RELATIVE_DEADLINE_US_MAX 3600000000 /* One Hour. Fits in uint32_t */
#define RUNTIME_RUNQUEUE_SIZE 256 /* Minimum guaranteed size. Might grow! */
#define RUNTIME_RUNQUEUE_SIZE 2560 /* Minimum guaranteed size. Might grow! */
#define RUNTIME_TENANT_QUEUE_SIZE 4096
#define RUNTIME_WORKER_POOL_SIZE 8 /* Set to zero if no pooling desired */
#define RUNTIME_MAX_CPU_UTIL_PERCENTILE 100
#define RUNTIME_MAX_ALIVE_SANDBOXES 1048576 // 1 << 20
enum RUNTIME_SIGALRM_HANDLER
{
@ -40,16 +44,22 @@ extern bool runtime_preemption_enabled;
extern bool runtime_worker_spinloop_pause_enabled;
extern uint32_t runtime_processor_speed_MHz;
extern uint32_t runtime_quantum_us;
extern uint64_t runtime_quantum;
extern enum RUNTIME_SIGALRM_HANDLER runtime_sigalrm_handler;
extern pthread_t *runtime_worker_threads;
extern uint32_t runtime_worker_threads_count;
extern int *runtime_worker_threads_argument;
extern uint64_t *runtime_worker_threads_deadline;
extern uint64_t runtime_boot_timestamp;
extern uint64_t runtime_max_deadline;
extern bool sandbox_refs[];
extern uint16_t extra_execution_slack_p;
extern void runtime_initialize(void);
extern void runtime_set_pthread_prio(pthread_t thread, unsigned int nice);
extern void runtime_set_resource_limits_to_max(void);
extern void runtime_cleanup();
void runtime_set_policy_and_prio();
/* External Symbols */
extern int expand_memory(void);
@ -67,3 +77,15 @@ runtime_print_sigalrm_handler(enum RUNTIME_SIGALRM_HANDLER variant)
return "TRIAGED";
}
}
/* Compile-time policy switches for the MT-DBF scheduler experiments.
 * NOTE(review): most semantics below are inferred from the flag names —
 * confirm against the scheduler code paths that read them. */
static const bool USING_WORK_CONSERVATION         = true;
static const bool USING_LOCAL_RUNQUEUE            = false;
static const bool USING_TRY_LOCAL_EXTRA           = true;
/* Writeback-on-preemption is only meaningful when there is no local runqueue */
static const bool USING_WRITEBACK_FOR_PREEMPTION  = !USING_LOCAL_RUNQUEUE;
static const bool USING_WRITEBACK_FOR_OVERSHOOT   = false;
/* Forced on whenever local runqueues are in use */
static const bool USING_AGGREGATED_GLOBAL_DBF     = USING_LOCAL_RUNQUEUE || false;
/* When true, the global queue is compared by (absolute_deadline - remaining_exec)
 * instead of the plain absolute deadline (see scheduler_mtdbf_get_next) */
static const bool USING_EARLIEST_START_FIRST      = false;

@ -12,12 +12,14 @@
* Public API *
**************************/
struct sandbox *sandbox_alloc(struct module *module, struct http_session *session, struct route *route,
struct tenant *tenant, uint64_t admissions_estimate);
int sandbox_prepare_execution_environment(struct sandbox *sandbox);
void sandbox_free(struct sandbox *sandbox);
void sandbox_main(struct sandbox *sandbox);
void sandbox_switch_to(struct sandbox *next_sandbox);
struct sandbox *sandbox_alloc(struct module *module, struct http_session *session, uint64_t admissions_estimate,
uint64_t sandbox_alloc_timestamp);
struct sandbox_metadata *sandbox_meta_alloc(struct sandbox *sandbox);
int sandbox_prepare_execution_environment(struct sandbox *sandbox);
void sandbox_free(struct sandbox *sandbox);
void sandbox_main(struct sandbox *sandbox);
void sandbox_switch_to(struct sandbox *next_sandbox);
void sandbox_process_scheduler_updates(struct sandbox *sandbox);
/**
* Free Linear Memory, leaving stack in place
@ -28,7 +30,9 @@ sandbox_free_linear_memory(struct sandbox *sandbox)
{
assert(sandbox != NULL);
assert(sandbox->memory != NULL);
module_free_linear_memory(sandbox->module, (struct wasm_memory *)sandbox->memory);
// if (worker_thread_idx != sandbox->original_owner_worker_idx)
// printf("Me: %d, Orig: %d, Tenant: %s\n", worker_thread_idx, sandbox->original_owner_worker_idx, sandbox->tenant->name);
module_free_linear_memory(sandbox->module, (struct wasm_memory *)sandbox->memory, sandbox->original_owner_worker_idx);
sandbox->memory = NULL;
}
@ -48,13 +52,29 @@ static inline uint64_t
sandbox_get_priority(void *element)
{
struct sandbox *sandbox = (struct sandbox *)element;
if (scheduler == SCHEDULER_SJF) return sandbox->remaining_exec;
return sandbox->absolute_deadline;
}
static inline uint64_t
sandbox_get_priority_global(void *element)
{
struct sandbox *sandbox = (struct sandbox *)element;
return sandbox->absolute_deadline - sandbox->remaining_exec;
}
/**
 * Priority-queue callback: records a sandbox's current heap index in the
 * runqueue so it can later be deleted by index (0 means "not in the queue").
 * @param element opaque pointer to a struct sandbox; must not be NULL
 * @param idx the heap slot the element now occupies
 */
static inline void
sandbox_update_pq_idx_in_runqueue(void *element, size_t idx)
{
	struct sandbox *moved_sandbox = (struct sandbox *)element;
	assert(moved_sandbox != NULL);
	moved_sandbox->pq_idx_in_runqueue = idx;
}
static inline void
sandbox_process_scheduler_updates(struct sandbox *sandbox)
local_sandbox_meta_update_pq_idx_in_tenant_queue(void *element, size_t idx)
{
if (tenant_is_paid(sandbox->tenant)) {
atomic_fetch_sub(&sandbox->tenant->remaining_budget, sandbox->last_state_duration);
}
assert(element);
struct sandbox_metadata *sandbox_meta = (struct sandbox_metadata *)element;
sandbox_meta->pq_idx_in_tenant_queue = idx;
}

@ -13,9 +13,25 @@ static inline void
sandbox_perf_log_print_header()
{
if (sandbox_perf_log == NULL) { perror("sandbox perf log"); }
fprintf(sandbox_perf_log, "id,tenant,route,state,deadline,actual,queued,uninitialized,allocated,initialized,"
"runnable,interrupted,preempted,"
"running_sys,running_user,asleep,returned,complete,error,proc_MHz,payload_size\n");
fprintf(sandbox_perf_log,
"id,tenant,route,state,deadline,actual,queued,uninitialized,allocated,initialized,"
"runnable,interrupted,preempted,"
"running_sys,running_user,asleep,returned,complete,error,proc_MHz,response_code,guarantee_type,payload_size\n");
}
/**
 * Prints a perf-log line for a request denied by admissions control before a
 * sandbox was ever allocated. The line follows the same 23-column layout as
 * sandbox_perf_log_print_header(): id is -1, state is "Deny", and every
 * duration column is zero.
 * Logging only happens when the perf log was enabled by environment variable.
 * @param tenant the tenant whose request was denied
 * @param route the matched route of the denied request
 * @param response_code the code recorded in the response_code column
 */
static inline void
sandbox_perf_log_print_denied_entry(struct tenant *tenant, struct route *route, uint16_t response_code)
{
	/* If the log was not defined by an environment variable, early out */
	if (sandbox_perf_log == NULL) return;

	fprintf(sandbox_perf_log, "-1,%s,%s,Deny,%lu,0,0,0,0,0,0,0,0,0,0,0,0,0,0,%u,%u,0,0\n", tenant->name, route->route,
	        route->relative_deadline, runtime_processor_speed_MHz, response_code);
}
/**
@ -29,16 +45,15 @@ sandbox_perf_log_print_entry(struct sandbox *sandbox)
/* If the log was not defined by an environment variable, early out */
if (sandbox_perf_log == NULL) return;
uint64_t queued_duration = sandbox->timestamp_of.dispatched - sandbox->timestamp_of.allocation;
uint64_t queued_duration = sandbox->timestamp_of.dispatched - sandbox->timestamp_of.allocation; // TODO: Consider writeback
/*
* Assumption: A sandbox is never able to free pages. If linear memory management
* becomes more intelligent, then peak linear memory size needs to be tracked
* seperately from current linear memory size.
*/
fprintf(sandbox_perf_log,
"%lu,%s,%s,%s,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%u,%u,%d,%d\n", sandbox->id,
sandbox->tenant->name, sandbox->route->route, sandbox_state_stringify(sandbox->state),
fprintf(sandbox_perf_log, "%lu,%s,%s,%s,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%u,%u,%d,%d\n",
sandbox->id, sandbox->tenant->name, sandbox->route->route, sandbox_state_stringify(sandbox->state),
sandbox->route->relative_deadline, sandbox->total_time, queued_duration,
sandbox->duration_of_state[SANDBOX_UNINITIALIZED], sandbox->duration_of_state[SANDBOX_ALLOCATED],
sandbox->duration_of_state[SANDBOX_INITIALIZED], sandbox->duration_of_state[SANDBOX_RUNNABLE],
@ -46,7 +61,7 @@ sandbox_perf_log_print_entry(struct sandbox *sandbox)
sandbox->duration_of_state[SANDBOX_RUNNING_SYS], sandbox->duration_of_state[SANDBOX_RUNNING_USER],
sandbox->duration_of_state[SANDBOX_ASLEEP], sandbox->duration_of_state[SANDBOX_RETURNED],
sandbox->duration_of_state[SANDBOX_COMPLETE], sandbox->duration_of_state[SANDBOX_ERROR],
runtime_processor_speed_MHz, sandbox->response_code, 0, sandbox->payload_size);
runtime_processor_speed_MHz, sandbox->response_code, sandbox->global_queue_type, sandbox->payload_size);
}
static inline void

@ -20,11 +20,12 @@ sandbox_set_as_allocated(struct sandbox *sandbox)
{
assert(sandbox);
assert(sandbox->state == SANDBOX_UNINITIALIZED);
sandbox->state = SANDBOX_ALLOCATED;
uint64_t now = __getcycles();
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
sandbox->timestamp_of.allocation = now;
// sandbox->timestamp_of.allocation = now;
sandbox->timestamp_of.last_state_change = now;
sandbox_state_history_init(&sandbox->state_history);
sandbox_state_history_append(&sandbox->state_history, SANDBOX_ALLOCATED);

@ -29,15 +29,12 @@ sandbox_set_as_complete(struct sandbox *sandbox, sandbox_state_t last_state)
uint64_t now = __getcycles();
switch (last_state) {
case SANDBOX_RETURNED: {
sandbox->timestamp_of.completion = now;
case SANDBOX_RETURNED:
break;
}
default: {
default:
panic("Sandbox %lu | Illegal transition from %s to Error\n", sandbox->id,
sandbox_state_stringify(last_state));
}
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
@ -48,13 +45,11 @@ sandbox_set_as_complete(struct sandbox *sandbox, sandbox_state_t last_state)
sandbox_state_totals_increment(SANDBOX_COMPLETE);
sandbox_state_totals_decrement(last_state);
struct route *route = sandbox->route;
#ifdef EXECUTION_HISTOGRAM
/* Execution Histogram Post Processing */
const uint64_t execution_duration = sandbox->duration_of_state[SANDBOX_RUNNING_USER]
+ sandbox->duration_of_state[SANDBOX_RUNNING_SYS];
execution_histogram_update(&route->execution_histogram, execution_duration);
execution_histogram_update(&sandbox->route->execution_histogram, execution_duration);
#endif
#ifdef ADMISSIONS_CONTROL
@ -65,7 +60,7 @@ sandbox_set_as_complete(struct sandbox *sandbox, sandbox_state_t last_state)
/* Terminal State Logging for Sandbox */
sandbox_perf_log_print_entry(sandbox);
sandbox_summarize_page_allocations(sandbox);
route_latency_add(&route->latency, sandbox->total_time);
route_latency_add(&sandbox->route->latency, sandbox->total_time);
/* State Change Hooks */
sandbox_state_transition_from_hook(sandbox, last_state);

@ -33,40 +33,54 @@ sandbox_set_as_error(struct sandbox *sandbox, sandbox_state_t last_state)
uint64_t now = __getcycles();
switch (last_state) {
case SANDBOX_ALLOCATED:
case SANDBOX_INITIALIZED:
assert(sandbox->memory == NULL);
break;
case SANDBOX_RUNNING_SYS: {
local_runqueue_delete(sandbox);
sandbox_free_linear_memory(sandbox);
case SANDBOX_PREEMPTED:
/* Global work-shedding scenario, where we kill a job right after pullling from global queue */
if (USING_LOCAL_RUNQUEUE) {
local_runqueue_delete(sandbox);
} else {
assert(sandbox->owned_worker_idx == -2);
assert(sandbox->pq_idx_in_runqueue == 0);
}
// sandbox_free_linear_memory(sandbox);
break;
}
default: {
case SANDBOX_RUNNABLE: /* When using local queues */
case SANDBOX_RUNNING_SYS:
case SANDBOX_INTERRUPTED:
assert(sandbox->owned_worker_idx >= 0);
assert(sandbox->pq_idx_in_runqueue >= 1);
local_runqueue_delete(sandbox);
// sandbox_free_linear_memory(sandbox);
break;
default:
panic("Sandbox %lu | Illegal transition from %s to Error\n", sandbox->id,
sandbox_state_stringify(last_state));
}
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
sandbox->last_state_duration = now - sandbox->timestamp_of.last_state_change;
if (last_state == SANDBOX_RUNNING_SYS) {
sandbox->remaining_exec = (sandbox->remaining_exec > sandbox->last_state_duration)
? sandbox->remaining_exec - sandbox->last_state_duration
: 0;
}
if(last_state == SANDBOX_RUNNING_SYS) sandbox->last_running_state_duration += sandbox->last_state_duration;
sandbox->duration_of_state[last_state] += sandbox->last_state_duration;
sandbox->timestamp_of.last_state_change = now;
sandbox_state_history_append(&sandbox->state_history, SANDBOX_ERROR);
sandbox_state_totals_increment(SANDBOX_ERROR);
sandbox_state_totals_decrement(last_state);
sandbox->timestamp_of.completion = now;
sandbox->total_time = sandbox->timestamp_of.completion - sandbox->timestamp_of.allocation;
#ifdef ADMISSIONS_CONTROL
/* Admissions Control Post Processing */
admissions_control_subtract(sandbox->admissions_estimate);
#endif
/* Return HTTP session to listener core to be written back to client */
http_session_set_response_header(sandbox->http, 500);
const int http_status_code = (sandbox->response_code > 0) ? sandbox->response_code / 10 : 500;
http_session_set_response_header(sandbox->http, http_status_code);
sandbox->http->state = HTTP_SESSION_EXECUTION_COMPLETE;
http_session_send_response(sandbox->http, (void_star_cb)listener_thread_register_http_session);
sandbox->http = NULL;
@ -85,8 +99,8 @@ sandbox_set_as_error(struct sandbox *sandbox, sandbox_state_t last_state)
static inline void
sandbox_exit_error(struct sandbox *sandbox)
{
assert(sandbox->state == SANDBOX_RUNNING_SYS);
sandbox_set_as_error(sandbox, SANDBOX_RUNNING_SYS);
// assert(sandbox->state == SANDBOX_RUNNING_SYS);
sandbox_set_as_error(sandbox, sandbox->state);
sandbox_process_scheduler_updates(sandbox);
}

@ -24,14 +24,12 @@ sandbox_set_as_initialized(struct sandbox *sandbox, sandbox_state_t last_state)
uint64_t now = __getcycles();
switch (last_state) {
case SANDBOX_ALLOCATED: {
case SANDBOX_ALLOCATED:
break;
}
default: {
default:
panic("Sandbox %lu | Illegal transition from %s to Preempted\n", sandbox->id,
sandbox_state_stringify(last_state));
}
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);

@ -15,6 +15,7 @@ static inline void
sandbox_set_as_interrupted(struct sandbox *sandbox, sandbox_state_t last_state)
{
assert(sandbox);
assert(last_state == SANDBOX_RUNNING_USER);
/* WARNING: All code before this assignment is preemptable */
sandbox->state = SANDBOX_INTERRUPTED;
@ -25,10 +26,7 @@ sandbox_set_as_interrupted(struct sandbox *sandbox, sandbox_state_t last_state)
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
sandbox->last_state_duration = now - sandbox->timestamp_of.last_state_change;
assert(last_state == SANDBOX_RUNNING_USER);
sandbox->remaining_exec = (sandbox->remaining_exec > sandbox->last_state_duration)
? sandbox->remaining_exec - sandbox->last_state_duration
: 0;
sandbox->last_running_state_duration += sandbox->last_state_duration;
sandbox->duration_of_state[last_state] += sandbox->last_state_duration;
sandbox->timestamp_of.last_state_change = now;
/* We do not append SANDBOX_INTERRUPTED to the sandbox_state_history because it would quickly fill the buffer */
@ -44,8 +42,6 @@ static inline void
sandbox_interrupt(struct sandbox *sandbox)
{
sandbox_set_as_interrupted(sandbox, sandbox->state);
sandbox_process_scheduler_updates(sandbox);
}
@ -69,9 +65,9 @@ sandbox_interrupt_return(struct sandbox *sandbox, sandbox_state_t interrupted_st
sandbox_state_totals_increment(interrupted_state);
sandbox_state_totals_decrement(SANDBOX_INTERRUPTED);
if (sandbox->absolute_deadline < now) {
// printf("Interrupted Sandbox missed deadline already!\n");
}
// if (sandbox->absolute_deadline < now) {
// printf("Interrupted sandbox #%lu of %s missed deadline in worker #%d!\n", sandbox->id, sandbox->tenant->name, worker_thread_idx);
// }
barrier();
/* WARNING: Code after this assignment may be preemptable */

@ -27,14 +27,12 @@ sandbox_set_as_preempted(struct sandbox *sandbox, sandbox_state_t last_state)
uint64_t now = __getcycles();
switch (last_state) {
case SANDBOX_INTERRUPTED: {
case SANDBOX_INTERRUPTED:
break;
}
default: {
default:
panic("Sandbox %lu | Illegal transition from %s to Preempted\n", sandbox->id,
sandbox_state_stringify(last_state));
}
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
@ -55,4 +53,6 @@ sandbox_preempt(struct sandbox *sandbox)
{
assert(sandbox->state == SANDBOX_INTERRUPTED);
sandbox_set_as_preempted(sandbox, SANDBOX_INTERRUPTED);
// sandbox_process_scheduler_updates(sandbox);
}

@ -31,29 +31,28 @@ sandbox_set_as_returned(struct sandbox *sandbox, sandbox_state_t last_state)
uint64_t now = __getcycles();
switch (last_state) {
case SANDBOX_RUNNING_SYS: {
case SANDBOX_RUNNING_SYS:
local_runqueue_delete(sandbox);
sandbox_free_linear_memory(sandbox);
// sandbox_free_linear_memory(sandbox);
break;
}
default: {
default:
panic("Sandbox %lu | Illegal transition from %s to Returned\n", sandbox->id,
sandbox_state_stringify(last_state));
}
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
sandbox->last_state_duration = now - sandbox->timestamp_of.last_state_change;
sandbox->remaining_exec = (sandbox->remaining_exec > sandbox->last_state_duration)
? sandbox->remaining_exec - sandbox->last_state_duration
: 0;
sandbox->last_running_state_duration += sandbox->last_state_duration;
sandbox->duration_of_state[last_state] += sandbox->last_state_duration;
sandbox->timestamp_of.last_state_change = now;
sandbox_state_history_append(&sandbox->state_history, SANDBOX_RETURNED);
sandbox_state_totals_increment(SANDBOX_RETURNED);
sandbox_state_totals_decrement(last_state);
sandbox->timestamp_of.completion = now;
sandbox->total_time = sandbox->timestamp_of.completion - sandbox->timestamp_of.allocation;
http_session_set_response_header(sandbox->http, 200);
sandbox->http->state = HTTP_SESSION_EXECUTION_COMPLETE;
http_session_send_response(sandbox->http, (void_star_cb)listener_thread_register_http_session);

@ -29,20 +29,20 @@ sandbox_set_as_runnable(struct sandbox *sandbox, sandbox_state_t last_state)
uint64_t now = __getcycles();
switch (last_state) {
case SANDBOX_INITIALIZED: {
sandbox->timestamp_of.dispatched = now;
case SANDBOX_INITIALIZED:
if(sandbox->timestamp_of.dispatched == 0) sandbox->timestamp_of.dispatched = now;
local_runqueue_add(sandbox);
// sandbox->owned_worker_idx = worker_thread_idx;
assert(sandbox->original_owner_worker_idx == -2);
sandbox->original_owner_worker_idx = worker_thread_idx;
break;
}
case SANDBOX_ASLEEP: {
case SANDBOX_ASLEEP:
local_runqueue_add(sandbox);
break;
}
default: {
default:
panic("Sandbox %lu | Illegal transition from %s to Runnable\n", sandbox->id,
sandbox_state_stringify(last_state));
}
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
@ -63,5 +63,7 @@ static inline void
sandbox_wakeup(struct sandbox *sandbox)
{
assert(sandbox->state == SANDBOX_ASLEEP);
assert(0); // Now sandbox should not sleep
sandbox_set_as_runnable(sandbox, SANDBOX_ASLEEP);
}

@ -23,29 +23,22 @@ sandbox_set_as_running_sys(struct sandbox *sandbox, sandbox_state_t last_state)
uint64_t now = __getcycles();
switch (last_state) {
case SANDBOX_RUNNING_USER: {
case SANDBOX_RUNNING_USER:
assert(sandbox == current_sandbox_get());
assert(runtime_worker_threads_deadline[worker_thread_idx] == sandbox->absolute_deadline);
break;
}
case SANDBOX_RUNNABLE: {
case SANDBOX_RUNNABLE:
assert(sandbox);
break;
}
default: {
default:
panic("Sandbox %lu | Illegal transition from %s to Running Sys\n", sandbox->id,
sandbox_state_stringify(last_state));
}
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
sandbox->last_state_duration = now - sandbox->timestamp_of.last_state_change;
if (last_state == SANDBOX_RUNNING_USER) {
sandbox->remaining_exec = (sandbox->remaining_exec > sandbox->last_state_duration)
? sandbox->remaining_exec - sandbox->last_state_duration
: 0;
}
if(last_state == SANDBOX_RUNNING_USER) sandbox->last_running_state_duration += sandbox->last_state_duration;
sandbox->duration_of_state[last_state] += sandbox->last_state_duration;
sandbox->timestamp_of.last_state_change = now;
sandbox_state_history_append(&sandbox->state_history, SANDBOX_RUNNING_SYS);
@ -64,6 +57,4 @@ sandbox_syscall(struct sandbox *sandbox)
assert(sandbox->state == SANDBOX_RUNNING_USER);
sandbox_set_as_running_sys(sandbox, SANDBOX_RUNNING_USER);
sandbox_process_scheduler_updates(sandbox);
}

@ -19,29 +19,25 @@ sandbox_set_as_running_user(struct sandbox *sandbox, sandbox_state_t last_state)
uint64_t now = __getcycles();
switch (last_state) {
case SANDBOX_RUNNING_SYS: {
case SANDBOX_RUNNING_SYS:
assert(sandbox == current_sandbox_get());
assert(runtime_worker_threads_deadline[worker_thread_idx] == sandbox->absolute_deadline);
break;
}
case SANDBOX_PREEMPTED: {
case SANDBOX_PREEMPTED:
break;
}
default: {
default:
panic("Sandbox %lu | Illegal transition from %s to Running\n", sandbox->id,
sandbox_state_stringify(last_state));
}
}
/* State Change Bookkeeping */
assert(now > sandbox->timestamp_of.last_state_change);
sandbox->last_state_duration = now - sandbox->timestamp_of.last_state_change;
if (last_state == SANDBOX_RUNNING_SYS) {
sandbox->remaining_exec = (sandbox->remaining_exec > sandbox->last_state_duration)
? sandbox->remaining_exec - sandbox->last_state_duration
: 0;
}
if(last_state == SANDBOX_RUNNING_SYS) sandbox->last_running_state_duration += sandbox->last_state_duration;
sandbox->duration_of_state[last_state] += sandbox->last_state_duration;
sandbox->timestamp_of.last_state_change = now;
sandbox_state_history_append(&sandbox->state_history, SANDBOX_RUNNING_USER);
@ -52,8 +48,7 @@ sandbox_set_as_running_user(struct sandbox *sandbox, sandbox_state_t last_state)
sandbox_state_transition_from_hook(sandbox, last_state);
sandbox_state_transition_to_hook(sandbox, SANDBOX_RUNNING_USER);
if (last_state == SANDBOX_RUNNING_SYS)
sandbox_process_scheduler_updates(sandbox); // TODO: is this code preemptable? Ok to be?
assert(sandbox->memory->abi.size > 0);
barrier();
sandbox->state = SANDBOX_RUNNING_USER;

@ -15,6 +15,11 @@
#include "wasm_stack.h"
#include "wasm_types.h"
// #include "wasm_globals.h"
// #include "wasi.h"
// #include "listener_thread.h"
// #include "ck_ring.h"
/*********************
* Structs and Types *
********************/
@ -30,6 +35,7 @@ struct sandbox_timestamps {
#endif
};
struct sandbox_metadata;
struct sandbox {
/* used by ps_list's default name-based MACROS for the scheduling runqueue */
/* Keep as first member of sandbox struct to ensure ps_list maintains alignment */
@ -39,6 +45,14 @@ struct sandbox {
sandbox_state_t state;
struct sandbox_state_history state_history;
uint16_t response_code;
size_t pq_idx_in_runqueue;
size_t pq_idx_in_tenant_queue;
int owned_worker_idx;
int original_owner_worker_idx;
int global_queue_type;
uint8_t num_of_overshoots;
struct sandbox_metadata *sandbox_meta;
/* Accounting Info */
@ -57,13 +71,20 @@ struct sandbox {
struct wasm_memory *memory;
struct vec_wasm_global_t globals;
// uint64_t sizes[1000000];
// uint64_t sizesize;
/* Scheduling and Temporal State */
struct sandbox_timestamps timestamp_of;
uint64_t duration_of_state[SANDBOX_STATE_COUNT];
uint64_t last_state_duration;
uint64_t last_running_state_duration;
uint64_t remaining_exec;
uint64_t absolute_deadline;
bool exceeded_estimation;
bool writeback_preemption_in_progress;
bool writeback_overshoot_in_progress;
uint64_t admissions_estimate; /* estimated execution time (cycles) * runtime_admissions_granularity / relative
deadline (cycles) */
uint64_t total_time; /* Total time from Request to Response */
@ -72,5 +93,29 @@ struct sandbox {
/* System Interface State */
int32_t return_value;
wasi_context_t *wasi_context;
} PAGE_ALIGNED;
/* Shadow bookkeeping record for a sandbox, kept separately from the sandbox
 * itself so the listener/scheduler can inspect a job without touching it.
 * NOTE(review): several field meanings below are inferred from names and from
 * usage visible in this file — confirm against the allocation site
 * (sandbox_meta_alloc) and the listener thread. */
struct sandbox_metadata {
	struct sandbox        *sandbox_shadow; /* the sandbox this record mirrors; asserted equal in scheduler_check_messages_from_listener */
	struct tenant         *tenant;
	struct route          *route;
	struct priority_queue *tenant_queue;
	uint64_t               id;                   /* matches sandbox->id */
	uint64_t               allocation_timestamp;
	uint64_t               absolute_deadline;
	uint64_t               remaining_exec;
	uint64_t               total_be_exec_cycles;
	uint64_t               extra_slack;          /* cycles */
	size_t                 pq_idx_in_tenant_queue; /* heap index maintained by local_sandbox_meta_update_pq_idx_in_tenant_queue */
	int                    owned_worker_idx;
	sandbox_state_t        state;
	bool                   exceeded_estimation;
	bool                   terminated;
	int                    global_queue_type;
	int                    worker_id_virt;
	uint16_t               error_code; /* >0 marks a shed job; copied to sandbox->response_code before sandbox_exit_error */
	struct job_node       *trs_job_node;
	struct demand_node    *demand_node;
	struct demand_node    *local_dbf_demand_node;
}; // PAGE_ALIGNED;

@ -9,11 +9,13 @@
#include "global_request_scheduler_deque.h"
#include "global_request_scheduler_minheap.h"
#include "global_request_scheduler_mtds.h"
#include "global_request_scheduler_mtdbf.h"
#include "local_cleanup_queue.h"
#include "local_runqueue.h"
#include "local_runqueue_list.h"
#include "local_runqueue_minheap.h"
#include "local_runqueue_mtds.h"
#include "local_runqueue_mtdbf.h"
#include "panic.h"
#include "sandbox_functions.h"
#include "sandbox_set_as_interrupted.h"
@ -22,6 +24,7 @@
#include "sandbox_set_as_running_sys.h"
#include "sandbox_set_as_running_user.h"
#include "sandbox_types.h"
#include "sandbox_set_as_error.h"
#include "scheduler_options.h"
@ -66,7 +69,64 @@
static inline struct sandbox *
scheduler_mtdbf_get_next()
{
return NULL;
/* Get the deadline of the sandbox at the head of the local queue */
struct sandbox *local = local_runqueue_get_next();
uint64_t local_deadline = local == NULL ? UINT64_MAX : local->absolute_deadline;
uint64_t local_rem = local == NULL ? 0 : local->remaining_exec;
struct sandbox *global = NULL;
uint64_t now = __getcycles();
struct sandbox_metadata global_metadata = global_request_scheduler_peek_metadata();
/* Try to pull and allocate from the global queue if earlier
* This will be placed at the head of the local runqueue */
if(USING_EARLIEST_START_FIRST) {
if (global_metadata.absolute_deadline - global_metadata.remaining_exec >= local_deadline - local_rem) goto done;
} else {
if (global_metadata.absolute_deadline >= local_deadline) goto done;
}
if (global_request_scheduler_remove_if_earlier(&global, local_deadline) == 0) {
assert(global != NULL);
// assert(global->absolute_deadline < local_deadline);
if (sandbox_validate_self_lifetime(global) == 0) {
if (global->state == SANDBOX_INITIALIZED) {
sandbox_prepare_execution_environment(global);
sandbox_set_as_runnable(global, SANDBOX_INITIALIZED);
struct comm_with_worker *cfw = &comm_from_workers[worker_thread_idx];
assert(cfw);
struct message new_message = {
.sandbox = global,
.sandbox_id = global->id,
.sandbox_meta = global->sandbox_meta,
.state = global->state,
.sender_worker_idx = worker_thread_idx,
.exceeded_estimation = global->exceeded_estimation,
.message_type = MESSAGE_CFW_PULLED_NEW_SANDBOX,
.timestamp = now
};
if (!ck_ring_enqueue_spsc_message(&cfw->worker_ring, cfw->worker_ring_buffer, &new_message)) {
panic("Ring The buffer was full and the enqueue operation has failed.!");
}
} else {
assert(global->state == SANDBOX_PREEMPTED);
// debuglog("Resuming writeback\n");
local_runqueue_add(global);
// global->owned_worker_idx = worker_thread_idx;
}
assert(global->state == SANDBOX_RUNNABLE || global->state == SANDBOX_PREEMPTED);
// printf("Worker %i accepted a sandbox #%lu!\n", worker_thread_idx, global->id);
}
}
done:
/* Return what is at the head of the local runqueue or NULL if empty */
return local_runqueue_get_next();
}
static inline struct sandbox *
@ -204,7 +264,7 @@ scheduler_initialize()
{
switch (scheduler) {
case SCHEDULER_MTDBF:
/* TODO: loading */
global_request_scheduler_mtdbf_initialize();
break;
case SCHEDULER_MTDS:
global_request_scheduler_mtds_initialize();
@ -226,7 +286,7 @@ scheduler_runqueue_initialize()
{
switch (scheduler) {
case SCHEDULER_MTDBF:
// local_runqueue_mtdbf_initialize();
local_runqueue_mtdbf_initialize();
break;
case SCHEDULER_MTDS:
local_runqueue_mtds_initialize();
@ -284,12 +344,20 @@ scheduler_log_sandbox_switch(struct sandbox *current_sandbox, struct sandbox *ne
static inline void
scheduler_preemptive_switch_to(ucontext_t *interrupted_context, struct sandbox *next)
{
/* Switch to base context */
if (next == NULL) {
arch_context_restore_fast(&interrupted_context->uc_mcontext, &worker_thread_base_context);
current_sandbox_set(NULL);
return;
}
/* Switch to next sandbox */
switch (next->ctxt.variant) {
case ARCH_CONTEXT_VARIANT_FAST: {
assert(next->state == SANDBOX_RUNNABLE);
arch_context_restore_fast(&interrupted_context->uc_mcontext, &next->ctxt);
current_sandbox_set(next);
assert(sledge_abi__current_wasm_module_instance.abi.memory.id == next->id);
sandbox_set_as_running_sys(next, SANDBOX_RUNNABLE);
break;
}
@ -297,6 +365,7 @@ scheduler_preemptive_switch_to(ucontext_t *interrupted_context, struct sandbox *
assert(next->state == SANDBOX_PREEMPTED);
arch_context_restore_slow(&interrupted_context->uc_mcontext, &next->ctxt);
current_sandbox_set(next);
assert(sledge_abi__current_wasm_module_instance.abi.memory.id == next->id);
sandbox_set_as_running_user(next, SANDBOX_PREEMPTED);
break;
}
@ -307,6 +376,53 @@ scheduler_preemptive_switch_to(ucontext_t *interrupted_context, struct sandbox *
}
}
/**
 * Drains this worker's SPSC message ring from the listener thread and acts on
 * shed requests. Each message (only MESSAGE_CTW_SHED_CURRENT_JOB is expected)
 * names a sandbox the listener wants killed; the sandbox is terminated with the
 * error code stashed in its metadata and handed to the local cleanup queue.
 * A message is silently dropped when the sandbox has already been freed, has
 * left the runqueue (pq_idx_in_runqueue == 0), or is owned by another worker.
 * @returns 0 (rc is currently never set to anything else)
 */
static inline int
scheduler_check_messages_from_listener()
{
	int rc = 0;

	assert(comm_to_workers);

	struct message new_message = { 0 };
	/* This worker's listener-to-worker ring; single-producer/single-consumer */
	struct comm_with_worker *ctw = &comm_to_workers[worker_thread_idx];
	assert(ctw);
	assert(ctw->worker_idx == worker_thread_idx);
	assert(ck_ring_size(&ctw->worker_ring) < LISTENER_THREAD_RING_SIZE);

	while (ck_ring_dequeue_spsc_message(&ctw->worker_ring, ctw->worker_ring_buffer, &new_message)) {
		assert(new_message.message_type == MESSAGE_CTW_SHED_CURRENT_JOB);

		/* Check if the sandbox is still alive (not freed yet) */
		if (sandbox_refs[new_message.sandbox_id % RUNTIME_MAX_ALIVE_SANDBOXES]) {
			struct sandbox *sandbox_to_kill = new_message.sandbox;
			assert(sandbox_to_kill);
			assert(sandbox_to_kill->id == new_message.sandbox_id);

			if (sandbox_to_kill->pq_idx_in_runqueue == 0 || sandbox_to_kill->owned_worker_idx != worker_thread_idx) {
				/* Make sure the sandbox is in a non-terminal or asleep state (aka: still in the runqueue) */
				new_message.sandbox    = NULL;
				new_message.sandbox_id = 0;
				continue;
			}

			/* The metadata must still shadow this exact sandbox and carry the shed reason */
			struct sandbox_metadata *sandbox_meta = sandbox_to_kill->sandbox_meta;
			assert(sandbox_meta);
			assert(sandbox_meta->sandbox_shadow == sandbox_to_kill);
			assert(sandbox_meta->id == sandbox_to_kill->id);
			assert(sandbox_meta->error_code > 0);

			// printf("Worker#%d shedding sandbox #%lu\n", worker_thread_idx, sandbox_to_kill->id);

			/* Record why the job was killed, transition it to the error state,
			 * and defer the actual teardown to the cleanup queue */
			assert(sandbox_to_kill->response_code == 0);
			sandbox_to_kill->response_code = sandbox_meta->error_code;
			sandbox_exit_error(sandbox_to_kill);
			local_cleanup_queue_add(sandbox_to_kill);
		}
		/* Scrub the message before reuse on the next dequeue */
		new_message.sandbox    = NULL;
		new_message.sandbox_id = 0;
	}

	return rc;
}
/**
* Call either at preemptions or blockings to update the scheduler-specific
* properties for the given tenant.
@ -316,15 +432,16 @@ scheduler_process_policy_specific_updates_on_interrupts(struct sandbox *interrup
{
switch (scheduler) {
case SCHEDULER_FIFO:
return;
case SCHEDULER_EDF:
case SCHEDULER_SJF:
sandbox_process_scheduler_updates(interrupted_sandbox);
return;
case SCHEDULER_MTDS:
sandbox_process_scheduler_updates(interrupted_sandbox);
local_timeout_queue_process_promotions();
return;
case SCHEDULER_MTDBF:
// scheduler_check_messages_from_listener();
scheduler_check_messages_from_listener();
if (interrupted_sandbox->state != SANDBOX_ERROR) {
sandbox_process_scheduler_updates(interrupted_sandbox);
}
@ -343,17 +460,25 @@ scheduler_preemptive_sched(ucontext_t *interrupted_context)
{
assert(interrupted_context != NULL);
/* Process epoll to make sure that all runnable jobs are considered for execution */
struct sandbox *interrupted_sandbox = current_sandbox_get();
assert(interrupted_sandbox != NULL);
assert(interrupted_sandbox->state == SANDBOX_INTERRUPTED);
// printf ("Worker #%d interrupted sandbox #%lu\n", worker_thread_idx, interrupted_sandbox->id);
scheduler_process_policy_specific_updates_on_interrupts(interrupted_sandbox);
struct sandbox *next = scheduler_get_next();
/* Assumption: the current sandbox is still there, even if the worker had to shed it from its runqueue above */
assert(interrupted_sandbox != NULL);
if (interrupted_sandbox->state == SANDBOX_ERROR) goto done;
if(!(interrupted_sandbox->state == SANDBOX_INTERRUPTED)) {
printf("sand state: %u\n", interrupted_sandbox->state);
}
assert(interrupted_sandbox->state == SANDBOX_INTERRUPTED);
/* Assumption: the current sandbox is on the runqueue, so the scheduler should always return something */
assert(next != NULL);
// assert(next != NULL); // Cannot assert, since the head of the global queue may have expired and cleaned before this
/* If current equals next, no switch is necessary, so resume execution */
if (interrupted_sandbox == next) {
@ -369,11 +494,48 @@ scheduler_preemptive_sched(ucontext_t *interrupted_context)
scheduler_log_sandbox_switch(interrupted_sandbox, next);
sandbox_preempt(interrupted_sandbox);
// Write back global at idx 0
wasm_globals_set_i64(&interrupted_sandbox->globals, 0, sledge_abi__current_wasm_module_instance.abi.wasmg_0,
true);
// Update global at idx 0
int rc = wasm_globals_set_i64(&interrupted_sandbox->globals, 0,
sledge_abi__current_wasm_module_instance.abi.wasmg_0, true);
assert(rc == 0);
arch_context_save_slow(&interrupted_sandbox->ctxt, &interrupted_context->uc_mcontext);
#ifdef TRAFFIC_CONTROL
if (USING_WRITEBACK_FOR_PREEMPTION || USING_WRITEBACK_FOR_OVERSHOOT) {
struct message new_message = {
.sandbox = interrupted_sandbox,
.sandbox_id = interrupted_sandbox->id,
.sandbox_meta = interrupted_sandbox->sandbox_meta,
.state = interrupted_sandbox->state,
.sender_worker_idx = worker_thread_idx,
.exceeded_estimation = interrupted_sandbox->exceeded_estimation,
.timestamp = interrupted_sandbox->timestamp_of.last_state_change,
.remaining_exec = interrupted_sandbox->remaining_exec
};
if (interrupted_sandbox->writeback_overshoot_in_progress) {
assert(USING_WRITEBACK_FOR_OVERSHOOT);
assert(interrupted_sandbox->remaining_exec == 0);
new_message.message_type = MESSAGE_CFW_WRITEBACK_OVERSHOOT;
new_message.adjustment = runtime_quantum;
}
else if (interrupted_sandbox->writeback_preemption_in_progress) {
assert(USING_WRITEBACK_FOR_PREEMPTION);
assert(USING_LOCAL_RUNQUEUE == false);
new_message.message_type = MESSAGE_CFW_WRITEBACK_PREEMPTION;
new_message.adjustment = 0;
} else panic("No writeback is in progress. Cannot be here!");
struct comm_with_worker *cfw = &comm_from_workers[worker_thread_idx];
if (!ck_ring_enqueue_spsc_message(&cfw->worker_ring, cfw->worker_ring_buffer, &new_message)) {
panic("Ring The buffer was full and the enqueue operation has failed.!");
}
}
#endif
/* CAUTION! Worker MUST NOT access interrupted sandbox after this point! */
done:
scheduler_preemptive_switch_to(interrupted_context, next);
}
@ -394,12 +556,14 @@ scheduler_cooperative_switch_to(struct arch_context *current_context, struct san
case SANDBOX_RUNNABLE: {
assert(next_context->variant == ARCH_CONTEXT_VARIANT_FAST);
current_sandbox_set(next_sandbox);
assert(sledge_abi__current_wasm_module_instance.abi.memory.id == next_sandbox->id);
sandbox_set_as_running_sys(next_sandbox, SANDBOX_RUNNABLE);
break;
}
case SANDBOX_PREEMPTED: {
assert(next_context->variant == ARCH_CONTEXT_VARIANT_SLOW);
current_sandbox_set(next_sandbox);
assert(sledge_abi__current_wasm_module_instance.abi.memory.id == next_sandbox->id);
/* arch_context_switch triggers a SIGUSR1, which transitions next_sandbox to running_user */
break;
}
@ -424,6 +588,7 @@ scheduler_switch_to_base_context(struct arch_context *current_context)
static inline void
scheduler_idle_loop()
{
int spin = 0, max_spin = 0;
while (true) {
/* Assumption: only called by the "base context" */
assert(current_sandbox_get() == NULL);
@ -435,6 +600,13 @@ scheduler_idle_loop()
struct sandbox *next_sandbox = scheduler_get_next();
if (next_sandbox != NULL) {
scheduler_cooperative_switch_to(&worker_thread_base_context, next_sandbox);
spin++;
if (spin > max_spin) {
max_spin = spin;
// printf("Worker #%d max useless spins #%d!\n", worker_thread_idx, max_spin);
}
} else {
spin = 0;
}
/* Clear the cleanup queue */
@ -479,6 +651,7 @@ scheduler_cooperative_sched(bool add_to_cleanup_queue)
/* If our sandbox slept and immediately woke up, we can just return */
if (next_sandbox == exiting_sandbox) {
assert(0); // Never happens, sandboxes don't sleep anymore
sandbox_set_as_running_sys(next_sandbox, SANDBOX_RUNNABLE);
current_sandbox_set(next_sandbox);
return;
@ -487,6 +660,7 @@ scheduler_cooperative_sched(bool add_to_cleanup_queue)
scheduler_log_sandbox_switch(exiting_sandbox, next_sandbox);
// Write back global at idx 0
assert(sledge_abi__current_wasm_module_instance.abi.wasmg_0 == 0);
wasm_globals_set_i64(&exiting_sandbox->globals, 0, sledge_abi__current_wasm_module_instance.abi.wasmg_0, true);
if (add_to_cleanup_queue) local_cleanup_queue_add(exiting_sandbox);
@ -503,8 +677,11 @@ scheduler_cooperative_sched(bool add_to_cleanup_queue)
static inline bool
scheduler_worker_would_preempt(int worker_idx)
{
assert(scheduler == SCHEDULER_EDF);
// assert(scheduler == SCHEDULER_EDF);
uint64_t local_deadline = runtime_worker_threads_deadline[worker_idx];
uint64_t global_deadline = global_request_scheduler_peek();
return global_deadline < local_deadline;
/* Only send a worker SIGARLM if it has a sandbox to execute (MTDBF)
or it needs to check the global queue for a new higher priority job */
return local_deadline < UINT64_MAX || global_deadline < local_deadline;
}

@ -9,7 +9,7 @@
#include "wasm_types.h"
struct sledge_abi_symbols {
void *handle;
void *handle; //////////////// TODO: Maybe make this local below?
sledge_abi__init_globals_fn_t initialize_globals;
sledge_abi__init_mem_fn_t initialize_memory;
sledge_abi__init_tbl_fn_t initialize_tables;

@ -13,7 +13,8 @@ enum MULTI_TENANCY_CLASS
};
struct tenant_timeout {
uint64_t timeout;
uint64_t timeout;
struct tenant *tenant;
struct perworker_tenant_sandbox_queue *pwt;
};
@ -32,6 +33,21 @@ struct tenant_global_request_queue {
_Atomic volatile enum MULTI_TENANCY_CLASS mt_class;
};
/* One admitted job's demand record on a tenant reservation-server list. */
struct job_node {
	struct ps_list list;                   /* intrusive list linkage */
	uint64_t exec;                         /* admitted execution demand, in cycles */
	uint64_t timestamp;                    /* admission/arrival timestamp, in cycles */
	struct sandbox_metadata *sandbox_meta; /* back-pointer to the owning sandbox metadata; NULL for best-effort entries -- TODO confirm */
};
/* Per-tenant reservation server: tracks the guaranteed and best-effort
 * budgets plus the lists of jobs currently admitted against each budget. */
struct tenant_reservation_server {
	uint64_t max_budget_guaranteed;            /* ceiling the guaranteed budget replenishes toward, in cycles */
	uint64_t budget_guaranteed;                /* currently available guaranteed budget, in cycles */
	uint64_t budget_best;                      /* currently available best-effort budget, in cycles */
	struct ps_list_head admitted_jobs_list;    /* jobs admitted against the guaranteed budget */
	struct ps_list_head admitted_BE_jobs_list; /* jobs force-admitted best-effort */
};
struct tenant {
enum epoll_tag tag; /* Tag must be first member */
char *name;
@ -40,13 +56,19 @@ struct tenant {
struct module_database module_db;
struct map scratch_storage;
/* Deferrable Server Attributes */
uint64_t replenishment_period; /* cycles, not changing after init */
uint64_t max_budget; /* cycles, not changing after init */
_Atomic volatile int64_t remaining_budget; /* cycles left till next replenishment, can be negative */
/* Multi-Tenancy Attributes */
uint64_t max_relative_deadline;
uint64_t replenishment_period; /* cycles, not changing after init */
uint64_t max_budget; /* cycles, not changing after init */
_Atomic volatile int64_t remaining_budget; /* cycles left till next replenishment, can be negative */
uint8_t reservation_percentile; /* percentile of the overall reservation utilisation */
struct perworker_tenant_sandbox_queue *pwt_sandboxes;
struct tenant_global_request_queue *tgrq_requests;
struct priority_queue *local_sandbox_metas, *global_sandbox_metas;
struct tenant_reservation_server trs;
uint32_t num_of_overshooted_sandboxes;
uint8_t max_overshoot_of_same_sandbox;
};

@ -14,6 +14,7 @@ enum tenant_config_member
tenant_config_member_port,
tenant_config_member_replenishment_period_us,
tenant_config_member_max_budget_us,
tenant_config_reservation_percentile,
tenant_config_member_routes,
tenant_config_member_len
};
@ -23,8 +24,10 @@ struct tenant_config {
uint16_t port;
uint32_t replenishment_period_us;
uint32_t max_budget_us;
uint8_t reservation_percentile;
struct route_config *routes;
size_t routes_len;
uint32_t max_relative_deadline_us;
};
static inline void
@ -34,6 +37,7 @@ tenant_config_deinit(struct tenant_config *config)
config->name = NULL;
config->replenishment_period_us = 0;
config->max_budget_us = 0;
config->reservation_percentile = 0;
for (int i = 0; i < config->routes_len; i++) { route_config_deinit(&config->routes[i]); }
free(config->routes);
config->routes = NULL;
@ -48,9 +52,12 @@ tenant_config_print(struct tenant_config *config)
if (scheduler == SCHEDULER_MTDS) {
printf("[Tenant] Replenishment Period (us): %u\n", config->replenishment_period_us);
printf("[Tenant] Max Budget (us): %u\n", config->max_budget_us);
} else if (scheduler == SCHEDULER_MTDBF) {
printf("[Tenant] Reservation (%%): %u\n", config->reservation_percentile);
}
printf("[Tenant] Routes Size: %zu\n", config->routes_len);
for (int i = 0; i < config->routes_len; i++) { route_config_print(&config->routes[i]); }
printf("\n");
}
static inline int
@ -93,6 +100,17 @@ tenant_config_validate(struct tenant_config *config, bool *did_set)
(uint32_t)RUNTIME_RELATIVE_DEADLINE_US_MAX, config->max_budget_us);
return -1;
}
} else if (scheduler == SCHEDULER_MTDBF) {
if (did_set[tenant_config_reservation_percentile] == false) {
fprintf(stderr, "reservation-percentile field is required\n");
return -1;
}
if (config->replenishment_period_us > 100) {
fprintf(stderr, "Reservation-percentile must be between 0 and 100, was %u\n",
config->reservation_percentile);
return -1;
}
}
if (config->routes_len == 0) {

@ -9,8 +9,9 @@
#include "route_config_parse.h"
#include "tenant_config.h"
static const char *tenant_config_json_keys[tenant_config_member_len] = {"name", "port", "replenishment-period-us",
"max-budget-us", "routes"};
static const char *tenant_config_json_keys[tenant_config_member_len] = {
"name", "port", "replenishment-period-us", "max-budget-us", "reservation-percentile", "routes"
};
static inline int
tenant_config_set_key_once(bool *did_set, enum tenant_config_member member)
@ -79,6 +80,14 @@ tenant_config_parse(struct tenant_config *config, const char *json_buf, jsmntok_
tenant_config_json_keys[tenant_config_member_max_budget_us],
&config->max_budget_us);
if (rc < 0) return -1;
} else if (strcmp(key, tenant_config_json_keys[tenant_config_reservation_percentile]) == 0) {
if (!has_valid_type(tokens[i], key, JSMN_PRIMITIVE, json_buf)) return -1;
if (tenant_config_set_key_once(did_set, tenant_config_reservation_percentile) == -1) return -1;
int rc = parse_uint8_t(tokens[i], json_buf,
tenant_config_json_keys[tenant_config_reservation_percentile],
&config->reservation_percentile);
if (rc < 0) return -1;
} else if (strcmp(key, tenant_config_json_keys[tenant_config_member_routes]) == 0) {
if (!has_valid_type(tokens[i], key, JSMN_ARRAY, json_buf)) return -1;
if (tenant_config_set_key_once(did_set, tenant_config_member_routes) == -1) return -1;
@ -92,6 +101,9 @@ tenant_config_parse(struct tenant_config *config, const char *json_buf, jsmntok_
i++;
i = route_config_parse(&(config->routes)[route_idx], json_buf, tokens, i, tokens_size);
if (i == -1) return -1;
if (config->routes[route_idx].relative_deadline_us > config->max_relative_deadline_us) {
config->max_relative_deadline_us = config->routes[route_idx].relative_deadline_us;
}
}
} else {

@ -12,17 +12,37 @@
#include "scheduler_options.h"
#include "tenant.h"
#include "tenant_config.h"
#include "priority_queue.h"
#include "sandbox_functions.h"
#include "dbf.h"
#define REPLENISHMENT_PERIOD (runtime_max_deadline)
int tenant_listen(struct tenant *tenant);
int listener_thread_register_tenant(struct tenant *tenant);
void tenant_preprocess(struct http_session *session);
int tenant_database_add(struct tenant *tenant);
struct tenant *tenant_database_find_by_name(char *name);
struct tenant *tenant_database_find_by_socket_descriptor(int socket_descriptor);
struct tenant *tenant_database_find_by_port(uint16_t port);
struct tenant *tenant_database_find_by_ptr(void *ptr);
void tenant_database_print_reservations();
void tenant_database_init_reservations();
void tenant_database_replenish_all();
struct tenant *tenant_database_find_tenant_most_oversupply(struct tenant *tenant_to_exclude, uint64_t time_of_oversupply, bool weak_shed, struct sandbox_metadata **sandbox_meta_to_remove);
typedef void (*tenant_database_foreach_cb_t)(struct tenant *, void *, void *);
void tenant_database_foreach(tenant_database_foreach_cb_t, void *, void *);
typedef void (*tenant_database_foreach_cb_t)(struct tenant *, void *);
void tenant_database_foreach(tenant_database_foreach_cb_t, void *);
/**
 * Priority-queue key extractor for sandbox metadata entries.
 * @param element opaque pointer to a struct sandbox_metadata
 * @returns the sandbox's absolute deadline (earlier deadline = higher priority)
 */
static inline uint64_t
sandbox_meta_get_priority(void *element)
{
	return ((struct sandbox_metadata *)element)->absolute_deadline;
}
static inline int
static inline void
tenant_policy_specific_init(struct tenant *tenant, struct tenant_config *config)
{
switch (scheduler) {
@ -31,21 +51,20 @@ tenant_policy_specific_init(struct tenant *tenant, struct tenant_config *config)
case SCHEDULER_EDF:
case SCHEDULER_SJF:
break;
case SCHEDULER_MTDS:
case SCHEDULER_MTDS: {
/* Deferable Server Initialization */
tenant->replenishment_period = (uint64_t)config->replenishment_period_us * runtime_processor_speed_MHz;
tenant->max_budget = (uint64_t)config->max_budget_us * runtime_processor_speed_MHz;
tenant->remaining_budget = tenant->max_budget;
tenant->pwt_sandboxes = (struct perworker_tenant_sandbox_queue *)malloc(
runtime_worker_threads_count * sizeof(struct perworker_tenant_sandbox_queue));
if (!tenant->pwt_sandboxes) {
fprintf(stderr, "Failed to allocate tenant_sandboxes array: %s\n", strerror(errno));
return -1;
};
config->replenishment_period_us = 0;
config->max_budget_us = 0;
memset(tenant->pwt_sandboxes, 0,
runtime_worker_threads_count * sizeof(struct perworker_tenant_sandbox_queue));
tenant->pwt_sandboxes = (struct perworker_tenant_sandbox_queue *)
calloc(runtime_worker_threads_count, sizeof(struct perworker_tenant_sandbox_queue));
if (!tenant->pwt_sandboxes) {
panic("Failed to allocate tenant_sandboxes array: %s\n", strerror(errno));
}
for (int i = 0; i < runtime_worker_threads_count; i++) {
tenant->pwt_sandboxes[i].sandboxes = priority_queue_initialize(RUNTIME_TENANT_QUEUE_SIZE, false,
@ -58,20 +77,34 @@ tenant_policy_specific_init(struct tenant *tenant, struct tenant_config *config)
}
/* Initialize the tenant's global request queue */
tenant->tgrq_requests = malloc(sizeof(struct tenant_global_request_queue));
tenant->tgrq_requests = calloc(1, sizeof(struct tenant_global_request_queue));
if (!tenant->tgrq_requests) {
panic("Failed to allocate tenant global request queue: %s\n", strerror(errno));
}
tenant->tgrq_requests->sandbox_requests = priority_queue_initialize(RUNTIME_TENANT_QUEUE_SIZE, false,
sandbox_get_priority);
tenant->tgrq_requests->tenant = tenant;
tenant->tgrq_requests->mt_class = (tenant->replenishment_period == 0) ? MT_DEFAULT : MT_GUARANTEED;
tenant->tgrq_requests->mt_class = (tenant_is_paid(tenant)) ? MT_GUARANTEED : MT_DEFAULT;
tenant->tgrq_requests->tenant_timeout.tenant = tenant;
tenant->tgrq_requests->tenant_timeout.pwt = NULL;
break;
}
case SCHEDULER_MTDBF:
tenant->reservation_percentile = config->reservation_percentile;
config->reservation_percentile = 0;
ps_list_head_init(&tenant->trs.admitted_jobs_list);
ps_list_head_init(&tenant->trs.admitted_BE_jobs_list);
tenant->local_sandbox_metas =
priority_queue_initialize_new(RUNTIME_RUNQUEUE_SIZE, false, sandbox_meta_get_priority, NULL,
local_sandbox_meta_update_pq_idx_in_tenant_queue);
tenant->global_sandbox_metas =
priority_queue_initialize_new(RUNTIME_RUNQUEUE_SIZE, false, sandbox_meta_get_priority, NULL,
local_sandbox_meta_update_pq_idx_in_tenant_queue);
break;
}
return 0;
}
static inline struct tenant *
@ -81,8 +114,9 @@ tenant_alloc(struct tenant_config *config)
if (existing_tenant != NULL) panic("Tenant %s is already initialized\n", existing_tenant->name);
existing_tenant = tenant_database_find_by_port(config->port);
if (existing_tenant != NULL)
if (existing_tenant != NULL) {
panic("Tenant %s is already configured with port %u\n", existing_tenant->name, config->port);
}
struct tenant *tenant = (struct tenant *)calloc(1, sizeof(struct tenant));
@ -91,12 +125,14 @@ tenant_alloc(struct tenant_config *config)
tenant->name = config->name;
config->name = NULL;
tenant->max_relative_deadline = (uint64_t)config->max_relative_deadline_us * runtime_processor_speed_MHz;
tcp_server_init(&tenant->tcp_server, config->port);
http_router_init(&tenant->router, config->routes_len);
module_database_init(&tenant->module_db);
map_init(&tenant->scratch_storage);
/* Deferrable Server init */
/* Scheduling Policy specific tenant init */
tenant_policy_specific_init(tenant, config);
for (int i = 0; i < config->routes_len; i++) {
@ -176,12 +212,145 @@ get_next_timeout_of_tenant(uint64_t replenishment_period)
+ ((now - runtime_boot_timestamp) / replenishment_period + 1) * replenishment_period;
}
#ifdef TRAFFIC_CONTROL
/**
* Start the tenant as a server listening at tenant->port
* @param tenant
* @returns 0 on success, -1 on error
*/
int tenant_listen(struct tenant *tenant);
int listener_thread_register_tenant(struct tenant *tenant);
void tenant_preprocess(struct http_session *session);
/**
 * Debug dump of a tenant's reservation-server state: remaining budgets and
 * every job currently admitted against each budget.
 * @param tenant tenant whose reservation server is printed
 */
static void
tenant_print_jobs(struct tenant *tenant)
{
	struct tenant_reservation_server *trs  = &tenant->trs;
	struct job_node                  *node = NULL;

	printf("\nTenant Guaranteed Budget: %lu/%lu\n", trs->budget_guaranteed, trs->max_budget_guaranteed);
	int guaranteed_idx = 0;
	ps_list_foreach_d(&trs->admitted_jobs_list, node)
	{
		printf("GR-Job #%d: Arrival:%lu\t Exec:%lu\n", guaranteed_idx++, node->timestamp, node->exec);
	}

	/* Best-effort budget has no finite ceiling, hence UINT64_MAX */
	printf("\nTenant Best Effort Budget: %lu/%lu\n", trs->budget_best, UINT64_MAX);
	int best_effort_idx = 0;
	ps_list_foreach_d(&trs->admitted_BE_jobs_list, node)
	{
		printf("BE-Job #%d: Arrival:%lu\t Exec:%lu\n", best_effort_idx++, node->timestamp, node->exec);
	}
}
/**
 * Replenish a tenant's reservation-server budgets by expiring admitted jobs
 * whose replenishment period has elapsed. Both lists are kept in admission
 * (timestamp) order, so each scan stops at the first still-live entry.
 * Expired nodes are unlinked and freed, and their demand is returned to the
 * corresponding budget.
 * @param tenant tenant whose budgets are replenished
 * @param now    current timestamp in cycles
 */
static void
tenant_replenish(struct tenant *tenant, uint64_t now)
{
	struct tenant_reservation_server *trs = &tenant->trs;

	/* Guaranteed list: return expired demand to the guaranteed budget */
	struct job_node *head = ps_list_head_first_d(&trs->admitted_jobs_list, struct job_node);
	while (!ps_list_is_head_d(&trs->admitted_jobs_list, head)) {
		assert(now > head->timestamp); /* NOTE(review): strict '>' here, but '>=' in the BE loop below -- confirm intent */
		if (now - head->timestamp < REPLENISHMENT_PERIOD) break; /* list is time-ordered: nothing further can be expired */
		struct job_node *tmp_next = ps_list_next_d(head); /* capture successor before unlinking/freeing */
		trs->budget_guaranteed += head->exec;
		if (head->sandbox_meta) {
			/* Detach the sandbox metadata's back-pointer so it does not dangle */
			assert(head->sandbox_meta->trs_job_node);
			head->sandbox_meta->trs_job_node = NULL;
		}
		ps_list_rem_d(head);
		free(head);
		head = tmp_next;
	}
	assert(trs->budget_guaranteed <= trs->max_budget_guaranteed);

	/* Best-effort list: same expiry scan, credited to the best-effort budget */
	head = ps_list_head_first_d(&trs->admitted_BE_jobs_list, struct job_node);
	while (!ps_list_is_head_d(&trs->admitted_BE_jobs_list, head)) {
		assert(now >= head->timestamp);
		if (now - head->timestamp < REPLENISHMENT_PERIOD) break;
		struct job_node *tmp_next = ps_list_next_d(head);
		trs->budget_best += head->exec;
		ps_list_rem_d(head);
		free(head);
		head = tmp_next;
	}
}
/**
 * Check whether a tenant's guaranteed budget can cover an admission request.
 * Expires aged jobs first so the budget reflects the current window.
 * @param tenant     tenant whose reservation server is consulted
 * @param now        current timestamp in cycles
 * @param adjustment requested demand, in cycles
 * @returns true when the guaranteed budget covers the request
 */
static inline bool
tenant_can_admit_guaranteed(struct tenant *tenant, uint64_t now, uint64_t adjustment)
{
	tenant_replenish(tenant, now);
	const bool fits = (tenant->trs.budget_guaranteed >= adjustment);
	return fits;
}
/**
 * Try to admit a job against the tenant's guaranteed budget.
 * On success, a job node is appended to the admitted list, linked to the
 * sandbox metadata, and the demand is deducted from the guaranteed budget.
 * @param tenant       tenant doing the admission
 * @param arrival_time job arrival timestamp, in cycles
 * @param adjustment   demand to admit, in cycles (must be > 0)
 * @param sandbox_meta metadata of the sandbox being admitted (must not already hold a job node)
 * @returns true on admission, false when the guaranteed budget is insufficient
 */
static bool
tenant_try_add_job_as_guaranteed(struct tenant *tenant, uint64_t arrival_time, uint64_t adjustment, struct sandbox_metadata *sandbox_meta)
{
	assert(adjustment > 0);

	struct tenant_reservation_server *trs = &tenant->trs;
	if (trs->budget_guaranteed < adjustment) return false;

	/* BUGFIX: validate the pointer before dereferencing it -- the original
	 * asserted sandbox_meta->trs_job_node == NULL ahead of assert(sandbox_meta). */
	assert(sandbox_meta);
	assert(sandbox_meta->trs_job_node == NULL);
	assert(sandbox_meta->terminated == false);

	struct job_node *new_node = (struct job_node *)malloc(sizeof(struct job_node));
	assert(new_node != NULL); /* BUGFIX: malloc result was previously used unchecked */
	ps_list_init_d(new_node);
	new_node->exec         = adjustment;
	new_node->timestamp    = arrival_time;
	new_node->sandbox_meta = sandbox_meta;
	assert(ps_list_singleton_d(new_node));

	/* Append at the tail to keep the list in admission-time order */
	struct job_node *tail = ps_list_head_last_d(&trs->admitted_jobs_list, struct job_node);
	ps_list_add_d(tail, new_node);
	sandbox_meta->trs_job_node = new_node;

	assert(trs->budget_guaranteed >= adjustment);
	trs->budget_guaranteed -= adjustment;

	return true;
}
/**
 * Unconditionally admit a job against the tenant's best-effort budget
 * (work-conservation path: the caller has already decided to admit).
 * @param tenant       tenant doing the admission
 * @param arrival_time job arrival timestamp, in cycles
 * @param adjustment   demand to admit, in cycles (must be > 0)
 */
static void
tenant_force_add_job_as_best(struct tenant *tenant, uint64_t arrival_time, uint64_t adjustment)
{
	assert(adjustment > 0);

	struct tenant_reservation_server *trs = &tenant->trs;

	struct job_node *new_node = (struct job_node *)malloc(sizeof(struct job_node));
	assert(new_node != NULL); /* BUGFIX: malloc result was previously used unchecked */
	ps_list_init_d(new_node);
	new_node->exec         = adjustment;
	new_node->timestamp    = arrival_time;
	new_node->sandbox_meta = NULL; /* BUGFIX: field was left uninitialized; best-effort entries carry no metadata */
	assert(ps_list_singleton_d(new_node));

	/* Append at the tail to keep the list in admission-time order */
	struct job_node *tail = ps_list_head_last_d(&trs->admitted_BE_jobs_list, struct job_node);
	ps_list_add_d(tail, new_node);

	/* BUGFIX: use >= so an exact-fit admission does not trip the assert,
	 * matching the guaranteed-budget path's comparison */
	assert(trs->budget_best >= adjustment);
	trs->budget_best -= adjustment;
}
/**
 * Return part of a guaranteed job's admitted demand to the tenant's budget
 * and sever the link between the sandbox metadata and its job node.
 * The node itself stays on the admitted list (with reduced exec) until
 * tenant_replenish() eventually expires and frees it.
 * @param tenant       tenant owning the reservation server
 * @param adjustment   cycles to return to the guaranteed budget (<= node->exec)
 * @param sandbox_meta metadata holding the job node to shrink
 */
static void
tenant_reduce_guaranteed_job_demand(struct tenant *tenant, uint64_t adjustment, struct sandbox_metadata *sandbox_meta)
{
	assert(sandbox_meta);
	// assert(sandbox_meta->terminated == false); // TODO WHY FIRES??? -- NOTE(review): unresolved; investigate before re-enabling
	assert(sandbox_meta->global_queue_type == 1); /* only guaranteed-queue sandboxes hold a trs job node -- TODO confirm */
	assert(sandbox_meta->trs_job_node);
	struct tenant_reservation_server *trs = &tenant->trs;
	struct job_node *node = sandbox_meta->trs_job_node;
	assert(node->sandbox_meta == sandbox_meta);
	assert(node->exec >= adjustment);
	/* Shrink the node's recorded demand and credit the budget back */
	node->exec -= adjustment;
	trs->budget_guaranteed += adjustment;
	/* Break both directions of the metadata <-> job-node link */
	sandbox_meta->trs_job_node->sandbox_meta = NULL;
	sandbox_meta->trs_job_node = NULL;
}
#endif

@ -0,0 +1,16 @@
#pragma once
#include <stdbool.h>
#include <stdint.h>
#define TRAFFIC_CONTROL
// #define LOG_TRAFFIC_CONTROL
typedef struct tenant tenant; // TODO: Why get circular dependency here?
typedef struct sandbox_metadata sandbox_metadata;
typedef enum dbf_update_mode dbf_update_mode_t;
void traffic_control_initialize(void);
void traffic_control_log_decision(const int admissions_case_num, const bool admitted);
uint64_t traffic_control_decide(struct sandbox_metadata *sandbox_meta, uint64_t start_time, uint64_t estimated_execution, int *denial_code, int *worker_id_v);
uint64_t traffic_control_shed_work(struct tenant *tenant_to_exclude, uint64_t time_of_oversupply, int *worker_id_virt_just_shed, bool weak_shed);

@ -137,9 +137,10 @@ wasm_memory_reinit(struct wasm_memory *wasm_memory, uint64_t initial)
static INLINE int32_t
wasm_memory_expand(struct wasm_memory *wasm_memory, uint64_t size_to_expand)
{
assert(wasm_memory);
uint64_t target_size = wasm_memory->abi.size + size_to_expand;
if (unlikely(target_size > wasm_memory->abi.max)) {
fprintf(stderr, "wasm_memory_expand - Out of Memory!. %lu out of %lu\n", wasm_memory->abi.size,
fprintf(stderr, "wasm_memory_expand - Out of Memory! target_size=%lu, size=%lu, max=%lu\n", target_size, wasm_memory->abi.size,
wasm_memory->abi.max);
return -1;
}
@ -153,6 +154,7 @@ wasm_memory_expand(struct wasm_memory *wasm_memory, uint64_t size_to_expand)
int rc = mprotect(wasm_memory->abi.buffer, target_size, PROT_READ | PROT_WRITE);
if (rc != 0) {
perror("wasm_memory_expand mprotect");
assert(0);
return -1;
}
@ -169,6 +171,9 @@ wasm_memory_get_size(struct wasm_memory *wasm_memory)
return wasm_memory->abi.size;
}
/**
* @brief Copy the segments into the linear memory
*/
static INLINE void
wasm_memory_initialize_region(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t region_size, uint8_t region[])
{
@ -180,7 +185,7 @@ wasm_memory_initialize_region(struct wasm_memory *wasm_memory, uint32_t offset,
* instructions. These functions are intended to be used by the runtime to interacts with linear memories. */
/**
* Translates WASM offsets into runtime VM pointers
* Translates WASM offsets into runtime Virtual Memory pointers
* @param offset an offset into the WebAssembly linear memory
* @param bounds_check the size of the thing we are pointing to
* @return void pointer to something in WebAssembly linear memory

@ -125,6 +125,6 @@ wasm_stack_reinit(struct wasm_stack *wasm_stack)
assert(wasm_stack->low == wasm_stack->buffer + /* guard page */ PAGE_SIZE);
assert(wasm_stack->high == wasm_stack->low + wasm_stack->capacity);
explicit_bzero(wasm_stack->low, wasm_stack->capacity);
// explicit_bzero(wasm_stack->low, wasm_stack->capacity);
ps_list_init_d(wasm_stack);
}

@ -6,5 +6,6 @@
extern thread_local struct arch_context worker_thread_base_context;
extern thread_local int worker_thread_idx;
extern thread_local void *worker_dbf;
void *worker_thread_main(void *return_code);

@ -68,6 +68,22 @@ current_sandbox_exit()
assert(0);
}
/**
* @brief Exit from the executing sandbox during an interrup.
*
* Should be called within the signal handler (preemptive scheduler)
* This places the current sandbox on the cleanup queue.
*/
void
interrupted_sandbox_exit(void)
{
	/* Retire the currently-running sandbox from within the preemption path:
	 * it must be in SANDBOX_INTERRUPTED; move it to the error state and hand
	 * it to the local cleanup queue. */
	struct sandbox *current = current_sandbox_get();
	assert(current != NULL);
	assert(current->state == SANDBOX_INTERRUPTED);

	sandbox_exit_error(current);
	local_cleanup_queue_add(current);
}
void
current_sandbox_wasm_trap_handler(int trapno)
{
@ -99,7 +115,10 @@ current_sandbox_wasm_trap_handler(int trapno)
break;
}
debuglog("%s", error_message);
// debuglog("%s - Tenant: %s, Route: %s", error_message, sandbox->tenant->name, sandbox->route->route);
debuglog("%s - T: %s, id: %lu, exceeded: %u, rem_exec: %lu, premp: %u, state: %u, abi->size: %lu", error_message, sandbox->tenant->name, sandbox->id,
sandbox->exceeded_estimation, sandbox->remaining_exec, sandbox->writeback_preemption_in_progress, sandbox->state,
sandbox->memory->abi.size);
current_sandbox_exit();
assert(0);
}
@ -112,8 +131,8 @@ current_sandbox_init()
assert(sandbox != NULL);
assert(sandbox->state == SANDBOX_RUNNING_SYS);
int rc = 0;
char *error_message = NULL;
// int rc = 0;
// char *error_message = NULL;
/* Initialize sandbox memory */
struct module *current_module = sandbox_get_module(sandbox);
@ -142,10 +161,10 @@ current_sandbox_init()
return sandbox;
err:
debuglog("%s", error_message);
current_sandbox_exit();
return NULL;
// err:
// debuglog("%s", error_message);
// current_sandbox_exit();
// return NULL;
}
extern noreturn void
@ -157,8 +176,8 @@ current_sandbox_fini()
char *error_message = "";
sandbox_syscall(sandbox);
sandbox->timestamp_of.completion = __getcycles();
sandbox->total_time = sandbox->timestamp_of.completion - sandbox->timestamp_of.allocation;
// sandbox->timestamp_of.completion = __getcycles();
// sandbox->total_time = sandbox->timestamp_of.completion - sandbox->timestamp_of.allocation;
assert(sandbox->state == SANDBOX_RUNNING_SYS);
@ -169,6 +188,7 @@ done:
current_sandbox_exit();
assert(0);
err:
assert(0);
debuglog("%s", error_message);
assert(sandbox->state == SANDBOX_RUNNING_SYS);
@ -195,3 +215,26 @@ current_sandbox_start(void)
if (sandbox->module->type == APP_MODULE) current_sandbox_fini();
}
/**
 * Verify that a sandbox is still worth running: no error code has been set
 * and its absolute deadline is still reachable (accounting for the remaining
 * estimated execution unless the estimate was already exceeded).
 * On failure the sandbox is retired with response code 4081 (deadline missed)
 * unless a response code was already recorded, and queued for cleanup.
 * @param sandbox sandbox validating itself
 * @returns 0 when the sandbox may keep running, -1 when it was retired
 */
int
sandbox_validate_self_lifetime(struct sandbox *sandbox)
{
	if (sandbox->response_code == 0) {
		const uint64_t now   = __getcycles();
		const uint64_t slack = (!sandbox->exceeded_estimation ? sandbox->remaining_exec : 0);
		if (sandbox->absolute_deadline >= now + slack) return 0;

		assert(sandbox->response_code == 0);
		sandbox->response_code = 4081;
	}

	sandbox_exit_error(sandbox);
	local_cleanup_queue_add(sandbox);
	return -1;
}

@ -0,0 +1,335 @@
#include <string.h>
#include <assert.h>
#include "tenant.h"
#include "runtime.h"
#include "arch/getcycles.h"
#include "math.h"
#include "message.h"
#include "panic.h"
#include "dbf.h"
struct tenant;
/* Demand-based-function (DBF) state backed by a circular array of per-quantum
 * demand buckets. One instance exists per worker, plus tenant/global
 * instances distinguished by a negative worker_idx. */
struct dbf_array {
	struct tenant *tenant;          /* owning tenant; NULL for non-tenant instances -- TODO confirm */
	int worker_idx;                 /* worker this dbf models; negative for tenant/global dbf */
	// uint32_t idx_oversupply;
	uint64_t max_relative_deadline; /* largest relative deadline covered, in cycles */
	uint64_t base_supply; /* supply amount for time 1 */
	uint64_t time_of_oversupply;    /* quantum offset at which demand last exceeded supply */
	uint64_t max_absolute_deadline; /* largest absolute deadline seen so far, in cycles */
	uint32_t capacity;              /* number of quantum slots in demands[] */
	uint64_t demands[];             /* circular per-quantum aggregate demand, indexed by (abs_deadline / runtime_quantum) % capacity */
};
/**
 * Accessor for the worker index a dbf instance models.
 * @param dbf_raw opaque pointer to a struct dbf_array (must not be NULL)
 * @returns the worker index (negative for tenant/global instances)
 */
static inline int
dbf_array_get_worker_idx(void *dbf_raw)
{
	assert(dbf_raw);
	return ((struct dbf_array *)dbf_raw)->worker_idx;
}
/*static inline uint64_t
dbf_array_get_max_relative_dl(void * dbf_raw)
{
assert(dbf_raw);
struct dbf_array *dbf = (struct dbf_array *)dbf_raw;
return dbf->max_relative_deadline;
}*/
/**
 * Accessor for the quantum offset at which demand last exceeded supply.
 * @param dbf_raw opaque pointer to a struct dbf_array (must not be NULL)
 * @returns the recorded time of oversupply
 */
static inline uint64_t
dbf_array_get_time_of_oversupply(void *dbf_raw)
{
	assert(dbf_raw);
	return ((struct dbf_array *)dbf_raw)->time_of_oversupply;
}
/**
 * Debug dump of a dbf instance: identity, capacity, deadline bounds, base
 * supply, and every non-zero demand bucket.
 * @param dbf_raw    opaque pointer to a struct dbf_array (must not be NULL)
 * @param start_time unused; kept for interface compatibility with other dbf backends
 */
static void
dbf_array_print(void *dbf_raw, uint64_t start_time)
{
	assert(dbf_raw != NULL);
	(void)start_time; /* FIX: silence unused-parameter warning without changing the interface */
	struct dbf_array *dbf = (struct dbf_array *)dbf_raw;
	printf("DBF INFO:\n\
\t WorkerIDX: \t%d\n\
\t Capacity: \t%u\n\
\t Max Rel DL: \t%lu\n\
\t Max Abs DL (ms): \t%lu\n\
\t Basic Supply: \t%lu\n\n",
	       dbf->worker_idx, dbf->capacity, dbf->max_relative_deadline, dbf->max_absolute_deadline/runtime_quantum, dbf->base_supply);

	/* FIX: use an unsigned index (and %u) to match the uint32_t capacity field */
	for (uint32_t i = 0; i < dbf->capacity; i++) {
		if (dbf->demands[i] > 0) printf("demands[%u] = %lu\n", i, dbf->demands[i]);
	}
}
// static void *
// dbf_array_grow(void *dbf_raw, uint64_t new_max_relative_deadline)
// {
// assert(dbf_raw != NULL);
// struct dbf_array *dbf = (struct dbf_array *)dbf_raw;
// uint32_t new_capacity = new_max_relative_deadline / runtime_quantum /* * 2 */; // NOT adding 1 for final leftovers
// struct dbf_array *new_dbf = realloc(dbf, sizeof(struct dbf_array) + sizeof(uint64_t) * new_capacity);
// if (new_dbf == NULL) panic("Failed to grow dbf\n");
// memset(new_dbf->demands, 0, new_capacity * sizeof(uint64_t));
// new_dbf->capacity = new_capacity;
// new_dbf->max_relative_deadline = new_max_relative_deadline;
// return new_dbf;
// }
/*
static bool
dbf_array_check_supply_quick(struct dbf_array *dbf_raw, uint64_t start_time, uint64_t abs_deadline, uint64_t adjustment)
{
assert(dbf_raw != NULL);
struct dbf_array *dbf = (struct dbf_array *)dbf_raw;
// assert(start_time < abs_deadline);
if (start_time >= abs_deadline) return true;
const uint32_t live_deadline_len = ceil((abs_deadline - start_time) / (double)runtime_quantum);
const uint32_t abs_deadline_idx = (abs_deadline / runtime_quantum) % dbf->capacity;
const uint64_t max_supply_at_deadline = live_deadline_len * dbf->base_supply;
return (dbf->demands[abs_deadline_idx] + adjustment <= max_supply_at_deadline);
}
*/
/**
 * Attempts to apply a demand adjustment to the circular demand array, walking every
 * quantum slot from abs_deadline's slot forward across the full window capacity.
 *
 * @param dbf_raw opaque handle to a struct dbf_array (non-NULL)
 * @param start_time current time in cycles; must precede abs_deadline
 * @param route_relative_deadline unused here; kept for the polymorphic signature
 * @param abs_deadline absolute deadline in cycles of the sandbox being accounted
 * @param adjustment demand delta in cycles; 0 is a no-op that still bumps max_absolute_deadline
 * @param dbf_update_mode one of check-and-add / force-add / reduce
 * @param new_message_raw optional struct message, only read for diagnostics on underflow
 * @param sandbox_meta unused here; kept for the polymorphic signature
 * @returns true if demand stayed at or below supply, false on oversupply
 */
static bool
dbf_array_try_update_demand(void *dbf_raw, uint64_t start_time, uint64_t route_relative_deadline,
                            uint64_t abs_deadline, uint64_t adjustment, dbf_update_mode_t dbf_update_mode,
                            void *new_message_raw, struct sandbox_metadata *sandbox_meta)
{
	assert(dbf_raw != NULL);
	assert(start_time < abs_deadline);

	struct dbf_array *dbf         = (struct dbf_array *)dbf_raw;
	struct message   *new_message = (struct message *)new_message_raw;

	/* Track the furthest absolute deadline this dbf has seen */
	if (abs_deadline > dbf->max_absolute_deadline) dbf->max_absolute_deadline = abs_deadline;

	/* BUGFIX: must be initialized before the early 'goto done'. The original declared
	 * this after the goto, so the adjustment == 0 path jumped past the initializer and
	 * returned an indeterminate bool (undefined behavior). */
	bool demand_is_below_supply = true;

	if (adjustment == 0) goto done;

	/* Number of quanta between start_time and the deadline (rounded to nearest) */
	const uint32_t live_deadline_len = round((abs_deadline - start_time) / (double)runtime_quantum);
	/* Circular slot index corresponding to the absolute deadline */
	const uint32_t abs_deadline_idx = (abs_deadline / runtime_quantum) % dbf->capacity;
	assert(live_deadline_len <= dbf->capacity);

	for (uint32_t i = abs_deadline_idx, iter = 0; i < abs_deadline_idx + dbf->capacity; i++, iter++) {
		uint32_t       circular_i           = i % dbf->capacity;
		const uint64_t max_supply_at_time_i = (live_deadline_len + iter) * dbf->base_supply;
		const uint64_t prev_demand          = dbf->demands[circular_i];

		switch (dbf_update_mode) {
		case DBF_CHECK_AND_ADD_DEMAND:
			dbf->demands[circular_i] += adjustment;

			if (dbf->demands[circular_i] > max_supply_at_time_i) {
				/* Undo DBF adding if over supply detected */
				for (uint32_t j = abs_deadline_idx; j <= i; j++) {
					dbf->demands[j % dbf->capacity] -= adjustment;
				}
				dbf->time_of_oversupply = iter;
				goto err_demand_over_supply;
			}
			break;
		case DBF_FORCE_ADD_NEW_SANDBOX_DEMAND:
			/* [Work Conservation Scenario] Only applicable for tenant and global dbf! */
			assert(dbf->worker_idx < 0);
			dbf->demands[circular_i] += adjustment;
			assert(prev_demand < dbf->demands[circular_i]);

			/* Record only the FIRST slot that went over supply; keep adding regardless */
			if (demand_is_below_supply && dbf->demands[circular_i] > max_supply_at_time_i) {
				dbf->time_of_oversupply = iter;
				demand_is_below_supply  = false;
			}
			break;
		case DBF_REDUCE_EXISTING_DEMAND:
			dbf->demands[circular_i] -= adjustment;

			if (prev_demand < dbf->demands[circular_i]) {
				/* Unsigned wrap-around: more demand removed than was present — dump state */
				printf("DBF_REDUCE_EXISTING_DEMAND\n");
				printf("Worker ID: %d\n", dbf->worker_idx);
				printf("Sandbox ID: %lu\n", new_message->sandbox_id);
				printf("Basic supply: %lu\n", dbf->base_supply);
				printf("Cap=%u\n", dbf->capacity);
				printf("Abs_dest_idx=%u\n", abs_deadline_idx);
				printf("live_deadline_len=%u\n", live_deadline_len);
				printf("i=%u, cir_i = %u, iter = %u\n", i, circular_i, iter);
				printf("max_supply_at_time_i = %lu\n\n", max_supply_at_time_i);
				printf("Prev_demand[%u]=%lu\n\n", circular_i, prev_demand);
				printf("demand[%u]=%lu\n\n", circular_i, dbf->demands[circular_i]);
				printf("exceeded_estimation=%d\n", new_message->exceeded_estimation);
				printf("Adjustment=%lu\n", adjustment);
				dbf_print(dbf, start_time);
				/* BUGFIX: corrected "Interger" typo in the panic message */
				panic("Integer Underflow -> Tried reducing demand, but it actually went over supply!");
			}
			break;
		}
	}
done:
	return demand_is_below_supply;
err_demand_over_supply:
	demand_is_below_supply = false;
	goto done;
}
/**
 * Computes how far demand exceeds supply at the recorded oversupply point.
 *
 * @param dbf_raw opaque handle to a struct dbf_array (non-NULL)
 * @param start_time current time in cycles
 * @param abs_deadline absolute deadline the oversupply offset is relative to
 * @param time_of_oversupply quantum-slot offset past abs_deadline (as stored by
 *        dbf_array_try_update_demand); converted to an absolute time below
 * @returns demand minus supply at that slot, or 0 if demand is within supply
 */
static uint64_t
dbf_array_get_demand_overgone_its_supply_at(void *dbf_raw, uint64_t start_time, uint64_t abs_deadline, uint64_t time_of_oversupply)
{
	assert(dbf_raw != NULL);
	struct dbf_array *dbf = (struct dbf_array *)dbf_raw;

	/* Convert the slot offset into an absolute time, clamped to the furthest known deadline */
	time_of_oversupply = abs_deadline + time_of_oversupply*runtime_quantum;
	if (time_of_oversupply > dbf->max_absolute_deadline) {
		printf("abs: %lu, time_of_oversupply: %lu, dbf_abs: %lu\n", abs_deadline, time_of_oversupply, dbf->max_absolute_deadline);
		time_of_oversupply = dbf->max_absolute_deadline;
	}
	assert(start_time < time_of_oversupply);

	/* Quanta between now and the oversupply point (rounded to nearest) */
	const uint32_t live_deadline_len = round((time_of_oversupply - start_time) / (double)runtime_quantum);
	/* Circular slot index of the oversupply point
	 * (removed an unused duplicate 'abs_deadline_idx' local that computed the same value) */
	const uint32_t circular_i = (time_of_oversupply/runtime_quantum) % dbf->capacity;

	const uint64_t max_supply_at_time_i  = live_deadline_len * dbf->base_supply;
	const uint64_t curr_demand_at_time_i = dbf->demands[circular_i];

	uint64_t demand_overgone = 0;
	if (curr_demand_at_time_i > max_supply_at_time_i) {
		demand_overgone = curr_demand_at_time_i - max_supply_at_time_i;
	}
	return demand_overgone;
}
/*
static uint64_t
dbf_array_get_demand_overgone_its_supply_at(void *dbf_raw, uint64_t start_time, uint64_t abs_deadline, uint64_t time_of_oversupply)
{
assert(dbf_raw != NULL);
assert(start_time < abs_deadline);
struct dbf_array *dbf = (struct dbf_array *)dbf_raw;
const uint32_t live_deadline_len = ceil((abs_deadline - start_time) / (double)runtime_quantum);
const uint32_t abs_deadline_idx = (abs_deadline / runtime_quantum) % dbf->capacity;
uint64_t demand_overgone = 0;
uint32_t circular_i = (abs_deadline_idx + time_of_oversupply) % dbf->capacity;
const uint64_t max_supply_at_time_i = (live_deadline_len + time_of_oversupply) * dbf->base_supply;
const uint64_t curr_demand_at_time_i = dbf->demands[circular_i];
if (curr_demand_at_time_i > max_supply_at_time_i) {
demand_overgone = curr_demand_at_time_i - max_supply_at_time_i;
}
return demand_overgone;
}
static uint64_t
dbf_array_get_demand_overgone_its_supply_at__BAK(void *dbf_raw, uint64_t start_time, uint64_t abs_deadline)
{
assert(dbf_raw != NULL);
assert(start_time < abs_deadline);
struct dbf_array *dbf = (struct dbf_array *)dbf_raw;
const uint32_t live_deadline_len = ceil((abs_deadline - start_time) / (double)runtime_quantum);
const uint32_t absolute_arrival_idx = start_time / runtime_quantum % dbf->capacity;
uint64_t demand_overgone = 0;
const uint32_t abs_deadline_idx = (abs_deadline / runtime_quantum) % dbf->capacity;
// assert(live_deadline_len<=route_relative_deadline_len);
for (uint32_t i = abs_deadline_idx, iter = 0; i < abs_deadline_idx + live_deadline_len; i++, iter++) {
uint32_t circular_i = i % dbf->capacity;
const uint64_t max_supply_at_time_i = (live_deadline_len + iter) * dbf->base_supply;
const uint64_t curr_demand_at_time_i = dbf->demands[circular_i];
if (curr_demand_at_time_i > max_supply_at_time_i) {
if (curr_demand_at_time_i - max_supply_at_time_i > demand_overgone) {
demand_overgone = curr_demand_at_time_i - max_supply_at_time_i;
}
}
}
return demand_overgone;
}
*/
/**
 * Releases an array-backed dbf allocated by dbf_array_initialize.
 * The trailing flexible demands[] storage was part of the same allocation.
 * @param dbf the dbf to free; must be non-NULL
 */
static void
dbf_array_free(void *dbf)
{
	assert(dbf != NULL);
	free(dbf);
}
/**
 * Allocates and configures an array-backed demand-based function (dbf) and plugs
 * its operations into the generic dbf dispatch table.
 *
 * @param num_of_workers worker count used to scale the per-quantum supply
 * @param reservation_percentile tenant reservation percentage (0-100)
 * @param worker_idx index of the owning worker; negative appears to denote
 *        tenant/global dbfs (see the assert in dbf_array_try_update_demand)
 * @param tenant unused by the array variant; kept for signature parity with
 *        dbf_list_initialize
 * @returns heap-allocated struct dbf_array, released via dbf_free
 */
void *
dbf_array_initialize(uint32_t num_of_workers, uint8_t reservation_percentile, int worker_idx, struct tenant *tenant)
{
	/* Register this implementation's operations with the polymorphic interface */
	struct dbf_config config = {
		.get_worker_idx_fn                    = dbf_array_get_worker_idx,
		.get_time_of_oversupply_fn            = dbf_array_get_time_of_oversupply,
		.print_fn                             = dbf_array_print,
		.try_update_demand_fn                 = dbf_array_try_update_demand,
		.get_demand_overgone_its_supply_at_fn = dbf_array_get_demand_overgone_its_supply_at,
		.free_fn                              = dbf_array_free
	};
	dbf_plug_functions(&config);

	assert(runtime_max_deadline > 0);

	/* One demand slot per scheduling quantum across the maximum relative deadline */
	uint32_t capacity = runtime_max_deadline / runtime_quantum /* * 2 */; // NOT adding 1 for final leftovers
	struct dbf_array *dbf = (struct dbf_array *)calloc(1, sizeof(struct dbf_array) + sizeof(uint64_t) * capacity);
	/* BUGFIX: the original never checked the allocation and would fault on the writes below */
	if (dbf == NULL) panic("Failed to allocate dbf_array\n");

	dbf->capacity              = capacity;
	dbf->max_relative_deadline = runtime_max_deadline;
	dbf->worker_idx            = worker_idx;

	/* Supply per quantum: workers x reservation% x max-CPU-utilization% / 10000 */
	dbf->base_supply = runtime_quantum * num_of_workers * reservation_percentile * RUNTIME_MAX_CPU_UTIL_PERCENTILE / 10000;
	return dbf;
}

@ -0,0 +1,80 @@
#include <string.h>
#include <assert.h>
#include "dbf.h"
static struct dbf_config dbf_conf;
// void *global_dbf_temp;
/**
 * Returns the worker index associated with a dbf instance.
 * Delegates to whichever implementation was installed via dbf_plug_functions.
 * @param dbf opaque dbf handle
 * @returns the implementation's worker index; negative values appear to denote
 *          tenant/global dbfs — confirm against the array implementation's asserts
 */
int
dbf_get_worker_idx(void *dbf)
{
	assert(dbf_conf.get_worker_idx_fn != NULL);
	return dbf_conf.get_worker_idx_fn(dbf);
}
// uint64_t
// dbf_get_max_relative_dl(void *dbf)
// {
// assert(dbf_conf.get_max_relative_dl_fn != NULL);
// return dbf_conf.get_max_relative_dl_fn(dbf);
// }
/**
 * Returns the implementation-recorded point at which demand last exceeded supply.
 * Units differ per implementation: the array variant stores a quantum-slot offset,
 * the list variant stores an absolute deadline — callers must know which is plugged.
 * @param dbf opaque dbf handle
 * @returns the recorded oversupply time/offset
 */
uint64_t
dbf_get_time_of_oversupply(void *dbf)
{
	assert(dbf_conf.get_time_of_oversupply_fn != NULL);
	return dbf_conf.get_time_of_oversupply_fn(dbf);
}
/**
 * Dumps the dbf's internal state for debugging.
 * Delegates to whichever implementation was installed via dbf_plug_functions.
 * @param dbf opaque dbf handle
 * @param start_time current time in cycles, used to compute live supply windows
 */
void
dbf_print(void *dbf, uint64_t start_time)
{
	assert(dbf_conf.print_fn != NULL);
	dbf_conf.print_fn(dbf, start_time);
}
// void *
// dbf_grow(void *dbf, uint64_t new_max_relative_deadline)
// {
// assert(dbf_conf.grow_fn != NULL);
// return dbf_conf.grow_fn(dbf, new_max_relative_deadline);
// }
/**
 * Forwards a demand update to the plugged dbf implementation.
 * All parameters are passed through verbatim; see the implementation
 * (e.g. dbf_array_try_update_demand) for their semantics.
 * @returns true if the demand fit within the supply, false on oversupply
 */
bool
dbf_try_update_demand(void *dbf, uint64_t start_time, uint64_t route_relative_deadline, uint64_t abs_deadline,
                      uint64_t adjustment, dbf_update_mode_t dbf_update_mode, void *new_message, struct sandbox_metadata *sandbox_meta)
{
	assert(dbf_conf.try_update_demand_fn != NULL);
	return dbf_conf.try_update_demand_fn(dbf, start_time, route_relative_deadline, abs_deadline, adjustment,
	                                     dbf_update_mode, new_message, sandbox_meta);
}
/**
 * Forwards an oversupply-magnitude query to the plugged dbf implementation.
 * @param dbf opaque dbf handle
 * @param start_time current time in cycles
 * @param abs_deadline absolute deadline the oversupply offset is relative to
 * @param time_of_oversupply implementation-specific oversupply marker
 * @returns amount by which demand exceeds supply (0 if within supply)
 */
uint64_t
dbf_get_demand_overgone_its_supply_at(void *dbf, uint64_t start_time, uint64_t abs_deadline, uint64_t time_of_oversupply)
{
	assert(dbf_conf.get_demand_overgone_its_supply_at_fn != NULL);
	return dbf_conf.get_demand_overgone_its_supply_at_fn(dbf, start_time, abs_deadline, time_of_oversupply);
}
/**
 * Releases a dbf instance via the plugged implementation's destructor.
 * @param dbf opaque dbf handle to destroy
 */
void
dbf_free(void *dbf)
{
	assert(dbf_conf.free_fn != NULL);
	dbf_conf.free_fn(dbf);
}
void
dbf_plug_functions(struct dbf_config *config)
{
memcpy(&dbf_conf, config, sizeof(struct dbf_config));
}
/**
 * Factory for the dbf subsystem: builds either the linked-list or the array
 * variant depending on the DBF_USE_LINKEDLIST compile-time switch. Both
 * initializers also plug their operations into the dispatch table as a side effect.
 * @returns opaque dbf handle owned by the caller; release with dbf_free
 */
void
*dbf_initialize(uint32_t num_of_workers, uint8_t reservation_percentile, int worker_idx, struct tenant *tenant)
{
#ifdef DBF_USE_LINKEDLIST
	return dbf_list_initialize(num_of_workers, reservation_percentile, worker_idx, tenant);
#else
	return dbf_array_initialize(num_of_workers, reservation_percentile, worker_idx, tenant);
#endif
}

@ -0,0 +1,199 @@
#include <assert.h>
#include "dbf.h"
#include "sandbox_types.h"
/* Linked-list implementation of the demand-based function (dbf): demand is kept
 * as a deadline-ordered list of demand_node entries instead of a quantized
 * circular array. */
struct dbf_list {
	struct tenant *tenant;                /* owning tenant (set by dbf_list_initialize) */
	int            worker_idx;            /* owning worker index */
	uint64_t       max_relative_deadline; /* copied from runtime_max_deadline at init */
	double         base_supply;           /* supply amount for time 1 */
	uint64_t       time_of_oversupply;    /* abs deadline where demand last exceeded supply */
	uint64_t       demand_total;          /* running total of demand cycles in the list */
	struct ps_list_head demands_list;     /* demand_node entries ordered by abs_deadline */
};
/**
 * Accessor for the worker index of a list-backed dbf.
 * @param dbf_raw opaque handle to a struct dbf_list (non-NULL)
 * @returns the dbf's worker index
 */
static inline int
dbf_list_get_worker_idx(void *dbf_raw)
{
	assert(dbf_raw);
	return ((struct dbf_list *)dbf_raw)->worker_idx;
}
/*static inline uint64_t
dbf_list_get_max_relative_dl(void * dbf_raw)
{
assert(dbf_raw);
struct dbf_list *dbf = (struct dbf_list *)dbf_raw;
return dbf->max_relative_deadline;
}*/
/**
 * Accessor for the recorded oversupply point of a list-backed dbf.
 * Here the value is an absolute deadline (set in dbf_list_try_add_new_demand).
 * @param dbf_raw opaque handle to a struct dbf_list (non-NULL)
 * @returns absolute deadline at which demand last exceeded supply
 */
static inline uint64_t
dbf_list_get_time_of_oversupply(void *dbf_raw)
{
	assert(dbf_raw);
	return ((struct dbf_list *)dbf_raw)->time_of_oversupply;
}
/**
 * Dumps the list-backed dbf: per-node demand, the running demand sum, and the
 * supply available at each node's deadline relative to start_time.
 * @param dbf_raw opaque handle to a struct dbf_list (non-NULL)
 * @param start_time current time in cycles
 */
static void
dbf_list_print(void *dbf_raw, uint64_t start_time)
{
	assert(dbf_raw != NULL);
	struct dbf_list *dbf = (struct dbf_list *)dbf_raw;

	printf("DBF INFO LL:\n\
\t WorkerIDX: \t%d\n\
\t Basic Supply: \t%lf\n\n", dbf->worker_idx, dbf->base_supply);

	struct demand_node *node = NULL;
	uint64_t demand_sum = 0;

	ps_list_foreach_d(&dbf->demands_list, node)
	{
		/* BUGFIX: was uint32_t — (abs_deadline - start_time) is a 64-bit cycle
		 * delta and was silently truncated before the supply multiplication,
		 * corrupting the printed supply for distant deadlines. */
		const uint64_t live_deadline_len = node->abs_deadline - start_time;
		const uint64_t max_supply_at_time_i = live_deadline_len * dbf->base_supply;
		demand_sum += node->demand;
		uint64_t over = 0;
		if (demand_sum >= max_supply_at_time_i) over = demand_sum - max_supply_at_time_i;
		printf("demand_at[%lu] = %lu, t=%s, demand_sum=%lu/supply=%lu, demand_over=%lu\n", node->abs_deadline, node->demand, node->tenant->name, demand_sum, max_supply_at_time_i, over);
	}
}
/**
 * Attempts to admit 'adjustment' cycles of new demand at sm's absolute deadline.
 * First pass: accumulate demand of nodes with earlier deadlines and find the
 * insertion spot. Then check supply at the new deadline and at every later
 * deadline (which would now also carry the new demand). Only if every check
 * passes is a node allocated and linked; on failure no state changes except
 * time_of_oversupply.
 * @param dbf_raw opaque handle to a struct dbf_list
 * @param start_time current time in cycles; must precede abs_deadline
 * @param abs_deadline deadline of the new demand; must equal sm->absolute_deadline
 * @param adjustment demand in cycles (> 0)
 * @param sm sandbox metadata; must not already own a demand node
 * @returns true if admitted, false if supply would be exceeded
 */
bool
dbf_list_try_add_new_demand(void *dbf_raw, uint64_t start_time, uint64_t abs_deadline, uint64_t adjustment, struct sandbox_metadata *sm)
{
	assert(dbf_raw != NULL);
	assert(start_time < abs_deadline);
	assert(sm);
	assert(sm->demand_node == NULL);
	assert(adjustment > 0);
	// if (adjustment == 0) return false;

	struct dbf_list *dbf = (struct dbf_list *)dbf_raw;
	struct demand_node *node = NULL;
	/* NOTE(review): past_deadline_demand is computed but unused — the supply
	 * expressions below have its contribution commented out. Confirm intent. */
	uint64_t past_deadline_demand = 0;
	uint64_t demand_sum = 0;

	/* Walk deadline-ordered nodes up to abs_deadline, summing their demand */
	ps_list_foreach_d(&dbf->demands_list, node)
	{
		if (node->abs_deadline <= start_time) past_deadline_demand = demand_sum;
		else if (node->abs_deadline >= abs_deadline) break;
		demand_sum += node->demand;
	}

	struct demand_node *node_spot = node; /* new node is appended after this position */
	/* Assumes no existing node shares this exact deadline — TODO confirm */
	assert(abs_deadline != node->abs_deadline);
	assert(abs_deadline == sm->absolute_deadline);

	/* Supply check at the new node's own deadline */
	demand_sum += adjustment;
	const uint64_t live_deadline_len = abs_deadline - start_time;
	const uint64_t max_supply_at_time_i = live_deadline_len * dbf->base_supply; // + past_deadline_demand;
	if (demand_sum > max_supply_at_time_i) {
		dbf->time_of_oversupply = abs_deadline;
		goto err;
	}

	/* Supply check at every later deadline, each of which now carries the new demand */
	while(!ps_list_is_head_d(&dbf->demands_list, node)) {
		struct demand_node *tmp_next = ps_list_next_d(node);
		const uint64_t live_deadline_len = node->abs_deadline - start_time;
		const uint64_t max_supply_at_time_i = live_deadline_len * dbf->base_supply; // + past_deadline_demand;
		demand_sum += node->demand;
		if (demand_sum > max_supply_at_time_i) {
			dbf->time_of_oversupply = node->abs_deadline;
			goto err;
		}
		node = tmp_next;
	}

	/* All checks passed: materialize the node and link it into the ordered list */
	struct demand_node *new_node = (struct demand_node*) malloc(sizeof(struct demand_node));
	ps_list_init_d(new_node);
	new_node->abs_deadline = abs_deadline;
	new_node->demand = adjustment;
	new_node->tenant = sm->tenant;
	// new_node->sandbox_meta = sm;
	sm->demand_node = new_node;
	assert(ps_list_singleton_d(new_node));
	ps_list_append_d(node_spot, new_node);
	dbf->demand_total = demand_sum + adjustment;
	return true;
err:
	return false;
}
/**
 * Unconditionally adds 'adjustment' cycles of slack demand to sm's existing
 * demand node — no supply check is performed (force path).
 * @param dbf_raw opaque handle to a struct dbf_list
 * @param sm sandbox metadata; must already own a demand node
 * @param adjustment cycles to add (> 0)
 */
void
dbf_list_force_add_extra_slack(void *dbf_raw, struct sandbox_metadata *sm, uint64_t adjustment)
{
	assert(dbf_raw != NULL);
	assert(sm);
	assert(sm->demand_node);
	assert(adjustment > 0);
	struct demand_node *node = sm->demand_node;
	assert(node->abs_deadline == sm->absolute_deadline);
	/* NOTE(review): this precondition mirrors dbf_list_reduce_demand, but here
	 * we are ADDING demand — confirm 'demand >= adjustment' is really intended. */
	assert(node->demand >= adjustment);
	node->demand += adjustment;
	struct dbf_list *dbf = (struct dbf_list *)dbf_raw;
	dbf->demand_total += adjustment;
}
/**
 * Subtracts 'adjustment' cycles of demand from sm's demand node and, when
 * delete_node is set, unlinks and frees the fully-drained node.
 * NOTE: dbf->demand_total is NOT decremented here (the code doing so is
 * commented out), and sm->demand_node is left dangling after deletion —
 * callers are expected to clear it themselves.
 * @param sm sandbox metadata owning the demand node
 * @param adjustment cycles to subtract; may be 0 only when delete_node is true
 * @param delete_node when true the node must end at zero demand and is freed
 */
void
dbf_list_reduce_demand(struct sandbox_metadata *sm, uint64_t adjustment, bool delete_node)
{
	assert(sm);
	assert(sm->demand_node);
	assert(delete_node || adjustment > 0);
	struct demand_node *node = sm->demand_node;
	assert(node->abs_deadline == sm->absolute_deadline);
	assert(node->demand >= adjustment); /* guard against unsigned underflow */
	node->demand -= adjustment;
	// assert(dbf->demand_total >= adjustment);
	// dbf->demand_total -= adjustment;
	if (delete_node) {
		assert(node->demand == 0);
		/* Clean up empty and repetitive nodes */
		ps_list_rem_d(node);
		free(node);
		node = NULL;
	}
}
/**
 * Releases a list-backed dbf allocated by dbf_list_initialize.
 * NOTE: frees only the dbf struct itself; any remaining demand_node entries
 * in demands_list are not visited here.
 * @param dbf the dbf to free; must be non-NULL
 */
static void
dbf_list_free(void *dbf)
{
	assert(dbf != NULL);
	free(dbf);
}
/**
 * Allocates and configures a linked-list demand-based function (dbf) and plugs
 * its operations into the generic dbf dispatch table. The list variant exposes
 * its demand mutators (dbf_list_try_add_new_demand etc.) directly rather than
 * through try_update_demand_fn, which is deliberately left unset.
 *
 * @param num_of_workers worker count used to scale the per-cycle supply
 * @param reservation_percentile tenant reservation percentage (0-100)
 * @param worker_idx index of the owning worker
 * @param tenant tenant that owns this dbf
 * @returns heap-allocated struct dbf_list, released via dbf_free
 */
void *
dbf_list_initialize(uint32_t num_of_workers, uint8_t reservation_percentile, int worker_idx, struct tenant *tenant)
{
	/* Register this implementation's operations with the polymorphic interface */
	struct dbf_config config = {
		// .try_update_demand_fn = dbf_list_try_add_new_demand,
		.get_worker_idx_fn         = dbf_list_get_worker_idx,
		.get_time_of_oversupply_fn = dbf_list_get_time_of_oversupply,
		.print_fn                  = dbf_list_print,
		.free_fn                   = dbf_list_free
	};
	dbf_plug_functions(&config);

	assert(runtime_max_deadline > 0);

	struct dbf_list *dbf = (struct dbf_list *)calloc(1, sizeof(struct dbf_list));
	/* BUGFIX: the original never checked the allocation before dereferencing it */
	assert(dbf != NULL);

	ps_list_head_init(&dbf->demands_list);
	dbf->max_relative_deadline = runtime_max_deadline;
	dbf->worker_idx            = worker_idx;

	/* Fractional supply per cycle: workers x reservation% x max-CPU-utilization% / 10000 */
	dbf->base_supply = /*runtime_quantum * */1.0*num_of_workers * reservation_percentile * RUNTIME_MAX_CPU_UTIL_PERCENTILE / 10000;
	dbf->tenant      = tenant;
	return dbf;
}

@ -3,6 +3,8 @@
#include "global_request_scheduler.h"
#include "panic.h"
static struct sandbox_metadata global_highest_priority_metadata;
/* Default uninitialized implementations of the polymorphic interface */
noreturn static struct sandbox *
uninitialized_add(struct sandbox *arg)
@ -87,3 +89,45 @@ global_request_scheduler_peek()
{
return global_request_scheduler.peek_fn();
}
/**
 * Peeks at the metadata of the highest priority sandbox
 * @returns by-value copy of the module-level cache maintained by
 *          global_request_scheduler_update_highest_priority
 * NOTE(review): the static cache itself carries no synchronization here —
 * callers appear to rely on the global queue lock; confirm.
 */
struct sandbox_metadata
global_request_scheduler_peek_metadata()
{
	return global_highest_priority_metadata;
}
/**
 * Updates the cached metadata of the highest priority sandbox.
 * @param element the highest priority sandbox, or NULL to reset the cache to
 *        "queue empty" sentinel values (deadline = UINT64_MAX)
 */
void
global_request_scheduler_update_highest_priority(const void *element)
{
	if (element == NULL) {
		/* Queue is empty: reset every cached field to its sentinel */
		global_highest_priority_metadata.absolute_deadline    = UINT64_MAX;
		global_highest_priority_metadata.tenant               = NULL;
		global_highest_priority_metadata.route                = NULL;
		global_highest_priority_metadata.allocation_timestamp = 0;
		global_highest_priority_metadata.remaining_exec       = 0;
		global_highest_priority_metadata.id                   = 0;
		global_highest_priority_metadata.global_queue_type    = 0;
		global_highest_priority_metadata.exceeded_estimation  = false;
		global_highest_priority_metadata.state                = SANDBOX_UNINITIALIZED;
		return;
	}

	/* Copy the scheduling-relevant fields out of the new queue head */
	const struct sandbox *sandbox = element;
	global_highest_priority_metadata.absolute_deadline    = sandbox->absolute_deadline;
	global_highest_priority_metadata.tenant               = sandbox->tenant;
	global_highest_priority_metadata.route                = sandbox->route;
	global_highest_priority_metadata.allocation_timestamp = sandbox->timestamp_of.allocation;
	global_highest_priority_metadata.remaining_exec       = sandbox->remaining_exec;
	// global_highest_priority_metadata.remaining_exec = sandbox->remaining_execution_original;
	global_highest_priority_metadata.id                   = sandbox->id;
	global_highest_priority_metadata.exceeded_estimation  = sandbox->exceeded_estimation;
	global_highest_priority_metadata.state                = sandbox->state;
	/* NOTE(review): unlike the NULL branch, global_queue_type is not refreshed
	 * here, so it can go stale across updates — confirm whether intentional. */
}

@ -61,23 +61,13 @@ global_request_scheduler_minheap_peek(void)
return priority_queue_peek(global_request_scheduler_minheap);
}
uint64_t
sandbox_get_priority_fn(void *element)
{
struct sandbox *sandbox = (struct sandbox *)element;
if (scheduler == SCHEDULER_SJF) return sandbox->remaining_exec;
assert(scheduler == SCHEDULER_EDF);
return sandbox->absolute_deadline;
};
/**
* Initializes the variant and registers against the polymorphic interface
*/
void
global_request_scheduler_minheap_initialize()
{
global_request_scheduler_minheap = priority_queue_initialize(4096, true, sandbox_get_priority_fn);
global_request_scheduler_minheap = priority_queue_initialize(4096, true, sandbox_get_priority);
struct global_request_scheduler_config config = {.add_fn = global_request_scheduler_minheap_add,
.remove_fn = global_request_scheduler_minheap_remove,

@ -0,0 +1,204 @@
#include <assert.h>
#include <errno.h>
#include "global_request_scheduler.h"
#include "listener_thread.h"
#include "panic.h"
#include "priority_queue.h"
#include "runtime.h"
#include "tenant_functions.h"
#include "sandbox_set_as_error.h"
#include "dbf.h"
#include "local_cleanup_queue.h"
struct priority_queue *global_request_scheduler_mtdbf;
lock_t global_lock;
// int max_global_runqueue_len = 0; //////////
/**
 * Pushes a sandbox request to the global runqueue
 * @param sandbox
 * @returns pointer to request if added. NULL otherwise
 */
static struct sandbox *
global_request_scheduler_mtdbf_add(struct sandbox *sandbox)
{
	assert(sandbox);
	assert(global_request_scheduler_mtdbf);
	assert(listener_thread_is_running()); /* only the listener thread enqueues globally */

	lock_node_t node = {};
	lock_lock(&global_lock, &node);

	int rc = priority_queue_enqueue_nolock(global_request_scheduler_mtdbf, sandbox);
	if (rc != 0) {
		/* Enqueue failed: tag the sandbox with rejection code 4293 and report NULL.
		 * NOTE(review): 4293 presumably means "global queue full" — confirm against
		 * the project's response-code table. */
		assert(sandbox->response_code == 0);
		sandbox->response_code = 4293;
		sandbox = NULL; // TODO: FIX ME
		goto done;
	}
	sandbox->owned_worker_idx = -1; /* -1 appears to mean "queued globally, unowned" */

	// if(priority_queue_length_nolock(global_request_scheduler_mtdbf) > max_global_runqueue_len) {
	// 	max_global_runqueue_len = priority_queue_length_nolock(global_request_scheduler_mtdbf);
	// 	printf("Global MAX Queue Length: %u\n", max_global_runqueue_len);
	// }
	// printf("GlobalLen: %d, Tenant: %s, Tenant-G: %d, Tenant-L: %d\n\n", priority_queue_length_nolock(global_request_scheduler_mtdbf), sandbox->tenant->name,
	// priority_queue_length_nolock(sandbox->tenant->global_sandbox_metas), priority_queue_length_nolock(sandbox->tenant->local_sandbox_metas));

done:
	lock_unlock(&global_lock, &node);
	return sandbox;
}
/**
 * @param pointer to the pointer that we want to set to the address of the removed sandbox request
 * @returns 0 if successful, -ENOENT if empty
 */
int
global_request_scheduler_mtdbf_remove(struct sandbox **removed_sandbox)
{
	/* This function won't be used with the MTDS scheduler. Keeping merely for the polymorhism. */
	/* Deliberate stub: always reports failure; callers use remove_if_earlier instead. */
	return -1;
}
/**
 * @param removed_sandbox pointer to set to removed sandbox request
 * @param target_deadline the deadline that the request must be earlier than to dequeue
 * @returns 0 if successful, -ENOENT if empty or if request isn't earlier than target_deadline
 */
int
global_request_scheduler_mtdbf_remove_if_earlier(struct sandbox **removed_sandbox, uint64_t target_deadline)
{
	int rc = -ENOENT;

	const uint64_t now = __getcycles();
	/* Sandbox this worker would otherwise run, used for the earliest-start comparison */
	struct sandbox *local = local_runqueue_get_next();
	uint64_t local_rem = local == NULL ? 0 : local->remaining_exec;

	lock_node_t node = {};
	lock_lock(&global_lock, &node);

	/* Inspect the cached head metadata without dequeuing yet */
	struct sandbox_metadata global_metadata = global_request_scheduler_peek_metadata();
	uint64_t global_deadline = global_metadata.absolute_deadline;

	if(USING_EARLIEST_START_FIRST) {
		/* Earliest-start-first: compare latest allowable start times (deadline - remaining exec) */
		if (global_deadline - global_metadata.remaining_exec >= target_deadline - local_rem) goto err_enoent;
	} else {
		/* EDF: the global head must be strictly earlier than the target deadline */
		if (global_deadline >= target_deadline) goto err_enoent;
	}
	// if (global_deadline == UINT64_MAX) goto err_enoent;

	/* Spot the sandbox to remove */
	struct sandbox *top_sandbox = NULL;
	rc = priority_queue_top_nolock(global_request_scheduler_mtdbf, (void **)&top_sandbox);
	assert(top_sandbox);
	/* The peeked metadata cache must agree with the actual queue head */
	assert(top_sandbox->absolute_deadline == global_deadline);
	assert(top_sandbox->remaining_exec == global_metadata.remaining_exec);
	assert(top_sandbox->state == SANDBOX_INITIALIZED || top_sandbox->state == SANDBOX_PREEMPTED);
	assert(top_sandbox->response_code == 0);

	if (top_sandbox->sandbox_meta->terminated) {
		/* Already terminated elsewhere: propagate the recorded error code */
		assert(top_sandbox->sandbox_meta->error_code > 0);
		top_sandbox->response_code = top_sandbox->sandbox_meta->error_code;
	} else if (global_deadline < now + (!top_sandbox->exceeded_estimation ? top_sandbox->remaining_exec : 0)) {
		/* Cannot finish by its deadline: mark late (4080 = fresh, 4082 = previously preempted) */
		top_sandbox->response_code = top_sandbox->state == SANDBOX_INITIALIZED ? 4080 : 4082;
	} else if (USING_LOCAL_RUNQUEUE) {
		/* NOTE(review): tenant/route are only consumed by the commented-out demand
		 * check below, so this branch is currently a no-op placeholder. */
		struct tenant *tenant = top_sandbox->tenant;
		struct route  *route  = top_sandbox->route;
		// assert(dbf_get_worker_idx(worker_dbf) == worker_thread_idx);
		// if (!dbf_try_update_demand(worker_dbf, now, route->relative_deadline,
		// 		global_deadline, top_sandbox->remaining_exec, DBF_CHECK_AND_ADD_DEMAND, NULL, NULL)) {
		// 	goto err_enoent;
		// }
	}
	else if(local) {
		/* Writeback preemption: detach the interrupted local sandbox before taking the global one */
		assert(USING_WRITEBACK_FOR_PREEMPTION);
		assert(local->state == SANDBOX_INTERRUPTED);
		assert(local->writeback_preemption_in_progress == false);
		assert(local->owned_worker_idx >= 0);
		assert(local->pq_idx_in_runqueue >= 1);

		local->writeback_preemption_in_progress = true;
		local_runqueue_delete(local);
		// local->response_code = 5000;
		// interrupted_sandbox_exit();
	}

	top_sandbox->timestamp_of.dispatched = now; // remove the same op from scheduler validate and set_as_runable
	top_sandbox->owned_worker_idx = -2; /* -2 appears to mean "dispatched to a worker" — confirm */
	// printf("Worker %i accepted a sandbox #%lu!\n", worker_thread_idx, top_sandbox->id);

	/* Actually dequeue; must still be the same sandbox we validated above */
	rc = priority_queue_dequeue_nolock(global_request_scheduler_mtdbf, (void **)removed_sandbox);
	assert(rc == 0);
	assert(*removed_sandbox == top_sandbox);
	assert(top_sandbox->state == SANDBOX_INITIALIZED || top_sandbox->state == SANDBOX_PREEMPTED);

	lock_unlock(&global_lock, &node);
done:
	return rc;
err_enoent:
	lock_unlock(&global_lock, &node);
	rc = -ENOENT;
	goto done;
}
/**
 * @param removed_sandbox pointer to set to removed sandbox request
 * @param target_deadline the deadline that the request must be earlier than to dequeue
 * @param mt_class the multi-tenancy class of the global request to compare the target deadline against
 * @returns 0 if successful, -ENOENT if empty or if request isn't earlier than target_deadline
 */
int
global_request_scheduler_mtdbf_remove_with_mt_class(struct sandbox **removed_sandbox, uint64_t target_deadline,
                                                    enum MULTI_TENANCY_CLASS target_mt_class)
{
	/* This function won't be used with the MTDBF scheduler. Keeping merely for the polymorhism. */
	/* Deliberate stub: always reports failure. */
	return -1;
}
/**
 * Peek at the priority of the highest priority task without having to take the lock
 * Because this is a min-heap PQ, the highest priority is the lowest 64-bit integer
 * This is used to store an absolute deadline
 * @returns value of highest priority value in queue or ULONG_MAX if empty
 */
static uint64_t
global_request_scheduler_mtdbf_peek(void)
{
	return priority_queue_peek(global_request_scheduler_mtdbf);
}
/**
 * Initializes the variant and registers against the polymorphic interface
 */
void
global_request_scheduler_mtdbf_initialize()
{
	/* Priority key depends on policy: latest-allowable-start under EARLIEST_START_FIRST,
	 * absolute deadline otherwise. The queue also maintains the highest-priority
	 * metadata cache and each sandbox's in-queue index via the two callbacks. */
	global_request_scheduler_mtdbf = priority_queue_initialize_new(RUNTIME_RUNQUEUE_SIZE, false, USING_EARLIEST_START_FIRST ? sandbox_get_priority_global : sandbox_get_priority,
	                                                               global_request_scheduler_update_highest_priority,
	                                                               sandbox_update_pq_idx_in_runqueue);
	lock_init(&global_lock);

	struct global_request_scheduler_config config = {
		.add_fn               = global_request_scheduler_mtdbf_add,
		.remove_fn            = global_request_scheduler_mtdbf_remove,
		.remove_if_earlier_fn = global_request_scheduler_mtdbf_remove_if_earlier,
		.peek_fn              = global_request_scheduler_mtdbf_peek
	};

	global_request_scheduler_initialize(&config);
}
/**
 * Releases the global MT-DBF runqueue allocated in
 * global_request_scheduler_mtdbf_initialize.
 */
void
global_request_scheduler_mtdbf_free()
{
	priority_queue_free(global_request_scheduler_mtdbf);
}

@ -1040,6 +1040,7 @@ wasi_snapshot_preview1_backing_poll_oneoff(wasi_context_t *context, const __wasi
/**
 * WASI proc_exit backing: tears down the current sandbox instead of exiting the
 * host process.
 * @param context WASI context of the exiting module (unused here)
 * @param exitcode module-provided exit code (unused here)
 * NOTE(review): declared noreturn, yet with NDEBUG the assert(0) compiles away;
 * if current_sandbox_fini() ever returned, control would fall off the end of a
 * noreturn function (undefined behavior) — confirm fini never returns.
 */
noreturn void
wasi_snapshot_preview1_backing_proc_exit(wasi_context_t *context, __wasi_exitcode_t exitcode)
{
	// panic("This path should not be reachable\n");
	current_sandbox_fini();
	assert(0);
}

@ -1,6 +1,8 @@
#include <stdint.h>
#include <unistd.h>
#include "admissions_control.h"
#include "traffic_control.h"
#include "arch/getcycles.h"
#include "execution_regression.h"
#include "global_request_scheduler.h"
@ -15,6 +17,13 @@
#include "tenant.h"
#include "tenant_functions.h"
#include "sandbox_perf_log.h"
#include "http_session_perf_log.h"
#include "ck_ring.h"
#include "priority_queue.h"
#include "global_request_scheduler_mtdbf.h"
#include "sandbox_set_as_error.h"
static void listener_thread_unregister_http_session(struct http_session *http);
static void panic_on_epoll_error(struct epoll_event *evt);
@ -35,6 +44,11 @@ int listener_thread_epoll_file_descriptor;
pthread_t listener_thread_id;
struct comm_with_worker *comm_from_workers, *comm_to_workers;
extern lock_t global_lock;
static struct sandbox_metadata *global_sandbox_meta = NULL;
/**
* Initializes the listener thread, pinned to core 0, and starts to listen for requests
*/
@ -42,6 +56,12 @@ void
listener_thread_initialize(void)
{
printf("Starting listener thread\n");
comm_from_workers = calloc(runtime_worker_threads_count, sizeof(struct comm_with_worker));
comm_to_workers = calloc(runtime_worker_threads_count, sizeof(struct comm_with_worker));
comm_with_workers_init(comm_from_workers);
comm_with_workers_init(comm_to_workers);
cpu_set_t cs;
CPU_ZERO(&cs);
@ -55,14 +75,14 @@ listener_thread_initialize(void)
assert(ret == 0);
ret = pthread_setaffinity_np(listener_thread_id, sizeof(cpu_set_t), &cs);
assert(ret == 0);
ret = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cs);
if (geteuid() != 0) ret = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cs);
assert(ret == 0);
printf("\tListener core thread: %lx\n", listener_thread_id);
}
/**
* @brief Registers a serverless tenant on the listener thread's epoll descriptor
* @brief Registers a serverless http session on the listener thread's epoll descriptor
**/
void
listener_thread_register_http_session(struct http_session *http)
@ -99,7 +119,7 @@ listener_thread_register_http_session(struct http_session *http)
}
/**
* @brief Registers a serverless tenant on the listener thread's epoll descriptor
* @brief Registers a serverless http session on the listener thread's epoll descriptor
**/
static void
listener_thread_unregister_http_session(struct http_session *http)
@ -153,6 +173,402 @@ listener_thread_register_metrics_server()
return rc;
}
static void
check_messages_from_workers()
{
#ifdef TRAFFIC_CONTROL
assert(comm_from_workers);
assert(comm_to_workers);
for (int worker_idx = 0; worker_idx < runtime_worker_threads_count; worker_idx++) {
struct message new_message = { 0 };
struct comm_with_worker *cfw = &comm_from_workers[worker_idx];
struct comm_with_worker *ctw = &comm_to_workers[worker_idx];
assert(cfw);
assert(ctw);
assert(cfw->worker_idx == worker_idx);
assert(ctw->worker_idx == worker_idx);
const uint64_t now = __getcycles();
while (ck_ring_dequeue_spsc_message(&cfw->worker_ring, cfw->worker_ring_buffer, &new_message)) {
assert(new_message.sender_worker_idx == worker_idx);
assert(new_message.sandbox_meta);
struct sandbox_metadata *sandbox_meta = new_message.sandbox_meta;
assert(new_message.sandbox_id == sandbox_meta->id);
struct tenant *tenant = sandbox_meta->tenant;
struct route *route = sandbox_meta->route;
const uint64_t absolute_deadline = sandbox_meta->absolute_deadline;
const uint64_t allocation_timestamp = sandbox_meta->allocation_timestamp;
assert(tenant);
assert(route);
assert(absolute_deadline > 0);
assert(allocation_timestamp > 0);
sandbox_meta->exceeded_estimation = new_message.exceeded_estimation;
sandbox_meta->state = new_message.state;
sandbox_meta->owned_worker_idx = new_message.sender_worker_idx;
switch (new_message.message_type)
{
case MESSAGE_CFW_PULLED_NEW_SANDBOX: {
assert(sandbox_meta->state == SANDBOX_RUNNABLE);
if (sandbox_meta->terminated) continue;
if (sandbox_meta->pq_idx_in_tenant_queue) {
assert(sandbox_meta->global_queue_type == 2);
assert(sandbox_meta->tenant_queue == tenant->global_sandbox_metas);
priority_queue_delete_by_idx_nolock(tenant->global_sandbox_metas,
sandbox_meta,sandbox_meta->pq_idx_in_tenant_queue);
if(unlikely(priority_queue_enqueue_nolock(tenant->local_sandbox_metas, sandbox_meta))){
panic("Failed to add sandbox_meta to tenant metadata queue");
}
sandbox_meta->tenant_queue = tenant->local_sandbox_metas;
}
break;
}
case MESSAGE_CFW_DELETE_SANDBOX: {
assert(sandbox_meta->state == SANDBOX_RETURNED || sandbox_meta->state == SANDBOX_ERROR);
// assert(new_message.adjustment > 0);
assert(sandbox_meta->terminated || sandbox_meta->remaining_exec > 0);
if (!sandbox_meta->terminated){
assert(sandbox_meta->worker_id_virt >= 0);
// assert(sandbox_meta->remaining_exec == new_message.adjustment);
void *global_dbf = global_virt_worker_dbfs[sandbox_meta->worker_id_virt];
dbf_list_reduce_demand(sandbox_meta, sandbox_meta->remaining_exec + sandbox_meta->extra_slack, true);
sandbox_meta->demand_node = NULL;
// sandbox_meta->extra_slack = 0;
// sandbox_meta->remaining_exec -= new_message.adjustment;
assert(sandbox_meta->remaining_exec == new_message.remaining_exec);
}
// assert(sandbox_meta->remaining_exec == 0);
if (sandbox_meta->trs_job_node) tenant_reduce_guaranteed_job_demand(tenant, sandbox_meta->remaining_exec, sandbox_meta);
// if (sandbox_meta->global_queue_type == 2 && sandbox_meta->state == SANDBOX_RETURNED) sandbox_meta->total_be_exec_cycles += new_message.adjustment;
if (sandbox_meta->total_be_exec_cycles > 0) tenant_force_add_job_as_best(tenant, now, sandbox_meta->total_be_exec_cycles);
sandbox_meta->terminated = true;
break;
}
case MESSAGE_CFW_REDUCE_DEMAND: {
assert(new_message.adjustment > 0);
if (sandbox_meta->global_queue_type == 2) {
sandbox_meta->total_be_exec_cycles += new_message.adjustment;
// tenant_force_add_job_as_best(tenant, now, sandbox_meta->total_be_exec_cycles);
}
if (sandbox_meta->terminated) break;
assert(sandbox_meta->demand_node);
assert(sandbox_meta->worker_id_virt >= 0);
void *global_dbf = global_virt_worker_dbfs[sandbox_meta->worker_id_virt];
dbf_list_reduce_demand(sandbox_meta, new_message.adjustment, false);
sandbox_meta->remaining_exec -= new_message.adjustment;
assert(sandbox_meta->remaining_exec == new_message.remaining_exec);
if (sandbox_meta->remaining_exec == 0 && sandbox_meta->extra_slack < runtime_quantum) {
dbf_list_reduce_demand(sandbox_meta, sandbox_meta->extra_slack, true);
sandbox_meta->extra_slack = 0;
sandbox_meta->demand_node = NULL;
}
break;
}
case MESSAGE_CFW_EXTRA_DEMAND_REQUEST: {
assert(USING_TRY_LOCAL_EXTRA);
assert(new_message.exceeded_estimation);
assert(new_message.adjustment == runtime_quantum);
if (sandbox_meta->terminated) break;
assert(sandbox_meta->remaining_exec == 0);
if (absolute_deadline <= now) {
assert(sandbox_meta->error_code == 0);
if (sandbox_meta->extra_slack > 0) {
assert(sandbox_meta->demand_node);
dbf_list_reduce_demand(sandbox_meta, sandbox_meta->extra_slack, true);
sandbox_meta->demand_node = NULL;
// sandbox_meta->extra_slack = 0;
}
assert(sandbox_meta->demand_node == NULL);
sandbox_meta->error_code = 4081;
sandbox_meta->terminated = true;
break;
}
if (sandbox_meta->global_queue_type == 1) {
assert(sandbox_meta->trs_job_node);
assert(sandbox_meta->trs_job_node->sandbox_meta);
sandbox_meta->trs_job_node->sandbox_meta = NULL;
sandbox_meta->trs_job_node = NULL;
}
int return_code = 0;
int worker_id_v = -1;
uint64_t work_admitted = 0;
if (sandbox_meta->extra_slack >= runtime_quantum) {
bool tenant_can_admit = tenant_try_add_job_as_guaranteed(tenant, now, runtime_quantum, sandbox_meta);
return_code = tenant_can_admit ? 1 : 2;
worker_id_v = sandbox_meta->worker_id_virt;
work_admitted = 1;
sandbox_meta->extra_slack -= runtime_quantum;
} else {
assert(sandbox_meta->demand_node == NULL);
assert(sandbox_meta->extra_slack == 0);
work_admitted = traffic_control_decide(sandbox_meta, now, runtime_quantum, &return_code, &worker_id_v);
}
if (work_admitted == 0) {
// debuglog("No global supply left");
assert(return_code == 4295 || return_code == 4296);
assert(sandbox_meta->demand_node == NULL);
assert(sandbox_meta->extra_slack == 0);
assert(sandbox_meta->error_code == 0);
sandbox_meta->error_code = return_code;
sandbox_meta->terminated = true;
break;
}
assert(worker_id_v >= 0);
sandbox_meta->remaining_exec = runtime_quantum;
sandbox_meta->worker_id_virt = worker_id_v;
// TODO: Fix the BE budget calculation for when promote/demote happens
if (sandbox_meta->global_queue_type == 2 && return_code == 1) {
assert(sandbox_meta->pq_idx_in_tenant_queue >= 1);
priority_queue_delete_by_idx_nolock(tenant->local_sandbox_metas,
sandbox_meta,sandbox_meta->pq_idx_in_tenant_queue);
sandbox_meta->tenant_queue = NULL;
// printf("promote!\n");
} else if (sandbox_meta->global_queue_type == 1 && return_code == 2) {
assert(sandbox_meta->pq_idx_in_tenant_queue == 0);
if(unlikely(priority_queue_enqueue_nolock(tenant->local_sandbox_metas, sandbox_meta))){
panic("Failed to add sandbox_meta to tenant metadata queue");
}
sandbox_meta->tenant_queue = tenant->local_sandbox_metas;
// printf("demote!\n");
}
sandbox_meta->global_queue_type = return_code;
break;
}
case MESSAGE_CFW_WRITEBACK_PREEMPTION: {
assert(USING_WRITEBACK_FOR_PREEMPTION);
assert(USING_LOCAL_RUNQUEUE == false);
assert (sandbox_meta->state == SANDBOX_PREEMPTED);
struct sandbox *preempted_sandbox = new_message.sandbox;
assert(preempted_sandbox);
assert(preempted_sandbox == sandbox_meta->sandbox_shadow);
assert(preempted_sandbox->sandbox_meta == sandbox_meta);
assert(preempted_sandbox->id == new_message.sandbox_id);
assert(preempted_sandbox->state == SANDBOX_PREEMPTED);
assert(preempted_sandbox->writeback_preemption_in_progress);
assert(preempted_sandbox->absolute_deadline == absolute_deadline);
assert(preempted_sandbox->response_code == 0);
assert(preempted_sandbox->remaining_exec > 0);
if (sandbox_meta->terminated) {
assert(sandbox_meta->error_code > 0);
assert(preempted_sandbox->response_code == 0);
// preempted_sandbox->response_code = sandbox_meta->error_code;
// break;
// printf("terminated - %s\n", tenant->name);
} else if (absolute_deadline < now + (!preempted_sandbox->exceeded_estimation ? preempted_sandbox->remaining_exec : 0)) {
// printf("missed - %s\n", tenant->name);
/* // if (absolute_deadline < now + preempted_sandbox->remaining_execution_original) {
assert(sandbox_meta->terminated == false);
assert(sandbox_meta->remaining_exec == preempted_sandbox->remaining_exec);
assert(sandbox_meta->worker_id_virt >= 0);
void *global_dbf = global_virt_worker_dbfs[sandbox_meta->worker_id_virt];
dbf_try_update_demand(global_dbf, allocation_timestamp,
route->relative_deadline,
absolute_deadline, sandbox_meta->remaining_exec,
DBF_DELETE_EXISTING_DEMAND, NULL, NULL);
sandbox_meta->remaining_exec -= new_message.adjustment;
assert(sandbox_meta->remaining_exec == new_message.remaining_exec);
if (sandbox_meta->trs_job_node) {
assert(sandbox_meta->global_queue_type == 1);
tenant_update_job_node(tenant, sandbox_meta->remaining_exec, TRS_REDUCE_EXISTING_DEMAND, sandbox_meta);
}
if (sandbox_meta->global_queue_type == 2) {
assert(tenant->trs.best_effort_cycles >= sandbox_meta->remaining_exec);
tenant->trs.best_effort_cycles -= sandbox_meta->remaining_exec;
}
sandbox_meta->terminated = true;
assert(preempted_sandbox->response_code == 0);
preempted_sandbox->response_code = 4082;
break; */
assert(sandbox_meta->error_code == 0);
if (sandbox_meta->remaining_exec + sandbox_meta->extra_slack > 0) {
assert(sandbox_meta->demand_node);
dbf_list_reduce_demand(sandbox_meta, sandbox_meta->remaining_exec + sandbox_meta->extra_slack, true);
sandbox_meta->demand_node = NULL;
// sandbox_meta->remaining_exec = 0;
// sandbox_meta->extra_slack = 0;
}
assert(sandbox_meta->demand_node == NULL);
sandbox_meta->error_code = 4082;
sandbox_meta->terminated = true;
}
assert(sandbox_meta->terminated || sandbox_meta->remaining_exec == preempted_sandbox->remaining_exec);
if (unlikely(global_request_scheduler_add(preempted_sandbox) == NULL)) {
// TODO: REDUCE DBF, for now just panic!
panic("Failed to add the preempted_sandbox to global queue\n");
}
preempted_sandbox->writeback_preemption_in_progress = false;
break;
}
case MESSAGE_CFW_WRITEBACK_OVERSHOOT: {
assert(USING_WRITEBACK_FOR_OVERSHOOT);
assert (sandbox_meta->state == SANDBOX_PREEMPTED);
struct sandbox *preempted_sandbox = new_message.sandbox;
assert(preempted_sandbox);
assert(preempted_sandbox == sandbox_meta->sandbox_shadow);
assert(preempted_sandbox->sandbox_meta == sandbox_meta);
assert(preempted_sandbox->id == new_message.sandbox_id);
assert(preempted_sandbox->sandbox_meta == sandbox_meta);
assert(preempted_sandbox->state == SANDBOX_PREEMPTED);
assert(preempted_sandbox->writeback_overshoot_in_progress);
assert(preempted_sandbox->absolute_deadline == absolute_deadline);
assert(preempted_sandbox->response_code == 0);
assert(preempted_sandbox->remaining_exec == 0);
assert(new_message.remaining_exec == 0);
assert(sandbox_meta->remaining_exec == 0);
assert(new_message.adjustment == runtime_quantum);
if (sandbox_meta->terminated) {
assert(sandbox_meta->error_code > 0);
preempted_sandbox->response_code = sandbox_meta->error_code;
break;
}
if (absolute_deadline <= now) {
sandbox_meta->terminated = true;
preempted_sandbox->response_code = 4082;
break;
}
if (sandbox_meta->global_queue_type == 1) {
assert(sandbox_meta->trs_job_node);
assert(sandbox_meta->trs_job_node->sandbox_meta);
sandbox_meta->trs_job_node->sandbox_meta = NULL;
sandbox_meta->trs_job_node = NULL;
}
int return_code = 0;
int worker_id_v = -1;
uint64_t work_admitted = 0;
work_admitted = traffic_control_decide(sandbox_meta, now, runtime_quantum,
&return_code, &worker_id_v);
if (work_admitted == 0) {
// debuglog("No global supply left");
assert(return_code == 4290 || return_code == 4291);
preempted_sandbox->response_code = return_code + 5;
sandbox_meta->terminated = true;
break;
}
assert(worker_id_v >= 0);
sandbox_meta->remaining_exec = runtime_quantum;
sandbox_meta->worker_id_virt = worker_id_v;
if (sandbox_meta->global_queue_type == 2 && return_code == 1) {
assert(sandbox_meta->pq_idx_in_tenant_queue >= 1);
priority_queue_delete_by_idx_nolock(tenant->local_sandbox_metas,
sandbox_meta,sandbox_meta->pq_idx_in_tenant_queue);
} else if (sandbox_meta->global_queue_type == 1 && return_code == 2) {
assert(sandbox_meta->pq_idx_in_tenant_queue == 0);
if(unlikely(priority_queue_enqueue_nolock(tenant->local_sandbox_metas, sandbox_meta))){
panic("Failed to add sandbox_meta to tenant metadata queue");
}
}
sandbox_meta->global_queue_type = return_code;
preempted_sandbox->remaining_exec = runtime_quantum;
preempted_sandbox->writeback_overshoot_in_progress = false;
if (unlikely(global_request_scheduler_add(preempted_sandbox) == NULL)){
// TODO: REDUCE DBF, for now just panic!
panic("Failed to add the preempted_sandbox to global queue\n");
}
break;
}
default:
panic("Unknown message type received by the listener!");
break;
} // end switch 1
if (sandbox_meta->terminated == false) continue;
assert(sandbox_meta->demand_node == NULL);
if (sandbox_meta->pq_idx_in_tenant_queue) {
assert(sandbox_meta->global_queue_type == 2);
assert(sandbox_meta->tenant_queue);
priority_queue_delete_by_idx_nolock(sandbox_meta->tenant_queue,
sandbox_meta,sandbox_meta->pq_idx_in_tenant_queue);
sandbox_meta->tenant_queue = NULL;
}
switch (new_message.message_type) {
case MESSAGE_CFW_EXTRA_DEMAND_REQUEST:
if (sandbox_refs[new_message.sandbox_id % RUNTIME_MAX_ALIVE_SANDBOXES]){
new_message.message_type = MESSAGE_CTW_SHED_CURRENT_JOB;
if (!ck_ring_enqueue_spsc_message(&ctw->worker_ring, ctw->worker_ring_buffer, &new_message)) {
panic("Ring buffer was full and enqueue has failed!")
}
pthread_kill(runtime_worker_threads[new_message.sender_worker_idx], SIGALRM);
} else {
// printf("already dead\n");
}
break;
case MESSAGE_CFW_REDUCE_DEMAND:
break;
case MESSAGE_CFW_DELETE_SANDBOX:
assert (sandbox_meta->state == SANDBOX_RETURNED || sandbox_meta->state == SANDBOX_ERROR);
free(sandbox_meta);
break;
case MESSAGE_CFW_WRITEBACK_PREEMPTION:
case MESSAGE_CFW_WRITEBACK_OVERSHOOT:
// assert(preempted_sandbox);
// assert(preempted_sandbox->id == sandbox_meta->id);
// assert(preempted_sandbox->state == SANDBOX_PREEMPTED);
// sandbox_set_as_error(preempted_sandbox, SANDBOX_PREEMPTED);
// sandbox_free(preempted_sandbox);
// free(sandbox_meta);
break;
default:
panic ("Unknown message type!");
break;
} // end switch 2
// memset(&new_message, 0, sizeof(new_message));
} // end while
} // end for
#endif
}
static void
panic_on_epoll_error(struct epoll_event *evt)
{
@ -239,56 +655,201 @@ static void
on_client_request_received(struct http_session *session)
{
assert(session->state == HTTP_SESSION_RECEIVED_REQUEST);
session->request_downloaded_timestamp = __getcycles();
struct route *route = session->route;
uint64_t estimated_execution = route->execution_histogram.estimated_execution;
uint64_t work_admitted = 1;
const uint64_t now = __getcycles();
session->request_downloaded_timestamp = now;
struct tenant *tenant = session->tenant;
struct route *route = session->route;
http_route_total_increment_request(&session->route->metrics);
// uint64_t estimated_execution = route->execution_histogram.estimated_execution; // By defaulat this is half of deadline
// uint64_t work_admitted = 1;
// struct route *route = http_router_match_route(&tenant->router, session->http_request.full_url);
// if (route == NULL) {
// debuglog("Route: %s did not match any routes\n", session->http_request.full_url);
// session->state = HTTP_SESSION_EXECUTION_COMPLETE;
// http_session_set_response_header(session, 404);
// on_client_response_header_sending(session);
// return;
// }
// session->route = route;
// http_route_total_increment_request(&session->route->metrics);
#if defined TRAFFIC_CONTROL
/*
* Admin control.
* If client sends a request to the route "/main", server prints all the DBF data.
* If client sends a request to the route "/terminator", server does cleanup and terminates.
*/
if (tenant->tcp_server.port == 55555) {
if (strcmp(session->http_request.full_url, "/terminator") == 0) {
printf("Terminating SLEdge now!\n");
tenant_database_print_reservations();
printf("\nGLOBAL DBF DEMANDS:\n");
const int N_VIRT_WORKERS_DBF = USING_AGGREGATED_GLOBAL_DBF ? 1 : runtime_worker_threads_count;
for (int i = 0; i < N_VIRT_WORKERS_DBF; i++)
{
printf("GL Worker #%d\n", i);
dbf_print(global_virt_worker_dbfs[i], now);
}
// dbf_print(global_dbf, now);
// printf("\nGLOBAL GUAR DBF DEMANDS:\n");
printf("\nWorker #0 DBF DEMANDS:\n");
dbf_print(global_worker_dbf, now);
session->state = HTTP_SESSION_EXECUTION_COMPLETE;
http_session_set_response_header(session, 500);
on_client_response_header_sending(session);
runtime_cleanup();
exit(0);
}
if (strcmp(session->http_request.full_url, "/admin") == 0) {
printf("Hello from Admin!\n");
tenant_database_print_reservations();
printf("\nGLOBAL DBF DEMANDS:\n");
const int N_VIRT_WORKERS_DBF = USING_AGGREGATED_GLOBAL_DBF ? 1 : runtime_worker_threads_count;
for (int i = 0; i < N_VIRT_WORKERS_DBF; i++)
{
printf("GL Worker #%d\n", i);
dbf_print(global_virt_worker_dbfs[i], now);
}
// dbf_print(global_dbf, now);
// printf("\nGLOBAL GUAR DBF DEMANDS:\n");
printf("\nWorker #0 DBF DEMANDS:\n");
dbf_print(global_worker_dbf, now);
session->state = HTTP_SESSION_EXECUTION_COMPLETE;
http_session_set_response_header(session, 500);
on_client_response_header_sending(session);
return;
}
}
#endif
// const uint64_t sandbox_alloc_timestamp = __getcycles();
const uint64_t absolute_deadline = now + route->relative_deadline;
uint64_t estimated_execution = route->execution_histogram.estimated_execution; // TODO: By default half of deadline
uint64_t work_admitted = 1;
int return_code = 0;
int worker_id_v = -1;
#ifdef EXECUTION_REGRESSION
estimated_execution = get_regression_prediction(session);
#endif
#ifdef ADMISSIONS_CONTROL
/*
* Perform admissions control.
* If 0, workload was rejected, so close with 429 "Too Many Requests" and continue
*/
uint64_t admissions_estimate = admissions_control_calculate_estimate(estimated_execution,
/*
* Perform admissions control.
* If 0, workload was rejected, so close with 429 "Too Many Requests" and continue
*/
#if defined ADMISSIONS_CONTROL
const uint64_t admissions_estimate = admissions_control_calculate_estimate(estimated_execution,
route->relative_deadline);
work_admitted = admissions_control_decide(admissions_estimate);
#elif defined TRAFFIC_CONTROL
if (global_sandbox_meta == NULL) global_sandbox_meta = malloc(sizeof(struct sandbox_metadata));
assert(global_sandbox_meta);
global_sandbox_meta->tenant = tenant;
global_sandbox_meta->route = route;
global_sandbox_meta->tenant_queue = NULL;
global_sandbox_meta->sandbox_shadow = NULL;
global_sandbox_meta->global_queue_type = 0;
global_sandbox_meta->pq_idx_in_tenant_queue = 0;
global_sandbox_meta->error_code = 0;
global_sandbox_meta->exceeded_estimation = false;
global_sandbox_meta->terminated = false;
global_sandbox_meta->demand_node = NULL;
global_sandbox_meta->trs_job_node = NULL;
global_sandbox_meta->extra_slack = 0;
global_sandbox_meta->total_be_exec_cycles = 0;
global_sandbox_meta->owned_worker_idx = -2;
global_sandbox_meta->allocation_timestamp = now;
global_sandbox_meta->absolute_deadline = absolute_deadline;
global_sandbox_meta->remaining_exec = estimated_execution;
work_admitted = traffic_control_decide(global_sandbox_meta, now, estimated_execution,
&return_code, &worker_id_v);
assert(work_admitted == 0 || worker_id_v >= 0);
assert(work_admitted == 0 || return_code == 1 || return_code == 2);
#endif
if (work_admitted == 0) {
assert(worker_id_v < 0);
session->state = HTTP_SESSION_EXECUTION_COMPLETE;
http_session_set_response_header(session, 429);
on_client_response_header_sending(session);
sandbox_perf_log_print_denied_entry(tenant, route, return_code);
return;
}
#endif
/* Allocate a Sandbox */
session->state = HTTP_SESSION_EXECUTING;
struct sandbox *sandbox = sandbox_alloc(route->module, session, route, session->tenant, work_admitted);
struct sandbox *sandbox = sandbox_alloc(route->module, session, work_admitted, now);
if (unlikely(sandbox == NULL)) {
// TODO: REDUCE DEMAND!!!
debuglog("Failed to allocate sandbox\n");
session->state = HTTP_SESSION_EXECUTION_COMPLETE;
http_session_set_response_header(session, 500);
on_client_response_header_sending(session);
sandbox_perf_log_print_denied_entry(tenant, route, 5000);
return;
}
sandbox->remaining_exec = estimated_execution;
#if defined TRAFFIC_CONTROL
sandbox->global_queue_type = return_code;
sandbox->sandbox_meta = global_sandbox_meta;
if(extra_execution_slack_p > 0) {
const uint64_t hack = estimated_execution*extra_execution_slack_p/100;
dbf_list_force_add_extra_slack(global_virt_worker_dbfs[worker_id_v], global_sandbox_meta, hack);
global_sandbox_meta->extra_slack = hack;
}
global_sandbox_meta->sandbox_shadow = sandbox;
global_sandbox_meta->id = sandbox->id;
global_sandbox_meta->state = sandbox->state;
global_sandbox_meta->worker_id_virt = worker_id_v;
global_sandbox_meta->global_queue_type = return_code;
if (global_sandbox_meta->global_queue_type == 2) {
if(unlikely(priority_queue_enqueue_nolock(tenant->global_sandbox_metas, global_sandbox_meta))){
panic("Failed to add sandbox_meta to tenant metadata queue");
}
global_sandbox_meta->tenant_queue = tenant->global_sandbox_metas;
}
#endif
/* If the global request scheduler is full, return a 429 to the client */
if (unlikely(global_request_scheduler_add(sandbox) == NULL)) {
// debuglog("Failed to add sandbox to global queue\n");
// debuglog("Failed to add a %s sandbox to global queue\n", sandbox->tenant->name);
/////////////////////////////////// TODO ???
sandbox->response_code = 4290;
sandbox->state = SANDBOX_ERROR;
sandbox_perf_log_print_entry(sandbox);
sandbox->http = NULL;
// sandbox->state = SANDBOX_ERROR;
// sandbox_perf_log_print_entry(sandbox);
// sandbox->http = NULL;
sandbox->timestamp_of.dispatched = now;
// TODO: REDUCE DEMAND!!!
sandbox_set_as_error(sandbox, SANDBOX_INITIALIZED);
free(sandbox->sandbox_meta);
sandbox_free(sandbox);
session->state = HTTP_SESSION_EXECUTION_COMPLETE;
http_session_set_response_header(session, 429);
on_client_response_header_sending(session);
return;
// session->state = HTTP_SESSION_EXECUTION_COMPLETE;
// http_session_set_response_header(session, 429);
// on_client_response_header_sending(session);
// sandbox_perf_log_print_denied_entry(tenant, route, 999);
}
global_sandbox_meta = NULL;
}
static void
@ -437,20 +998,30 @@ listener_thread_main(void *dummy)
listener_thread_register_metrics_server();
/* Set my priority */
// runtime_set_pthread_prio(pthread_self(), 2);
// runtime_set_pthread_prio(pthread_self(), 2); // TODO: what to do with this?
pthread_setschedprio(pthread_self(), -20);
#ifdef TRAFFIC_CONTROL
const int epoll_timeout = 0;
#else
const int epoll_timeout = -1;
#endif
while (true) {
/* Block indefinitely on the epoll file descriptor, waiting on up to a max number of events */
#ifdef TRAFFIC_CONTROL
tenant_database_replenish_all();
check_messages_from_workers();
#endif
/* If -1, Block indefinitely on the epoll file descriptor, waiting on up to a max number of events */
int descriptor_count = epoll_wait(listener_thread_epoll_file_descriptor, epoll_events,
RUNTIME_MAX_EPOLL_EVENTS, -1);
RUNTIME_MAX_EPOLL_EVENTS, epoll_timeout);
if (descriptor_count == 0) continue;
if (descriptor_count < 0) {
if (errno == EINTR) continue;
panic("epoll_wait: %s", strerror(errno));
}
/* Assumption: Because epoll_wait is set to not timeout, we should always have descriptors here */
assert(descriptor_count > 0);
for (int i = 0; i < descriptor_count; i++) {
@ -460,6 +1031,8 @@ listener_thread_main(void *dummy)
switch (tag) {
case EPOLL_TAG_TENANT_SERVER_SOCKET:
// tenant_database_replenish_all();
// check_messages_from_workers();
on_tenant_socket_epoll_event(&epoll_events[i]);
break;
case EPOLL_TAG_HTTP_SESSION_CLIENT_SOCKET:

@ -5,7 +5,7 @@
/* Must be the same alignment as sandbox structs because of how the ps_list macros work */
thread_local static struct ps_list_head local_cleanup_queue PAGE_ALIGNED;
thread_local size_t local_cleanup_queue_size = 0;
void
local_cleanup_queue_initialize()
@ -29,6 +29,7 @@ local_cleanup_queue_add(struct sandbox *sandbox)
assert(sandbox);
assert(ps_list_singleton_d(sandbox));
ps_list_head_append_d(&local_cleanup_queue, sandbox);
local_cleanup_queue_size++;
assert(!local_cleanup_queue_is_empty());
}
@ -42,10 +43,13 @@ local_cleanup_queue_free()
{
struct sandbox *sandbox_iterator = NULL;
struct sandbox *buffer = NULL;
// if (local_cleanup_queue_size > 4) debuglog("Cleanup Queue Size: %lu", local_cleanup_queue_size);
ps_list_foreach_del_d(&local_cleanup_queue, sandbox_iterator, buffer)
{
assert(local_cleanup_queue_size > 0);
ps_list_rem_d(sandbox_iterator);
sandbox_free(sandbox_iterator);
local_cleanup_queue_size--;
}
}

@ -14,6 +14,8 @@
thread_local static struct priority_queue *local_runqueue_minheap;
thread_local static int max_local_runqueue_len = 0; //////////
/**
* Checks if the run queue is empty
* @returns true if empty. false otherwise
@ -40,6 +42,11 @@ local_runqueue_minheap_add(struct sandbox *sandbox)
return_code = priority_queue_enqueue_nolock(local_runqueue_minheap, sandbox);
if (unlikely(return_code == -ENOSPC)) panic("Thread Runqueue is full!\n");
}
if(priority_queue_length_nolock(local_runqueue_minheap) > max_local_runqueue_len) {
max_local_runqueue_len = priority_queue_length_nolock(local_runqueue_minheap);
debuglog("Local MAX Queue Length: %u", max_local_runqueue_len);
}
}
/**

@ -0,0 +1,124 @@
#include <stdint.h>
#include <threads.h>
#include "arch/context.h"
#include "current_sandbox.h"
#include "debuglog.h"
#include "global_request_scheduler.h"
#include "local_runqueue.h"
#include "local_runqueue_mtdbf.h"
#include "panic.h"
#include "priority_queue.h"
#include "sandbox_functions.h"
#include "runtime.h"
#include "dbf.h"
thread_local struct priority_queue *local_runqueue_mtdbf;
// thread_local struct priority_queue *local_default_queue;
thread_local static int max_local_runqueue_len = 0; //////////
/**
 * Reports whether this worker's MT-DBF run queue currently holds no sandboxes.
 * @returns true when the backing priority queue's length is zero, false otherwise
 */
bool
local_runqueue_mtdbf_is_empty()
{
	const int queued = priority_queue_length_nolock(local_runqueue_mtdbf);
	return queued == 0;
}
/**
 * Adds a sandbox to this worker's run queue.
 * If the backing priority queue is full, grows it once and retries; panics if
 * either the grow or the retry fails. Also records this worker as the
 * sandbox's owner and logs a new high-water mark of the queue length.
 * @param sandbox sandbox to enqueue (must be non-NULL)
 */
void
local_runqueue_mtdbf_add(struct sandbox *sandbox)
{
	assert(sandbox != NULL);

	int rc = priority_queue_enqueue_nolock(local_runqueue_mtdbf, sandbox);
	if (unlikely(rc == -ENOSPC)) {
		/* Queue is full: grow the backing queue (the grown queue replaces the
		 * thread-local handle) and retry the enqueue exactly once */
		struct priority_queue *temp = priority_queue_grow_nolock(local_runqueue_mtdbf);
		if (unlikely(temp == NULL)) panic("Failed to grow local runqueue\n");
		local_runqueue_mtdbf = temp;
		rc = priority_queue_enqueue_nolock(local_runqueue_mtdbf, sandbox);
		if (unlikely(rc == -ENOSPC)) panic("Thread Runqueue is full!\n");
	}

	/* Mark this worker as the sandbox's current owner */
	sandbox->owned_worker_idx = worker_thread_idx;

	/* Track the high-water mark of the queue length.
	 * Fixed: length query hoisted to a single call, and the log now uses %d —
	 * the original printed the signed int max_local_runqueue_len with %u. */
	const int queue_length = priority_queue_length_nolock(local_runqueue_mtdbf);
	if (queue_length > max_local_runqueue_len) {
		max_local_runqueue_len = queue_length;
		debuglog("Local MAX Queue Length: %d", max_local_runqueue_len);
	}
}
/**
 * Removes a sandbox from this worker's run queue.
 * Uses the heap index that the priority queue maintains inside the sandbox
 * (pq_idx_in_runqueue) for direct removal, then clears the sandbox's worker
 * ownership.
 * @param sandbox sandbox to delete (must be non-NULL and currently enqueued)
 */
static void
local_runqueue_mtdbf_delete(struct sandbox *sandbox)
{
	assert(sandbox != NULL);

	/* Direct removal by the cached heap index rather than a search */
	priority_queue_delete_by_idx_nolock(local_runqueue_mtdbf, sandbox, sandbox->pq_idx_in_runqueue);
	/* NOTE(review): -2 appears to be the "not owned by any worker" sentinel
	 * (it is also the value assigned at sandbox-meta setup) — confirm against
	 * the owned_worker_idx declaration */
	sandbox->owned_worker_idx = -2;

	// if (sandbox->pq_idx_in_default_queue >= 1) {
	// 	assert(sandbox->global_queue_type == 2 );
	// 	priority_queue_delete_by_idx_nolock(local_default_queue, sandbox, sandbox->pq_idx_in_default_queue);
	// }
}
/**
 * Determines the next sandbox to run: the one at the head of this worker's
 * run queue. The sandbox is peeked, not removed.
 * @return the sandbox at the head of the queue, or NULL when the queue is empty
 */
struct sandbox *
local_runqueue_mtdbf_get_next()
{
	struct sandbox *head = NULL;

	/* Peek at the top of the thread-local priority queue */
	if (priority_queue_top_nolock(local_runqueue_mtdbf, (void **)&head) == -ENOENT) return NULL;

	return head;
}
// static inline void
// sandbox_update_pq_idx_in_default_queue(void *element, size_t idx)
// {
// assert(element);
// struct sandbox *sandbox = (struct sandbox *)element;
// sandbox->pq_idx_in_default_queue = idx;
// }
/**
* Registers the PS variant with the polymorphic interface
*/
void
local_runqueue_mtdbf_initialize()
{
/* Initialize local state */
local_runqueue_mtdbf = priority_queue_initialize_new(RUNTIME_RUNQUEUE_SIZE, false, sandbox_get_priority_global, NULL,
sandbox_update_pq_idx_in_runqueue);
/* Register Function Pointers for Abstract Scheduling API */
struct local_runqueue_config config = { .add_fn = local_runqueue_mtdbf_add,
.is_empty_fn = local_runqueue_mtdbf_is_empty,
.delete_fn = local_runqueue_mtdbf_delete,
.get_next_fn = local_runqueue_mtdbf_get_next };
local_runqueue_initialize(&config);
}

@ -31,7 +31,7 @@
/* Conditionally used by debuglog when NDEBUG is not set */
int32_t debuglog_file_descriptor = -1;
uint32_t runtime_first_worker_processor = 1;
uint32_t runtime_first_worker_processor = 2;
uint32_t runtime_processor_speed_MHz = 0;
uint32_t runtime_total_online_processors = 0;
uint32_t runtime_worker_threads_count = 0;
@ -41,8 +41,11 @@ enum RUNTIME_SIGALRM_HANDLER runtime_sigalrm_handler = RUNTIME_SIGALRM_HANDLER_B
bool runtime_preemption_enabled = true;
bool runtime_worker_spinloop_pause_enabled = false;
uint32_t runtime_quantum_us = 1000; /* 1ms */
uint64_t runtime_quantum; /* cycles */
uint64_t runtime_boot_timestamp;
uint64_t runtime_max_deadline = 0L;
pid_t runtime_pid = 0;
uint16_t extra_execution_slack_p = 0; /* percentile */
/**
* Returns instructions on use of CLI if used incorrectly
@ -69,7 +72,7 @@ runtime_allocate_available_cores()
/* If more than two cores are available, leave core 0 free to run OS tasks */
if (runtime_total_online_processors > 2) {
runtime_first_worker_processor = 2;
// runtime_first_worker_processor = 2;
max_possible_workers = runtime_total_online_processors - 2;
} else if (runtime_total_online_processors == 2) {
runtime_first_worker_processor = 1;
@ -105,8 +108,12 @@ runtime_allocate_available_cores()
static inline void
runtime_get_processor_speed_MHz(void)
{
// runtime_processor_speed_MHz = 2600; // nworkers=1
// // runtime_processor_speed_MHz = 2893; // nworkers > 4
// return; ////////////////////////// temp
char *proc_mhz_raw = getenv("SLEDGE_PROC_MHZ");
FILE *cmd = NULL;
// FILE *cmd = NULL;
if (proc_mhz_raw != NULL) {
/* The case with manual override for the CPU freq */
@ -121,7 +128,7 @@ runtime_get_processor_speed_MHz(void)
awk '{ total += $4; count++ } END { print total/count }'",
runtime_first_worker_processor + 1,
runtime_first_worker_processor + runtime_worker_threads_count);
cmd = popen(command, "r");
FILE *cmd = popen(command, "r");
if (unlikely(cmd == NULL)) goto err;
char buff[16];
@ -140,6 +147,7 @@ runtime_get_processor_speed_MHz(void)
pretty_print_key_value("Worker CPU Freq", "%u MHz\n", runtime_processor_speed_MHz);
done:
// pclose(cmd);
return;
err:
goto done;
@ -223,7 +231,7 @@ runtime_configure()
if (strcmp(sigalrm_policy, "BROADCAST") == 0) {
runtime_sigalrm_handler = RUNTIME_SIGALRM_HANDLER_BROADCAST;
} else if (strcmp(sigalrm_policy, "TRIAGED") == 0) {
if (unlikely(scheduler != SCHEDULER_EDF)) panic("triaged sigalrm handlers are only valid with EDF\n");
// if (unlikely(scheduler != SCHEDULER_EDF)) panic("triaged sigalrm handlers are only valid with EDF\n");
runtime_sigalrm_handler = RUNTIME_SIGALRM_HANDLER_TRIAGED;
} else {
panic("Invalid sigalrm policy: %s. Must be {BROADCAST|TRIAGED}\n", sigalrm_policy);
@ -245,7 +253,17 @@ runtime_configure()
panic("SLEDGE_QUANTUM_US must be less than 999999 ms, saw %ld\n", quantum);
runtime_quantum_us = (uint32_t)quantum;
}
pretty_print_key_value("Quantum", "%u us\n", runtime_quantum_us);
// runtime_quantum = runtime_quantum_us * runtime_processor_speed_MHz;
// pretty_print_key_value("Quantum", "%u us = %lu cycles\n", runtime_quantum_us, runtime_quantum);
/* Extra execution slack percentile */
char *extra_slack_raw = getenv("EXTRA_EXEC_PERCENTILE");
if (extra_slack_raw != NULL) {
int extra_slack = atoi(extra_slack_raw);
if (unlikely(extra_slack < 0 || extra_slack > 50)) panic("EXTRA_EXEC_PERCENTILE must be between [0-50], saw %d\n", extra_slack);
extra_execution_slack_p = (uint16_t)extra_slack;
}
pretty_print_key_value("Extra Exec Slack", "%u%%\n", extra_execution_slack_p);
sandbox_perf_log_init();
http_session_perf_log_init();
@ -391,6 +409,12 @@ log_compiletime_config()
#else
pretty_print_key_disabled("Log Local Runqueue");
#endif
if (USING_EARLIEST_START_FIRST) {
pretty_print_key_enabled("USING_EARLIEST_START_FIRST");
} else {
pretty_print_key_disabled("USING_EARLIEST_START_FIRST");
}
}
void
@ -491,6 +515,7 @@ main(int argc, char **argv)
printf("Runtime Environment:\n");
runtime_set_resource_limits_to_max();
runtime_set_policy_and_prio();
runtime_allocate_available_cores();
runtime_configure();
runtime_initialize();
@ -500,6 +525,11 @@ main(int argc, char **argv)
runtime_start_runtime_worker_threads();
runtime_get_processor_speed_MHz();
runtime_configure_worker_spinloop_pause();
runtime_quantum = runtime_quantum_us * runtime_processor_speed_MHz;
pretty_print_key_value("Quantum", "%u us = %lu cycles\n", runtime_quantum_us, runtime_quantum);
// traffic_control_initialize();
software_interrupt_arm_timer();
#ifdef LOG_TENANT_LOADING
@ -524,11 +554,16 @@ main(int argc, char **argv)
exit(-1);
}
if (runtime_max_deadline < tenant->max_relative_deadline) runtime_max_deadline = tenant->max_relative_deadline;
/* Start listening for requests */
rc = tenant_listen(tenant);
if (rc < 0) exit(-1);
}
#ifdef TRAFFIC_CONTROL
traffic_control_initialize();
tenant_database_init_reservations();
#endif
runtime_boot_timestamp = __getcycles();
for (int tenant_idx = 0; tenant_idx < tenant_config_vec_len; tenant_idx++) {

@ -44,8 +44,9 @@ module_init(struct module *module, char *path)
rc = sledge_abi_symbols_init(&module->abi, path);
if (rc != 0) goto err;
module->pools = calloc((module->type == APP_MODULE ? runtime_worker_threads_count : 1),
sizeof(struct module_pool));
const int n = module->type == APP_MODULE ? runtime_worker_threads_count : 1;
// module->pools = calloc(n, sizeof(struct module_pool));
module->pools = calloc(1, sizeof(struct module_pool));
module->path = path;

@ -10,10 +10,13 @@
#include <sys/time.h>
#include "admissions_control.h"
#include "traffic_control.h"
#include "arch/context.h"
#include "debuglog.h"
#include "global_request_scheduler_deque.h"
#include "global_request_scheduler_minheap.h"
#include "global_request_scheduler_mtds.h"
#include "global_request_scheduler_mtdbf.h"
#include "http_parser_settings.h"
#include "listener_thread.h"
#include "module.h"
@ -32,6 +35,9 @@ int *runtime_worker_threads_argument;
/* The active deadline of the sandbox running on each worker thread */
uint64_t *runtime_worker_threads_deadline;
/* Tracks alive sandboxes */
bool sandbox_refs[RUNTIME_MAX_ALIVE_SANDBOXES] = { false };
/******************************************
* Shared Process / Listener Thread Logic *
*****************************************/
@ -197,3 +203,35 @@ runtime_set_prio(unsigned int nice)
return;
}
/**
 * Moves the sledgert process into the SCHED_FIFO real-time scheduling class at
 * priority 1. Without root privileges it prints a notice and returns without
 * changing anything; panics if the kernel rejects the policy change, then
 * reads the parameters back to verify the priority took effect.
 */
void
runtime_set_policy_and_prio()
{
	/* Changing to a real-time policy requires root privileges */
	if (geteuid() != 0) {
		printf("Won't set the priority for sledgert. Run with sudo again.\n");
		return;
	}

	const int FIFO_PRIO = 1; //sched_get_priority_max(SCHED_FIFO);

	struct sched_param param;
	param.sched_priority = FIFO_PRIO;

	/* Switch the whole process to SCHED_FIFO.
	 * NOTE(review): on failure the libc call returns -1 with the reason in
	 * errno, so the logged return code is always -1 — consider strerror(errno) */
	const int ret = sched_setscheduler(getpid(), SCHED_FIFO, &param);
	if (ret != 0) panic("sched_setscheduler error. Return code: %d", ret);

	/* Read the parameters back to confirm the kernel applied the priority */
	if (sched_getparam(0, &param) < 0) { perror("getparam: "); }
	assert(param.sched_priority == FIFO_PRIO);
	printf("Set sched for sledgert to FIFO. Prio=%d\n", FIFO_PRIO);
}

@ -2,6 +2,7 @@
#include <string.h>
#include <sys/mman.h>
#include "sandbox_types.h"
#include "current_sandbox.h"
#include "debuglog.h"
#include "panic.h"
@ -38,6 +39,10 @@ sandbox_allocate_linear_memory(struct sandbox *sandbox)
sandbox->memory = module_allocate_linear_memory(sandbox->module);
if (unlikely(sandbox->memory == NULL)) return -1;
// sandbox->sizes[sandbox->sizesize] = sandbox->memory->abi.size;
// sandbox->sizesize++;
sandbox->memory->abi.id = sandbox->id;
return 0;
}
@ -77,9 +82,22 @@ sandbox_free_stack(struct sandbox *sandbox)
{
assert(sandbox);
return module_free_stack(sandbox->module, sandbox->stack);
return module_free_stack(sandbox->module, sandbox->stack, sandbox->original_owner_worker_idx);
}
// /**
// * Free Linear Memory, leaving stack in place
// * @param sandbox
// */
// static inline void
// sandbox_free_linear_memory(struct sandbox *sandbox)
// {
// assert(sandbox != NULL);
// assert(sandbox->memory != NULL);
// module_free_linear_memory(sandbox->module, (struct wasm_memory *)sandbox->memory);
// sandbox->memory = NULL;
// }
/**
* Allocates HTTP buffers and performs our approximation of "WebAssembly instantiation"
* @param sandbox
@ -129,15 +147,15 @@ err_stack_allocation_failed:
err_memory_allocation_failed:
err_globals_allocation_failed:
err_http_allocation_failed:
sandbox_set_as_error(sandbox, SANDBOX_ALLOCATED);
sandbox_set_as_error(sandbox, SANDBOX_INITIALIZED);
sandbox_free(sandbox);
perror(error_message);
rc = -1;
goto done;
}
void
sandbox_init(struct sandbox *sandbox, struct module *module, struct http_session *session, struct route *route,
struct tenant *tenant, uint64_t admissions_estimate)
sandbox_init(struct sandbox *sandbox, struct module *module, struct http_session *session, uint64_t admissions_estimate)
{
/* Sets the ID to the value before the increment */
sandbox->id = sandbox_total_postfix_increment();
@ -150,12 +168,19 @@ sandbox_init(struct sandbox *sandbox, struct module *module, struct http_session
/* Allocate HTTP session structure */
assert(session);
sandbox->http = session;
sandbox->tenant = tenant;
sandbox->route = route;
sandbox->tenant = session->tenant;
sandbox->route = session->route;
sandbox->absolute_deadline = sandbox->timestamp_of.allocation + sandbox->route->relative_deadline;
sandbox->payload_size = session->http_request.body_length;
sandbox->exceeded_estimation = false;
sandbox->writeback_preemption_in_progress = false;
sandbox->writeback_overshoot_in_progress = false;
sandbox->response_code = 0;
sandbox->owned_worker_idx = -2;
sandbox->original_owner_worker_idx = -2;
/*
* Admissions Control State
* Assumption: an estimate of 0 should have been interpreted as a rejection
@ -176,8 +201,7 @@ sandbox_init(struct sandbox *sandbox, struct module *module, struct http_session
* @return the new sandbox request
*/
struct sandbox *
sandbox_alloc(struct module *module, struct http_session *session, struct route *route, struct tenant *tenant,
uint64_t admissions_estimate)
sandbox_alloc(struct module *module, struct http_session *session, uint64_t admissions_estimate, uint64_t sandbox_alloc_timestamp)
{
size_t alignment = (size_t)PAGE_SIZE;
size_t size_to_alloc = (size_t)round_up_to_page(sizeof(struct sandbox));
@ -190,13 +214,35 @@ sandbox_alloc(struct module *module, struct http_session *session, struct route
if (unlikely(sandbox == NULL)) return NULL;
memset(sandbox, 0, size_to_alloc);
sandbox->timestamp_of.allocation = sandbox_alloc_timestamp;
sandbox_set_as_allocated(sandbox);
sandbox_init(sandbox, module, session, route, tenant, admissions_estimate);
sandbox_init(sandbox, module, session, admissions_estimate);
sandbox_refs[sandbox->id % RUNTIME_MAX_ALIVE_SANDBOXES] = true;
return sandbox;
}
/**
 * Allocates the listener-side "shadow" metadata record for a sandbox.
 * The metadata mirrors scheduling-relevant fields of the sandbox so the
 * listener thread can act on it without touching the sandbox itself.
 *
 * Uses calloc so every field NOT explicitly copied below (e.g. error_code,
 * trs_job_node, demand_node, extra_slack, worker_id_virt) starts zeroed;
 * later code asserts on those zero defaults.
 *
 * @param sandbox the live sandbox to shadow (must be non-NULL)
 * @returns a freshly allocated, populated metadata record; panics on OOM
 */
struct sandbox_metadata *sandbox_meta_alloc(struct sandbox *sandbox)
{
	struct sandbox_metadata *sandbox_meta = calloc(1, sizeof(struct sandbox_metadata));
	if (unlikely(sandbox_meta == NULL)) panic("sandbox_meta_alloc: out of memory");

	/* Snapshot the scheduling-relevant state of the sandbox */
	sandbox_meta->sandbox_shadow       = sandbox;
	sandbox_meta->tenant               = sandbox->tenant;
	sandbox_meta->route                = sandbox->route;
	sandbox_meta->id                   = sandbox->id;
	sandbox_meta->state                = sandbox->state;
	sandbox_meta->allocation_timestamp = sandbox->timestamp_of.allocation;
	sandbox_meta->absolute_deadline    = sandbox->absolute_deadline;
	sandbox_meta->remaining_exec       = sandbox->remaining_exec;
	sandbox_meta->exceeded_estimation  = sandbox->exceeded_estimation;

	/* The worker that creates the sandbox initially owns it */
	sandbox_meta->owned_worker_idx       = worker_thread_idx;
	sandbox_meta->terminated             = false;
	sandbox_meta->pq_idx_in_tenant_queue = 0;

	return sandbox_meta;
}
void
sandbox_deinit(struct sandbox *sandbox)
{
@ -210,8 +256,9 @@ sandbox_deinit(struct sandbox *sandbox)
module_release(sandbox->module);
/* Linear Memory and Guard Page should already have been munmaped and set to NULL */
assert(sandbox->memory == NULL);
// assert(sandbox->memory == NULL);
if (likely(sandbox->memory != NULL)) sandbox_free_linear_memory(sandbox);
if (likely(sandbox->stack != NULL)) sandbox_free_stack(sandbox);
if (likely(sandbox->globals.buffer != NULL)) sandbox_free_globals(sandbox);
if (likely(sandbox->wasi_context != NULL)) wasi_context_destroy(sandbox->wasi_context);
@ -227,7 +274,9 @@ sandbox_free(struct sandbox *sandbox)
assert(sandbox != NULL);
assert(sandbox != current_sandbox_get());
assert(sandbox->state == SANDBOX_ERROR || sandbox->state == SANDBOX_COMPLETE);
assert(!listener_thread_is_running() || sandbox->memory == NULL);
sandbox_refs[sandbox->id % RUNTIME_MAX_ALIVE_SANDBOXES] = false;
sandbox_deinit(sandbox);
free(sandbox);
}

@ -1,3 +1,172 @@
#include "scheduler.h"
enum SCHEDULER scheduler = SCHEDULER_EDF;
/**
 * Reconciles a sandbox's execution accounting with the scheduler after a state
 * change, and (under TRAFFIC_CONTROL) reports that change to the listener
 * thread through this worker's SPSC message ring.
 *
 * Reached in one of three sandbox states (asserted below): SANDBOX_RETURNED,
 * SANDBOX_ERROR (terminal: enqueue a DELETE message), or SANDBOX_INTERRUPTED
 * (preemption: reduce recorded demand, possibly request extra demand,
 * write back, or kill on overshoot).
 *
 * @param sandbox the sandbox whose accounting is being updated; under
 *                TRAFFIC_CONTROL for the INTERRUPTED path it must be the
 *                currently running sandbox on this worker
 */
void
sandbox_process_scheduler_updates(struct sandbox *sandbox)
{
	/* MTDS paid tenants: simply burn the tenant's budget by the time just spent running */
	if (scheduler == SCHEDULER_MTDS && tenant_is_paid(sandbox->tenant)) {
		atomic_fetch_sub(&sandbox->tenant->remaining_budget, sandbox->last_running_state_duration);
		sandbox->last_running_state_duration = 0;
		return;
	}

#ifdef TRAFFIC_CONTROL
	/* The metadata shadow must still point back at this exact sandbox */
	assert(sandbox->sandbox_meta);
	assert(sandbox == sandbox->sandbox_meta->sandbox_shadow);
	assert(sandbox->id == sandbox->sandbox_meta->id);

	/* This worker's outbound ring toward the listener thread */
	struct comm_with_worker *cfw = &comm_from_workers[worker_thread_idx];
	assert(cfw);

	const uint64_t now = __getcycles();

	/* Common message template; message_type/adjustment/remaining_exec filled per-path below */
	struct message new_message = {
		.sandbox = sandbox,
		.sandbox_id = sandbox->id,
		.sandbox_meta = sandbox->sandbox_meta,
		.state = sandbox->state,
		.sender_worker_idx = worker_thread_idx,
		.exceeded_estimation = sandbox->exceeded_estimation,
		.total_running_duration = 0,
		.timestamp = now
	};

	/* Terminal states: tell the listener to delete the sandbox's bookkeeping */
	if (sandbox->state == SANDBOX_RETURNED || sandbox->state == SANDBOX_ERROR) {
		/* Demand adjustment is the time spent in the last running burst, capped by what was still owed */
		uint64_t adjustment = sandbox->last_running_state_duration;
		if (sandbox->remaining_exec < adjustment) adjustment = sandbox->remaining_exec;
		// const uint64_t adjustment = sandbox->remaining_exec;
		if (USING_LOCAL_RUNQUEUE && adjustment > 0 && sandbox->response_code == 0) {
			/* NOTE(review): intentionally empty — local worker-DBF update disabled below; confirm */
			// dbf_try_update_demand(worker_dbf, sandbox->timestamp_of.dispatched,
			// sandbox->route->relative_deadline, sandbox->absolute_deadline, sandbox->remaining_exec,
			// DBF_DELETE_EXISTING_DEMAND, NULL, NULL);
		}
		// sandbox->remaining_exec = 0;

		new_message.message_type = MESSAGE_CFW_DELETE_SANDBOX;
		new_message.adjustment = adjustment;
		// new_message.remaining_exec = 0;
		new_message.remaining_exec = sandbox->remaining_exec;
		/* Report total on-CPU time (user + system) for this sandbox */
		new_message.total_running_duration = sandbox->duration_of_state[SANDBOX_RUNNING_USER] + sandbox->duration_of_state[SANDBOX_RUNNING_SYS];
		if (!ck_ring_enqueue_spsc_message(&cfw->worker_ring, cfw->worker_ring_buffer, &new_message)) {
			panic("Ring The buffer was full and the enqueue operation has failed.!");
		}
		return;
	}

	/* Unless the sandbox is in the terminal state (handled above), then the only state it can be is INTERRUPTED */
	assert(sandbox->state == SANDBOX_INTERRUPTED);
	assert(sandbox == current_sandbox_get());
	assert(sandbox->response_code == 0);
	assert(sandbox->remaining_exec > 0);
	assert(!sandbox->exceeded_estimation || sandbox->remaining_exec == runtime_quantum);

	/* The listener already decided to shed this job: adopt its error code and exit the sandbox */
	if (sandbox->sandbox_meta->terminated) {
		assert(sandbox->sandbox_meta->error_code > 0);
		// dbf_try_update_demand(worker_dbf, sandbox->timestamp_of.dispatched,
		// sandbox->route->relative_deadline, sandbox->absolute_deadline, sandbox->remaining_exec,
		// DBF_DELETE_EXISTING_DEMAND, NULL, NULL);
		sandbox->response_code = sandbox->sandbox_meta->error_code;
		interrupted_sandbox_exit();
		return;
	}

	/* Deadline check: first-time jobs must still fit their full remaining execution before the deadline */
	if (sandbox->absolute_deadline < now + (!sandbox->exceeded_estimation ? sandbox->remaining_exec : 0)) {
		// dbf_try_update_demand(worker_dbf, sandbox->timestamp_of.dispatched,
		// sandbox->route->relative_deadline, sandbox->absolute_deadline, sandbox->remaining_exec,
		// DBF_DELETE_EXISTING_DEMAND, NULL, NULL);
		sandbox->response_code = 4081; /* deadline miss */
		interrupted_sandbox_exit();
		return;
	}

	/* NOTE(review): dbf_reduce_mode is set but only consumed by commented-out code below — confirm intent */
	dbf_update_mode_t dbf_reduce_mode = DBF_REDUCE_EXISTING_DEMAND;
	uint64_t adjustment = sandbox->last_running_state_duration;
	if (sandbox->remaining_exec < sandbox->last_running_state_duration || sandbox->exceeded_estimation) {
		/* To avoid less than quantum updates manually set the adjustment to quantum */
		adjustment = sandbox->remaining_exec;
		dbf_reduce_mode = DBF_DELETE_EXISTING_DEMAND;
	}

	/* Charge the burst against the remaining execution estimate */
	sandbox->last_running_state_duration = 0;
	sandbox->remaining_exec -= adjustment;

	new_message.adjustment = adjustment;
	new_message.message_type = MESSAGE_CFW_REDUCE_DEMAND;
	new_message.remaining_exec = sandbox->remaining_exec;

	if (USING_LOCAL_RUNQUEUE /* && !sandbox->exceeded_estimation */) {
		/* NOTE(review): intentionally empty — local worker-DBF update disabled; confirm */
		// dbf_try_update_demand(worker_dbf, sandbox->timestamp_of.dispatched,
		// sandbox->route->relative_deadline, sandbox->absolute_deadline, adjustment,
		// dbf_reduce_mode, NULL, NULL);
	}

	/* NOTE(review): panic() below is written without a trailing semicolon — assumes the macro is a full statement; confirm */
	if (!ck_ring_enqueue_spsc_message(&cfw->worker_ring, cfw->worker_ring_buffer, &new_message)) {
		panic("Ring The buffer was full and the enqueue operation has failed.!")
	}

	if (sandbox->remaining_exec == 0) {
		/* OVERSHOOT case! The estimate is exhausted but the job has not finished */
		// printf("Went over estimation - sandbox_id=%lu of %s!\n", sandbox->id, sandbox->tenant->name);

		/* Track per-tenant overshoot statistics */
		if (sandbox->exceeded_estimation == false) sandbox->tenant->num_of_overshooted_sandboxes++;
		sandbox->exceeded_estimation = true;
		sandbox->num_of_overshoots++;
		if (sandbox->num_of_overshoots > sandbox->tenant->max_overshoot_of_same_sandbox) {
			sandbox->tenant->max_overshoot_of_same_sandbox = sandbox->num_of_overshoots;
		}

		/* Grant exactly one more quantum at a time while overshooting */
		const uint64_t extra_demand = runtime_quantum;

		if (USING_LOCAL_RUNQUEUE && USING_TRY_LOCAL_EXTRA
		    /*&& dbf_try_update_demand(worker_dbf, now, sandbox->route->relative_deadline,
		    sandbox->absolute_deadline, extra_demand, DBF_CHECK_AND_ADD_DEMAND, &new_message, NULL)*/
		    || (!USING_LOCAL_RUNQUEUE && USING_TRY_LOCAL_EXTRA)) {
			/* Worker DBF has supply left */
			// printf("Worker %d granted extra for sandbox %lu!\n", worker_thread_idx, sandbox->id);
			sandbox->remaining_exec = extra_demand;

			new_message.adjustment = extra_demand;
			new_message.exceeded_estimation = true;
			new_message.message_type = MESSAGE_CFW_EXTRA_DEMAND_REQUEST;
			new_message.remaining_exec = extra_demand;
			if (!ck_ring_enqueue_spsc_message(&cfw->worker_ring, cfw->worker_ring_buffer, &new_message)) {
				panic("Ring The buffer was full and the enqueue operation has failed.!")
			}
			return;
		} else if (USING_WRITEBACK_FOR_OVERSHOOT) {
			/* Write back: hand the overshooting job back to the global scheduler */
			// printf("No supply left in worker #%d. So, writeback sandbox=%lu of %s\n", worker_thread_idx, sandbox->id, sandbox->tenant->name);
			sandbox->remaining_exec = 0;
			sandbox->writeback_overshoot_in_progress = true;
			local_runqueue_delete(sandbox); // TODO: This needs to go in preemp_sandbox state change!
			return;
		} else {
			/* Kill work: no supply and no writeback configured */
			// printf("No supply left in worker #%d. So, kill sandbox=%lu of %s\n", worker_thread_idx, sandbox->id, sandbox->tenant->name);
			assert(sandbox->response_code == 0);
			sandbox->response_code = 4093;
			interrupted_sandbox_exit();
			return;
		}
	}
#else
	/* Without traffic control: just decrement the remaining-execution estimate, clamped at zero */
	if (sandbox->remaining_exec > sandbox->last_running_state_duration) {
		sandbox->remaining_exec -= sandbox->last_running_state_duration;
	} else {
		sandbox->remaining_exec = 0;
	}
	sandbox->last_running_state_duration = 0;
#endif
}

@ -43,14 +43,25 @@ check_bounds(uint32_t offset, uint32_t bounds_check)
EXPORT int32_t
sledge_abi__wasm_memory_expand(struct sledge_abi__wasm_memory *wasm_memory, uint32_t page_count)
{
uint64_t oldsize = wasm_memory->size;
struct sandbox *sandbox = current_sandbox_get();
assert(sandbox->sandbox_meta);
if (!(sandbox->id == wasm_memory->id)) fprintf(stderr, "sand_id: %lu, wasm_id: %lu, terminated: %u, orig_idx: %d, owner_wrk; %d, wrk: %d\n", sandbox->id, wasm_memory->id, sandbox->sandbox_meta->terminated, sandbox->original_owner_worker_idx, sandbox->owned_worker_idx, worker_thread_idx);
assert(sandbox->id == wasm_memory->id);
// if (!(sandbox->memory->abi.size == wasm_memory->size)) printf("sand_memsize: %lu, wasm_size: %lu, wasm_oldsize: %lu, targetsize: %lu\n", sandbox->memory->abi.size, wasm_memory->size, oldsize, page_count * WASM_PAGE_SIZE);
// assert(sandbox->memory->abi.size == wasm_memory->size);
if (sandbox->state != SANDBOX_RUNNING_USER) panic ("state!!! %u\n", sandbox->state);
sandbox_syscall(sandbox);
int32_t old_page_count = wasm_memory->size / WASM_PAGE_SIZE;
int rc = wasm_memory_expand((struct wasm_memory *)wasm_memory, page_count * WASM_PAGE_SIZE);
if (unlikely(rc == -1)) {
old_page_count = -1;
assert(0);
goto DONE;
}
@ -58,6 +69,13 @@ sledge_abi__wasm_memory_expand(struct sledge_abi__wasm_memory *wasm_memory, uint
* the original struct as well */
current_sandbox_memory_writeback();
// if (!(sandbox->memory->abi.size == wasm_memory->size)) printf("sand_memsize: %lu, wasm_size: %lu, wasm_oldsize: %lu, targetsize: %lu\n", sandbox->memory->abi.size, wasm_memory->size, oldsize, page_count * WASM_PAGE_SIZE);
// assert(sandbox->memory->abi.size == wasm_memory->size);
// sandbox->sizes[sandbox->sizesize] = wasm_memory->size + page_count * WASM_PAGE_SIZE;
// sandbox->sizesize++;
#ifdef LOG_SANDBOX_MEMORY_PROFILE
// Cache the runtime of the first N page allocations
for (int i = 0; i < page_count; i++) {

@ -179,6 +179,7 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
atomic_fetch_sub(&handler_depth, 1);
current_sandbox_trap(WASM_TRAP_OUT_OF_BOUNDS_LINEAR_MEMORY);
} else {
assert(0);
panic("Runtime SIGSEGV\n");
}

@ -3,6 +3,8 @@
#include "panic.h"
#include "runtime.h"
#include "tenant.h"
#include "panic.h"
#include "tenant_functions.h"
/*******************
* Tenant Database *
@ -35,7 +37,6 @@ err_no_space:
goto done;
}
/**
* Given a name, find the associated tenant
* @param name
@ -102,3 +103,138 @@ tenant_database_foreach(void (*cb)(struct tenant *, void *), void *arg)
cb(tenant_database[i], arg);
}
}
#ifdef TRAFFIC_CONTROL
/**
 * One-time initialization of every tenant's reservation (trs) budgets.
 * Guaranteed budget = the tenant's reserved percentile share of the total
 * worker-cycles available per replenishment period; best-effort budget
 * starts unbounded.
 */
void
tenant_database_init_reservations()
{
	printf("Runtime Max Deadline: %lu\n", runtime_max_deadline);

	for (size_t i = 0; i < tenant_database_count; i++) {
		assert(tenant_database[i]);
		struct tenant *t = tenant_database[i];
		/* Tenant on port 55555 is skipped throughout this file — presumably the admin/control tenant; confirm */
		if (t->tcp_server.port == 55555) continue;

		/* percentile share of (period * worker count) cycles */
		t->trs.max_budget_guaranteed = REPLENISHMENT_PERIOD * runtime_worker_threads_count * t->reservation_percentile / 100;
		t->trs.budget_guaranteed = t->trs.max_budget_guaranteed;
		/* Best-effort budget starts effectively infinite */
		t->trs.budget_best = UINT64_MAX;

		/* NOTE(review): %ld used for max_budget_guaranteed although sibling budgets are uint64_t — confirm signedness */
		printf("Tenant %s, max_deadline: %lu, max_budget: %ld\n", t->name, t->max_relative_deadline, t->trs.max_budget_guaranteed);
	}
}
/**
 * @brief Walks the tenant database and dumps each tenant's reservation and
 * queue state (metadata queue lengths, overshoot counters, pending jobs)
 * to stdout. Diagnostic output only; no state is modified.
 */
void
tenant_database_print_reservations()
{
	for (size_t idx = 0; idx < tenant_database_count; idx++) {
		struct tenant *tenant = tenant_database[idx];
		assert(tenant);

		/* Tenant bound to port 55555 is excluded, as elsewhere in this file */
		if (tenant->tcp_server.port == 55555) continue;

		printf("\nTENANT: %s INFO:\n", tenant->name);
		printf("Global_meta_size: %d, Local_meta_size: %d\n",
		       priority_queue_length_nolock(tenant->global_sandbox_metas),
		       priority_queue_length_nolock(tenant->local_sandbox_metas));
		printf("Number of total overshoots: %u, MAX of overshoots from the same sandbox: %u \n",
		       tenant->num_of_overshooted_sandboxes, tenant->max_overshoot_of_same_sandbox);
		tenant_print_jobs(tenant);
	}
}
/**
 * Selects the tenant whose pending work is the best candidate for shedding:
 * among tenants that have at least one queued job whose absolute deadline is
 * at or before @time_of_oversupply, picks the one with the smallest
 * best-effort budget (trs.budget_best) and reports that job via
 * @sandbox_meta_to_remove.
 *
 * NOTE(review): @tenant_to_exclude and @weak_shed are only referenced in
 * commented-out code below — the exclusion is currently NOT applied here
 * (the caller compares the result against tenant_to_exclude instead); confirm.
 *
 * @param tenant_to_exclude tenant requesting the shed (currently unused, see note)
 * @param time_of_oversupply deadline horizon; only jobs due by this time are candidates
 * @param weak_shed shedding mode flag (currently unused, see note)
 * @param sandbox_meta_to_remove out: the chosen victim job's metadata, or untouched if none
 * @returns the tenant to punish, or NULL if no eligible job exists
 */
struct tenant *
tenant_database_find_tenant_most_oversupply(struct tenant *tenant_to_exclude, uint64_t time_of_oversupply, bool weak_shed, struct sandbox_metadata **sandbox_meta_to_remove)
{
	assert(sandbox_meta_to_remove != NULL);
	struct tenant *tenant_to_punish = NULL;
	uint64_t min_tenant_BE_budget = UINT64_MAX;

	for (size_t i = 0; i < tenant_database_count; i++) {
		assert(tenant_database[i]);
		// if (tenant_database[i] == tenant_to_exclude) continue;
		struct tenant *tenant = tenant_database[i];
		assert(tenant);
		/* Skip the tenant on port 55555 (admin/control — see sibling functions) and tenants with no queued jobs */
		if (tenant->tcp_server.port == 55555) continue;
		if (priority_queue_length_nolock(tenant->global_sandbox_metas) + priority_queue_length_nolock(tenant->local_sandbox_metas) == 0) continue;

		/* Prefer the head of the global queue; fall back to the local queue when the
		   global head is absent or due after the oversupply horizon */
		struct sandbox_metadata *sandbox_meta = NULL;
		int rc = priority_queue_top_nolock(tenant->global_sandbox_metas, (void **)&sandbox_meta);
		if (!sandbox_meta || sandbox_meta->absolute_deadline > time_of_oversupply) {
			sandbox_meta = NULL;
			rc = priority_queue_top_nolock(tenant->local_sandbox_metas, (void **)&sandbox_meta);
		}
		if (!sandbox_meta || sandbox_meta->absolute_deadline > time_of_oversupply) continue;
		assert(rc == 0);
		assert(sandbox_meta);

		/* Punish the tenant with the least remaining best-effort budget */
		if (tenant->trs.budget_best < min_tenant_BE_budget) {
			min_tenant_BE_budget = tenant->trs.budget_best;
			tenant_to_punish = tenant;
			*sandbox_meta_to_remove = sandbox_meta;
		}
	}

	/* Retained debugging block (disabled):
	if(weak_shed && tenant_to_punish == NULL) printf("Weak mode: No tenant to punish for %s\n", tenant_to_exclude->name);
	if(!weak_shed && tenant_to_punish == NULL) printf("Strong mode: No tenant to punish for %s\n", tenant_to_exclude->name);
	if (weak_shed && tenant_to_punish) {
		assert(tenant_to_punish->reservation_percentile==20);
		printf("MODE weak=%u - pending tenant %s \n", weak_shed, tenant_to_exclude->name);
		printf("Start (ms): %lu\n", start_time);
		printf("AbsDL (ms): %lu\n", absolute_deadline);
		printf("ToS (ms): %lu\n", time_of_oversupply);
		printf("RelativeDL (ms): %lu\n", absolute_deadline/runtime_quantum - start_time/runtime_quantum);
		min_tenant_BE_budget = UINT64_MAX;
		for (size_t i = 0; i < tenant_database_count; i++) {
			assert(tenant_database[i]);
			struct tenant *tenant = tenant_database[i];
			assert(tenant);
			if (tenant->tcp_server.port == 55555) continue;
			uint64_t t_budget_BE = tenant->trs.budget_BE;
			if (t_budget_BE < min_tenant_BE_budget) {
				min_tenant_BE_budget = t_budget_BE;
				tenant_to_punish = tenant;
			}
			printf("Tenant: %s, rp=%u, min_tenant_BE_budget=%lu\n", tenant->name,
			       tenant->reservation_percentile, min_tenant_BE_budget);
			tenant_print_jobs(tenant);
			printf("Tenant Globl SIZE: %d\n", priority_queue_length_nolock(tenant->global_sandbox_metas));
			printf("Tenant Local SIZE: %d\n\n", priority_queue_length_nolock(tenant->local_sandbox_metas));
		}
		const int N_VIRT_WORKERS_DBF = USING_AGGREGATED_GLOBAL_DBF ? 1 : runtime_worker_threads_count;
		for (int i = 0; i < N_VIRT_WORKERS_DBF; i++) {
			printf("GL Worker #%d\n", i);
			dbf_print(global_virt_worker_dbfs[i], start_time);
		}
		assert(0);
	}*/

	/* Either nothing was chosen, or the out-param matches the chosen tenant */
	assert((*sandbox_meta_to_remove)==NULL || (*sandbox_meta_to_remove)->tenant == tenant_to_punish);
	return tenant_to_punish;
}
/**
 * Invokes tenant_replenish() on every tenant in the database (except the one
 * bound to port 55555, excluded as elsewhere in this file), sharing a single
 * timestamp so all tenants are credited for the same instant.
 */
void
tenant_database_replenish_all()
{
	const uint64_t timestamp = __getcycles();

	for (size_t idx = 0; idx < tenant_database_count; idx++) {
		struct tenant *tenant = tenant_database[idx];
		assert(tenant);

		if (tenant->tcp_server.port == 55555) continue;

		tenant_replenish(tenant, timestamp);
	}
}
#endif

@ -0,0 +1,254 @@
#include <unistd.h>
#include "traffic_control.h"
#include "debuglog.h"
#include "global_request_scheduler_mtdbf.h"
#include "tenant_functions.h"
#include "sandbox_set_as_error.h"
#include "dbf.h"
#ifdef TRAFFIC_CONTROL
// void *global_dbf;
void **global_virt_worker_dbfs;
void *global_worker_dbf; // temp ///////////
extern struct priority_queue *global_request_scheduler_mtdbf;//, *global_default;
extern lock_t global_lock;
/**
 * Allocates and initializes the global per-virtual-worker DBF (demand) table.
 * With USING_AGGREGATED_GLOBAL_DBF a single DBF represents the whole worker
 * pool; otherwise one DBF is created per worker, each covering an equal share
 * of the pool. Must run after runtime_max_deadline is known (asserted).
 * Panics on allocation failure instead of deferring a NULL dereference.
 */
void
traffic_control_initialize()
{
	assert(runtime_max_deadline > 0);

	const int N_VIRT_WORKERS_DBF = USING_AGGREGATED_GLOBAL_DBF ? 1 : runtime_worker_threads_count;

	global_virt_worker_dbfs = malloc(N_VIRT_WORKERS_DBF * sizeof(void *));
	if (global_virt_worker_dbfs == NULL) panic("traffic_control_initialize: out of memory");

	for (int i = 0; i < N_VIRT_WORKERS_DBF; i++) {
		/* Each virtual DBF covers an equal slice of the worker pool */
		global_virt_worker_dbfs[i] = dbf_initialize(runtime_worker_threads_count / N_VIRT_WORKERS_DBF, 100, -1, NULL);
	}
}
/**
 * Records an admission-control outcome in the debug log when the
 * LOG_TRAFFIC_CONTROL build flag is enabled; otherwise a no-op.
 *
 * @param num case number of the decision path taken in traffic_control_decide
 * @param admitted whether the request was admitted
 */
void
traffic_control_log_decision(const int num, const bool admitted)
{
#ifdef LOG_TRAFFIC_CONTROL
	const char *verdict = admitted ? "yes" : "no";
	debuglog("Admission case #: %d, Admitted? %s\n", num, verdict);
#endif /* LOG_TRAFFIC_CONTROL */
}
/* Round-robin starting index into the virtual-worker DBF array, advanced after
 * each successful placement to spread demand across DBFs.
 * NOTE(review): plain non-atomic global — assumed to be touched by a single
 * (listener) thread only; confirm. */
int ind = 0;

/**
 * Tries to register @adjustment cycles of demand, due by sm->absolute_deadline,
 * in one of the global virtual-worker DBFs, probing round-robin from `ind`.
 *
 * @param start_time earliest time the demand can be served
 * @param adjustment cycles of demand to add
 * @param time_oversupply_p out: on failure, the latest "time of oversupply"
 *        reported among the rejecting DBFs (only written on failure)
 * @param sm metadata of the job whose demand is being placed
 * @returns index of the accepting virtual-worker DBF, or -1 if all rejected
 */
int
global_virt_worker_dbfs_try_update_demand(uint64_t start_time, uint64_t adjustment, uint64_t *time_oversupply_p, struct sandbox_metadata *sm)
{
	bool global_can_admit = false;
	uint64_t time_oversupply = 0;
	const uint64_t absolute_deadline = sm->absolute_deadline;

	const int N_VIRT_WORKERS_DBF = USING_AGGREGATED_GLOBAL_DBF ? 1 : runtime_worker_threads_count;

	/* Hack the start time to make sure demand less than the quantum is also served */
	if ((absolute_deadline - start_time) * N_VIRT_WORKERS_DBF < runtime_quantum) start_time = absolute_deadline - runtime_quantum;

	/* Probe every DBF once, starting at the rotating index */
	for (int i = ind; i < (N_VIRT_WORKERS_DBF) + ind; i++) {
		assert(global_virt_worker_dbfs);
		void *global_dbf = global_virt_worker_dbfs[i % N_VIRT_WORKERS_DBF];
		global_can_admit = dbf_list_try_add_new_demand(global_dbf, start_time, absolute_deadline, adjustment, sm);
		if (global_can_admit) {
			/* Next placement starts at the following DBF */
			ind = (i + 1) % N_VIRT_WORKERS_DBF;
			return i % N_VIRT_WORKERS_DBF;
		}
		/* Track the worst (latest) oversupply instant among the rejecting DBFs */
		if (time_oversupply < dbf_get_time_of_oversupply(global_dbf)) time_oversupply = dbf_get_time_of_oversupply(global_dbf);
	}

	*time_oversupply_p = time_oversupply;
	return -1;
}
/**
 * Central admission decision for a job, combining the tenant's guaranteed
 * reservation state with the global virtual-worker DBF supply. Four cases:
 *   #1 tenant AND system have capacity        -> admit as guaranteed (rc=1)
 *   #2 only system has capacity               -> admit best-effort (rc=2),
 *      unless work conservation is disabled
 *   #3 only tenant has capacity               -> shed other tenants' work until
 *      the job fits, then admit as guaranteed (rc=1); deny if nothing sheddable
 *   #4 neither has capacity                   -> shed (weak mode) until it fits
 *      as best-effort (rc=2); deny if nothing sheddable
 * Denial paths set rc to a 4xxx response code instead.
 *
 * @param sandbox_meta metadata of the job being admitted
 * @param start_time earliest service time of the demand
 * @param estimated_execution cycles of demand requested
 * @param ret_code out: 1/2 on admit, 4290/4291/4295/4296 on deny
 * @param worker_id_virtual out: virtual-worker DBF index holding the demand, or -1
 * @returns cycles of work admitted (== estimated_execution), or 0 on denial
 */
uint64_t
traffic_control_decide(struct sandbox_metadata *sandbox_meta, const uint64_t start_time, const uint64_t estimated_execution, int *ret_code, int *worker_id_virtual)
{
	/* Nominal non-zero value in case traffic control is disabled */
	uint64_t work_admitted = estimated_execution;
	int rc = 0;
	int worker_id_v = -1;

	assert(sandbox_meta);
	struct tenant *tenant = sandbox_meta->tenant;
	const uint64_t absolute_deadline = sandbox_meta->absolute_deadline;
	uint64_t time_global_oversupply = 0;

	/* First try to place the demand in some global virtual-worker DBF */
	worker_id_v = global_virt_worker_dbfs_try_update_demand(start_time, estimated_execution, &time_global_oversupply, sandbox_meta);
	bool global_can_admit = worker_id_v >= 0;

	// bool tenant_can_admit = tenant_try_add_job(tenant, start_time, estimated_execution, TRS_CHECK_GUARANTEED, sandbox_meta);
	bool tenant_can_admit = tenant_can_admit_guaranteed(tenant, start_time, estimated_execution);

	if (tenant_can_admit && global_can_admit) {
		/* Case #1: Both the tenant and overall system is under utlized. So, just admit. */
		tenant_can_admit = tenant_try_add_job_as_guaranteed(tenant, start_time, estimated_execution, sandbox_meta);
		assert(tenant_can_admit);
		traffic_control_log_decision(1, true);
		rc = 1;
	} else if (!tenant_can_admit && global_can_admit) {
		/* Case #2: Tenant is over utilized, but system is under utilized. So, admit for work-conservation. */
		if (USING_WORK_CONSERVATION == false) {
			traffic_control_log_decision(2, false);
			/* Roll back the demand already placed in the global DBF above */
			dbf_try_update_demand(global_virt_worker_dbfs[worker_id_v], start_time,
			                      0, absolute_deadline, estimated_execution,
			                      DBF_DELETE_EXISTING_DEMAND, NULL, sandbox_meta);
			goto any_work_not_admitted;
		}
		traffic_control_log_decision(2, true);
		rc = 2;
	} else if (tenant_can_admit && !global_can_admit) {
		/* Case #3: Tenant is under utilized, but system is over utilized. So, shed work and then admit. */
		assert(time_global_oversupply >= absolute_deadline);

		int worker_id_virt_just_shed;
		while (!global_can_admit) {
			assert(worker_id_v < 0);
			worker_id_virt_just_shed = -1;

			/* Strong shed (weak_shed=false): evict other tenants' best-effort work */
			uint64_t cleared_demand = traffic_control_shed_work(tenant, time_global_oversupply, &worker_id_virt_just_shed, false);
			if (cleared_demand == 0) {
				/* No "bad" tenant requests left in the global queue, so we have deny the guaranteed tenant job. */
				traffic_control_log_decision(3, false);
				goto guaranteed_work_not_admitted;
			}

			assert(worker_id_virt_just_shed >= 0);
			/* Retry placement on the DBF that just freed capacity */
			void *global_dbf = global_virt_worker_dbfs[worker_id_virt_just_shed];
			global_can_admit = dbf_list_try_add_new_demand(global_dbf, start_time, absolute_deadline, estimated_execution, sandbox_meta);
			time_global_oversupply = dbf_get_time_of_oversupply(global_dbf);
		}

		worker_id_v = worker_id_virt_just_shed;
		tenant_can_admit = tenant_try_add_job_as_guaranteed(tenant, start_time, estimated_execution, sandbox_meta);
		assert(tenant_can_admit);
		traffic_control_log_decision(3, true);
		rc = 1;
	} else if (!tenant_can_admit && !global_can_admit) {
		/* Case #4: Do NOT admit. */
		// printf("Case #4: Do NOT admit.\n");
		// traffic_control_log_decision(4, false);
		// goto any_work_not_admitted;

		// assert(time_global_oversupply >= absolute_deadline);
		int worker_id_virt_just_shed;
		while (!global_can_admit) {
			assert(worker_id_v < 0);
			worker_id_virt_just_shed = -1;

			/* Weak shed (weak_shed=true): last-resort eviction to fit a best-effort job */
			uint64_t cleared_demand = traffic_control_shed_work(tenant, time_global_oversupply, &worker_id_virt_just_shed, true);
			if (cleared_demand == 0) {
				/* No "bad" tenant requests left in the global queue, so we have deny this new job. */
				traffic_control_log_decision(4, false);
				goto any_work_not_admitted;
			}

			assert(worker_id_virt_just_shed >= 0);
			void *global_dbf = global_virt_worker_dbfs[worker_id_virt_just_shed];
			global_can_admit = dbf_list_try_add_new_demand(global_dbf, start_time, absolute_deadline, estimated_execution, sandbox_meta);
			time_global_oversupply = dbf_get_time_of_oversupply(global_dbf);
		}
		// printf("Case #4: Do admit %s.\n", tenant->name);
		assert (global_can_admit);
		worker_id_v = worker_id_virt_just_shed;
		rc = 2;
	}

done:
	*ret_code = rc;
	*worker_id_virtual = worker_id_v;
	return work_admitted;
any_work_not_admitted:
	work_admitted = 0;
	/* 4295 = re-submitted (exceeded estimation) denial, 4290 = first-time denial */
	rc = sandbox_meta->exceeded_estimation ? 4295 : 4290;
	goto done;
guaranteed_work_not_admitted:
	work_admitted = 0;
	/* 4296 = re-submitted guaranteed denial, 4291 = first-time guaranteed denial */
	rc = sandbox_meta->exceeded_estimation ? 4296: 4291;
	goto done;
}
uint64_t traffic_control_shed_work(struct tenant *tenant_to_exclude, uint64_t time_of_oversupply, int *worker_id_virt_just_shed, bool weak_shed)
{
uint64_t cleared_demand = 0;
*worker_id_virt_just_shed = -1;
struct sandbox_metadata *sandbox_meta = NULL;
struct tenant *tenant_to_punish = tenant_database_find_tenant_most_oversupply(tenant_to_exclude, time_of_oversupply, weak_shed, &sandbox_meta);
if (tenant_to_punish == NULL) {
// printf("null\n");
assert (sandbox_meta == NULL);
goto done;
}
if (tenant_to_punish == tenant_to_exclude) {
// printf("itself\n");
// TODO: Should be able to kill from itself???
goto done;
}
assert(sandbox_meta);
assert(sandbox_meta->tenant == tenant_to_punish);
assert(sandbox_meta->absolute_deadline <= time_of_oversupply);
assert(sandbox_meta->terminated == false);
assert(sandbox_meta->error_code == 0);
if (sandbox_meta->state == SANDBOX_INITIALIZED) {
assert(sandbox_meta->tenant_queue == tenant_to_punish->global_sandbox_metas);
sandbox_meta->error_code = 4090;
assert(sandbox_meta->owned_worker_idx == -2);
} else {
assert(sandbox_meta->tenant_queue == tenant_to_punish->local_sandbox_metas);
sandbox_meta->error_code = 4091;
struct message new_message = { 0 };
if (sandbox_meta->owned_worker_idx >= 0 && sandbox_refs[sandbox_meta->id % RUNTIME_MAX_ALIVE_SANDBOXES]) {
assert(comm_to_workers);
struct comm_with_worker *ctw = &comm_to_workers[sandbox_meta->owned_worker_idx];
assert(ctw);
assert(ctw->worker_idx == sandbox_meta->owned_worker_idx);
assert(ck_ring_size(&ctw->worker_ring) < LISTENER_THREAD_RING_SIZE);
new_message.sandbox_meta = sandbox_meta;
new_message.sandbox = sandbox_meta->sandbox_shadow;
new_message.sandbox_id = sandbox_meta->id;
new_message.message_type = MESSAGE_CTW_SHED_CURRENT_JOB;
if (!ck_ring_enqueue_spsc_message(&ctw->worker_ring, ctw->worker_ring_buffer, &new_message)) {
panic("Ring buffer was full and the enqueue failed!")
}
pthread_kill(runtime_worker_threads[sandbox_meta->owned_worker_idx], SIGALRM);
}
}
struct sandbox_metadata *sm_to_remove = NULL;
int rc = priority_queue_dequeue_nolock(sandbox_meta->tenant_queue, (void **)&sm_to_remove);
assert(rc == 0);
assert(sandbox_meta == sm_to_remove);
assert(sandbox_meta->trs_job_node == NULL);
assert(sandbox_meta->remaining_exec > 0);
assert(sandbox_meta->global_queue_type == 2);
assert(sandbox_meta->worker_id_virt>=0);
void *global_dbf = global_virt_worker_dbfs[sandbox_meta->worker_id_virt];
dbf_list_reduce_demand(sandbox_meta, sandbox_meta->remaining_exec + sandbox_meta->extra_slack, true);
sandbox_meta->demand_node = NULL;
cleared_demand = sandbox_meta->remaining_exec;
// sandbox_meta->remaining_exec = 0;
// sandbox_meta->extra_slack = 0;
*worker_id_virt_just_shed = sandbox_meta->worker_id_virt;
sandbox_meta->terminated = true;
done:
return cleared_demand;
}
#endif /* TRAFFIC_CONTROL */

@ -17,6 +17,8 @@
#include "scheduler.h"
#include "tenant_functions.h"
#include "worker_thread.h"
#include "priority_queue.h"
#include "dbf.h"
/***************************
* Worker Thread State *
@ -30,6 +32,12 @@ thread_local int worker_thread_idx;
/* Used to track tenants' timeouts */
thread_local struct priority_queue *worker_thread_timeout_queue;
/* Used to track worker's dbf */
thread_local void *worker_dbf;
thread_local struct sandbox_metadata *sandbox_meta;
/***********************
* Worker Thread Logic *
**********************/
@ -60,6 +68,22 @@ worker_thread_main(void *argument)
if (scheduler == SCHEDULER_MTDS) {
worker_thread_timeout_queue = priority_queue_initialize(RUNTIME_MAX_TENANT_COUNT, false,
tenant_timeout_get_priority);
} else if (scheduler == SCHEDULER_MTDBF) {
#ifdef TRAFFIC_CONTROL
/* Initialize worker's dbf data structure */
/* To make sure global dbf reads out the max deadline in the system */
sleep(1);
// worker_dbf = dbf_initialize(1, 100, worker_thread_idx, NULL);
// worker_dbf = dbf_grow(worker_dbf, dbf_get_max_relative_dl(global_dbf));
// worker_dbf = dbf_grow(worker_dbf, runtime_max_deadline);
// printf("WORKER ");
// dbf_print(worker_dbf);
sandbox_meta = malloc(sizeof(struct sandbox_metadata));
sandbox_meta->tenant = NULL;
// if (worker_thread_idx == 0) global_worker_dbf = worker_dbf; // TODO: temp for debugging
#endif
}
software_interrupt_unmask_signal(SIGFPE);

@ -87,7 +87,7 @@ generate_spec_json() {
done
done
if [ "$CLIENT_TERMINATE_SERVER" == true ]; then jq_admin_spec; fi
if [ "$ADMIN_ACCESS" == true ]; then jq_admin_spec; fi
# Merges all of the multiple specs for a single module
jq -s '. | sort_by(.name)' ./result_*.json > "./spec.json"

@ -0,0 +1,4 @@
SLEDGE_SCHEDULER=MTDBF
SLEDGE_DISABLE_PREEMPTION=false
SLEDGE_SANDBOX_PERF_LOG=perf.log
SLEDGE_SPINLOOP_PAUSE_ENABLED=false

@ -0,0 +1 @@
out.png

@ -0,0 +1,102 @@
SLEDGE_BINARY_DIR=../../runtime/bin
HOST?=localhost # pass arguments to change this: make client-lpd HOST=10.10.1.4
# HOST=arena0.andrew.cmu.edu
# HOST=c220g2-011017.wisc.cloudlab.us
PORT0=10000
PORT1=15000
PORT2=20000
PORT3=25000
PORT4=30000
PORT5=35000
PORT6=40000
HEY_OPTS=-disable-compression -disable-keepalive -disable-redirects
default: run
clean:
rm -rf res/*
run:
SLEDGE_SIGALRM_HANDLER=TRIAGED SLEDGE_SCHEDULER=MTDBF SLEDGE_SPINLOOP_PAUSE_ENABLED=true SLEDGE_HTTP_SESSION_PERF_LOG=http_perf.log SLEDGE_SANDBOX_PERF_LOG=perf.log LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} ${SLEDGE_BINARY_DIR}/sledgert spec.json
debug:
SLEDGE_SCHEDULER=MTDBF SLEDGE_SPINLOOP_PAUSE_ENABLED=false SLEDGE_NWORKERS=18 LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} gdb ${SLEDGE_BINARY_DIR}/sledgert \
--eval-command="handle SIGUSR1 noprint nostop" \
--eval-command="handle SIGPIPE noprint nostop" \
--eval-command="set pagination off" \
--eval-command="set print pretty" \
--eval-command="run spec.json"
valgrind:
SLEDGE_DISABLE_PREEMPTION=true SLEDGE_NWORKERS=1 LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} valgrind --leak-check=full --max-stackframe=11150456 --run-libc-freeres=no --run-cxx-freeres=no ${SLEDGE_BINARY_DIR}/sledgert spec.json
client-cnn:
curl -H 'Expect: ' -H "Content-Type: image/jpeg" --data-binary "@input-cnn/faces01.jpg" "${HOST}:${PORT0}/cnn"
client-cifar10:
curl -H 'Expect: ' -H "Content-Type: image/bmp" --data-binary "@input-cifar10/airplane1.bmp" "${HOST}:${PORT1}/cifar10"
client-gocr:
curl -H 'Expect: ' -H "Content-Type: application/octet-stream" --data-binary "@input-gocr/5x8.pnm" "${HOST}:${PORT2}/gocr"
client-lpd:
# curl -H 'Expect: ' -H "Content-Type: image/png" --data-binary "@input-lpd-png/Cars0.png" "${HOST}:${PORT3}/lpd"
curl -H 'Expect: ' -H "Content-Type: image/jpeg" --data-binary "@input-lpd-jpg/Cars0.jpg" "${HOST}:${PORT3}/lpd"
client-resize:
curl -H 'Expect: ' -H "Content-Type: image/jpeg" --data-binary "@input-resize/picsum_512x512_01.jpg" "${HOST}:${PORT4}/resize" --output "out-resize.jpg"
client-ekf:
curl -H 'Expect: ' -H "Content-Type: application/octet-stream" --data-binary "@input-ekf/iter00.dat" "${HOST}:${PORT5}/ekf" --output "out-ekf-iter00.dat"
client-fib-curl:
curl -i -H 'Expect: ' -H "Content-Type: text/plain" "${HOST}:${PORT6}/fib?30"
########################################## Choose a random file to send with curl: ##########################################
# Each *-random target picks one file from the workload's input directory with
# `shuf -n 1`, echoes which file was chosen, then POSTs it. `$$` escapes the
# shell's `$` from make; `@` suppresses command echoing.
client-cnn-random:
@dir="input-cnn"; random_file="$$(ls $$dir | shuf -n 1)"; echo "Random file: $$random_file"; \
curl -s -H 'Expect: ' -H "Content-Type: image/jpeg" --data-binary "@$$dir/$$random_file" "${HOST}:${PORT0}/cnn"
client-cifar10-random:
@dir="input-cifar10"; random_file="$$(ls $$dir | shuf -n 1)"; echo "Random file: $$random_file"; \
curl -s -H 'Expect: ' -H "Content-Type: image/bmp" --data-binary "@$$dir/$$random_file" "${HOST}:${PORT1}/cifar10"
client-gocr-random:
@dir="input-gocr"; random_file="$$(ls $$dir | shuf -n 1)"; echo "Random file: $$random_file"; \
curl -s -H 'Expect: ' -H "Content-Type: application/octet-stream" --data-binary "@$$dir/$$random_file" "${HOST}:${PORT2}/gocr"
# PNG variant kept commented out; the JPEG input set is used for lpd.
client-lpd-random:
# @dir="input-lpd-png"; random_file="$$(ls $$dir | shuf -n 1)"; echo "Random file: $$random_file"; \
# curl -s -H 'Expect: ' -H "Content-Type: image/png" --data-binary "@$$dir/$$random_file" "${HOST}:${PORT3}/lpd"
@dir="input-lpd-jpg"; random_file="$$(ls $$dir | shuf -n 1)"; echo "Random file: $$random_file"; \
curl -s -H 'Expect: ' -H "Content-Type: image/jpeg" --data-binary "@$$dir/$$random_file" "${HOST}:${PORT3}/lpd"
# resize/ekf write the response to an output file named after the chosen input.
client-resize-random:
@dir="input-resize"; random_file="$$(ls $$dir | shuf -n 1)"; echo "Random file: $$random_file"; \
curl -s -H 'Expect: ' -H "Content-Type: image/jpeg" --data-binary "@$$dir/$$random_file" "${HOST}:${PORT4}/resize" --output "out-resize-$$random_file"
client-ekf-random:
@dir="input-ekf"; random_file="$$(ls $$dir | shuf -n 1)"; echo "Random file: $$random_file"; \
curl -s -H 'Expect: ' -H "Content-Type: application/octet-stream" --data-binary "@$$dir/$$random_file" "${HOST}:${PORT5}/ekf" --output "out-ekf-$$random_file"
#############################################################################################################################
# Single fib request via httpie, piping the argument (30) in the request body.
client-fib-once:
echo 30 | http ${HOST}:${PORT6}/fib
# http ${HOST}:${PORT6}/fib?30
# Load generators against the fib endpoint: loadtest, hey, and wrk.
client-fib-loadtest:
loadtest -n 10 -c 10 -P 30 "http://${HOST}:${PORT6}/fib"
client-fib-hey:
hey ${HEY_OPTS} -z 10s -c 72 -t 0 -o csv -m POST -d "30\n" "http://${HOST}:${PORT6}/fib"
client-fib-wrk:
wrk -t 1 -c 1 -d 5s -R 1 "http://${HOST}:${PORT6}/fib?30"
# Runtime control endpoints on port 55555; the piped value (5) is the payload
# sent to the admin/terminator handlers (semantics defined by the runtime —
# TODO confirm against the server implementation).
client-admin:
echo 5 | http ${HOST}:55555/admin
client-terminator:
echo 5 | http ${HOST}:55555/terminator

@ -0,0 +1,13 @@
#!/bin/bash
# Generate OCR test inputs: for each DPI setting, render 11 text files
# (5..15 random dictionary words each) to PNG with pango-view, then
# convert each PNG to PNM. Originals (txt + png) land in <dpi>dpi-orig/,
# converted PNM files in <dpi>dpi/.
for dpi in 72 108 144; do
	pnm_dir="${dpi}dpi"
	orig_dir="${dpi}dpi-orig"
	mkdir -p "$pnm_dir"
	mkdir -p "$orig_dir"
	for words in $(seq 5 15); do
		txt="$orig_dir/${words}words.txt"
		png="$orig_dir/${dpi}dpi_${words}words.png"
		pnm="$pnm_dir/${dpi}dpi_${words}words.pnm"
		shuf -n10 /usr/share/dict/american-english > "$txt"
		pango-view --dpi="$dpi" --font=mono -qo "$png" "$txt"
		pngtopnm "$png" > "$pnm"
	done
done

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save