docs: Use style guide format for comments

main
Sean McBride 4 years ago
parent cf42133127
commit 29201f737d

@ -4,12 +4,12 @@
#include <unistd.h>
#include <ucontext.h>
#define ARCH_NREGS (2) // SP + PC only.
#define ARCH_SIG_JMP_OFF 0x100 // Based on code generated!
#define ARCH_NREGS (2) /* SP + PC only */
#define ARCH_SIG_JMP_OFF 0x100 /* Based on code generated! */
/**
* ARM64 code. Currently Unimplemented
**/
*/
typedef uint64_t reg_t;
@ -23,14 +23,14 @@ typedef struct arch_context arch_context_t;
extern void __attribute__((noreturn)) worker_thread_sandbox_switch_preempt(void);
extern __thread arch_context_t worker_thread_base_context;
// Initialized a context, zeroing out registers and setting the Instruction and Stack pointers
/* Initializes a context, zeroing out registers and setting the Instruction and Stack pointers */
static inline void
arch_context_init(arch_context_t *actx, reg_t ip, reg_t sp)
{
memset(&actx->mctx, 0, sizeof(mcontext_t));
memset((void *)actx->regs, 0, sizeof(reg_t) * ARCH_NREGS);
*(actx->regs) = sp;
*(actx->regs) = sp;
*(actx->regs + 1) = ip;
}
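A speculative usage sketch, assuming this header is included; sandbox_entry, stack_memory, and context_init_example are invented names, not call sites from this commit. It highlights that the ip argument comes before sp, even though sp is stored in regs[0] and ip in regs[1].

extern void sandbox_entry(void);   /* hypothetical entry point for the context */
static char stack_memory[1 << 19]; /* hypothetical stack region, sized like WASM_STACK_SIZE */

static void
context_init_example(arch_context_t *context)
{
	/* pass the top of the region, since the stack grows downward */
	arch_context_init(context, (reg_t)sandbox_entry, (reg_t)&stack_memory[sizeof(stack_memory)]);
}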
@ -39,8 +39,8 @@ arch_mcontext_restore(mcontext_t *mc, arch_context_t *ctx)
{
assert(ctx != &worker_thread_base_context);
// if ctx->regs[0] is set, this was last in a user-level context switch state!
// else restore mcontext..
/* if ctx->regs[0] is set, this was last in a user-level context switch state!
else restore mcontext.. */
if (ctx->regs[0]) {
mc->sp = ctx->regs[0];
mc->pc = ctx->regs[1] + ARCH_SIG_JMP_OFF;
@ -70,45 +70,44 @@ arch_context_switch(arch_context_t *ca, arch_context_t *na)
{
if (!ca) {
assert(na);
// switching from "no sandbox to execute" state to "executing a sandbox"
/* switching from "no sandbox to execute" state to "executing a sandbox" */
ca = &worker_thread_base_context;
} else if (!na) {
assert(ca);
// switching from "executing a sandbox" to "no execution" state.
/* switching from "executing a sandbox" to "no execution" state. */
na = &worker_thread_base_context;
} else {
assert(na && ca);
// switching between sandboxes.
/* switching between sandboxes. */
}
reg_t *cr = ca->regs, *nr = na->regs;
assert(cr && nr);
asm volatile ( "mov x0, sp\n\t"
"adr x1, reset%=\n\t"
"str x1, [%[curr], 8]\n\t"
"str x0, [%[curr]]\n\t"
"ldr x2, [%[next]]\n\t"
"cbz x2, slow%=\n\t"
"ldr x3, [%[next], 8]\n\t"
"mov sp, x2\n\t"
"br x3\n\t"
"slow%=:\n\t"
"br %[slowpath]\n\t"
".align 8\n\t"
"reset%=:\n\t"
"mov x1, #0\n\t"
"str x1, [%[next]]\n\t"
".align 8\n\t"
"exit%=:\n\t"
:
: [curr]"r"(cr), [next]"r"(nr), [slowpath]"r"(&worker_thread_sandbox_switch_preempt)
: "memory", "cc", "x0", "x1", "x2", "x3",
"x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
"x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15" );
asm volatile("mov x0, sp\n\t"
"adr x1, reset%=\n\t"
"str x1, [%[curr], 8]\n\t"
"str x0, [%[curr]]\n\t"
"ldr x2, [%[next]]\n\t"
"cbz x2, slow%=\n\t"
"ldr x3, [%[next], 8]\n\t"
"mov sp, x2\n\t"
"br x3\n\t"
"slow%=:\n\t"
"br %[slowpath]\n\t"
".align 8\n\t"
"reset%=:\n\t"
"mov x1, #0\n\t"
"str x1, [%[next]]\n\t"
".align 8\n\t"
"exit%=:\n\t"
:
: [ curr ] "r"(cr), [ next ] "r"(nr), [ slowpath ] "r"(&worker_thread_sandbox_switch_preempt)
: "memory", "cc", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12",
"x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15");
return 0;
}

@ -79,15 +79,16 @@ static void __attribute__((noinline)) arch_context_init(arch_context_t *actx, re
* Preempt the current sandbox and start executing the next sandbox
* @param mc - the context of the current thread of execution
* @param ctx - the context that we want to restore
* @return {0,1} 0 = context restored successfully. 1 = special processing because thread was last in a user-level
* context switch state
**/
* @return Return code in {0,1}
* 0 = context restored successfully.
* 1 = special processing because thread was last in a user-level context switch state
*/
static int
arch_mcontext_restore(mcontext_t *mc, arch_context_t *ctx)
{
assert(ctx != &worker_thread_base_context);
// if ctx->regs[5] is set, this was last in a user-level context switch state
/* if ctx->regs[5] is set, this was last in a user-level context switch state */
bool did_user_level_context_switch = ctx->regs[5];
if (did_user_level_context_switch) {
mc->gregs[REG_RSP] = ctx->regs[5];
@ -95,7 +96,7 @@ arch_mcontext_restore(mcontext_t *mc, arch_context_t *ctx)
ctx->regs[5] = 0;
return 1;
} else {
// Restore mcontext
/* Restore mcontext */
memcpy(mc, &ctx->mctx, sizeof(mcontext_t));
memset(&ctx->mctx, 0, sizeof(mcontext_t));
return 0;
@ -106,7 +107,7 @@ arch_mcontext_restore(mcontext_t *mc, arch_context_t *ctx)
* Save the context of the currently executing process
* @param ctx - destination
* @param mc - source
**/
*/
static void
arch_mcontext_save(arch_context_t *ctx, mcontext_t *mc)
{
@ -123,14 +124,14 @@ arch_mcontext_save(arch_context_t *ctx, mcontext_t *mc)
*
* NULL in either of these values indicates the "no sandbox to execute" state,
* which defaults to resuming execution of main
**/
*/
static inline int
arch_context_switch(arch_context_t *current, arch_context_t *next)
{
// if both current and next are NULL, there is no state change
/* if both current and next are NULL, there is no state change */
assert(current != NULL || next != NULL);
// Set any NULLs to worker_thread_base_context to resume execution of main
/* Set any NULLs to worker_thread_base_context to resume execution of main */
if (current == NULL) current = &worker_thread_base_context;
if (next == NULL) next = &worker_thread_base_context;

@ -19,7 +19,8 @@
* PPoPP implementation paper, "Correct and Efficient Work-Stealing for Weak Memory Models"
* https://www.di.ens.fr/~zappa/readings/ppopp13.pdf
*/
// TODO: dynamic resize!
/* TODO: Implement the ability to dynamically resize! */
#define DEQUE_MAX_SZ (1 << 23)
#define DEQUE_PROTOTYPE(name, type) \

@ -14,13 +14,14 @@ struct http_request {
int header_count;
char * body;
int body_length;
int body_read_length; // How far we've read
// additional for http-parser
int last_was_value; // http-parser flag used to help the http-parser callbacks differentiate between header
// fields and values to know when to allocate a new header
int header_end; // boolean flag set when header processing is complete
int message_begin; // boolean flag set when body processing begins
int message_end; // boolean flag set when body processing is complete
int body_read_length; /* How far we've read */
/* additional members for http-parser */
int last_was_value; /* http-parser flag used to help the http-parser callbacks differentiate between header
fields and values to know when to allocate a new header */
int header_end; /* boolean flag set when header processing is complete */
int message_begin; /* boolean flag set when body processing begins */
int message_end; /* boolean flag set when body processing is complete */
};
/***************************************************

@ -5,7 +5,7 @@
#include <types.h>
#include <sys/uio.h>
// Conditionally load libuv
/* Conditionally load libuv */
#ifdef USE_HTTP_UVIO
#include <uv.h>
#endif
@ -23,7 +23,7 @@ struct http_response {
char * status;
int status_length;
#ifdef USE_HTTP_UVIO
uv_buf_t bufs[HTTP_MAX_HEADER_COUNT * 2 + 3]; // max headers, one line for status code, remaining for body!
uv_buf_t bufs[HTTP_MAX_HEADER_COUNT * 2 + 3]; /* max headers, one line for status code, remaining for body! */
#else
struct iovec bufs[HTTP_MAX_HEADER_COUNT * 2 + 3];
#endif

@ -12,20 +12,21 @@
#include <http_request.h>
/**
* TODO: is there some weird edge case where a UNICODE character might be split between reads? Do we care?
* Called after libuv has read a chunk of data
* Parses data read by the libuv stream chunk-by-chunk until the message is complete
* Then stops the stream and wakes up the sandbox
* @param stream
* @param number_read bytes read
* @param buffer unused
**/
*
* TODO: is there some weird edge case where a UNICODE character might be split between reads? Do we care?
* Called after libuv has read a chunk of data
*/
static inline void
libuv_callbacks_on_read_parse_http_request(uv_stream_t *stream, ssize_t number_read, const uv_buf_t *buffer)
{
struct sandbox *sandbox = stream->data;
// Parse the chunks libuv has read on our behalf until we've parse to message end
/* Parse the chunks libuv has read on our behalf until we've parsed to the message end */
if (number_read > 0) {
if (sandbox_parse_http_request(sandbox, number_read) != 0) return;
sandbox->request_response_data_length += number_read;
@ -33,7 +34,7 @@ libuv_callbacks_on_read_parse_http_request(uv_stream_t *stream, ssize_t number_r
if (!rh->message_end) return;
}
// When the entire message has been read, stop the stream and wakeup the sandbox
/* When the entire message has been read, stop the stream and wakeup the sandbox */
uv_read_stop(stream);
worker_thread_wakeup_sandbox(sandbox);
}
@ -41,7 +42,7 @@ libuv_callbacks_on_read_parse_http_request(uv_stream_t *stream, ssize_t number_r
/**
* On libuv close, executes this callback to wake the blocked sandbox back up
* @param stream
**/
*/
static inline void
libuv_callbacks_on_close_wakeup_sakebox(uv_handle_t *stream)
{
@ -53,7 +54,7 @@ libuv_callbacks_on_close_wakeup_sakebox(uv_handle_t *stream)
* On libuv shutdown, executes this callback to wake the blocked sandbox back up
* @param req shutdown request
* @param status unused in callback
**/
*/
static inline void
libuv_callbacks_on_shutdown_wakeup_sakebox(uv_shutdown_t *req, int status)
{
@ -66,7 +67,7 @@ libuv_callbacks_on_shutdown_wakeup_sakebox(uv_shutdown_t *req, int status)
* In case of error, shutdown the sandbox
* @param write shutdown request
* @param status status code
**/
*/
static inline void
libuv_callbacks_on_write_wakeup_sandbox(uv_write_t *write, int status)
{

@ -8,22 +8,24 @@
struct module {
char name[MODULE_MAX_NAME_LENGTH];
char path[MODULE_MAX_PATH_LENGTH];
void * dynamic_library_handle; // Handle to the *.so of the serverless function
void * dynamic_library_handle; /* Handle to the *.so of the serverless function */
i32 argument_count;
u32 stack_size; // a specification?
u64 max_memory; // perhaps a specification of the module. (max 4GB)
u32 stack_size; /* a specification? */
u64 max_memory; /* perhaps a specification of the module. (max 4GB) */
u32 relative_deadline_us;
u32 reference_count; // ref count how many instances exist here.
u32 reference_count; /* ref count how many instances exist here. */
struct indirect_table_entry indirect_table[INDIRECT_TABLE_SIZE];
struct sockaddr_in socket_address;
int socket_descriptor;
int port;
// unfortunately, using UV for accepting connections is not great!
// on_connection, to create a new accepted connection, will have to
// init a tcp handle, which requires a uvloop. cannot use main as
// rest of the connection is handled in sandboxing threads, with per-core(per-thread) tls data-structures.
// so, using direct epoll for accepting connections.
/* Unfortunately, using libuv to accept connections is not great!
on_connection would have to init a TCP handle for each newly accepted connection,
which requires a uv loop. We cannot use main's loop, because the rest of the
connection is handled in sandboxing threads with per-core (per-thread) TLS data structures,
so we accept connections with epoll directly. */
// TODO: Should this be removed?
// uv_handle_t srvuv;
unsigned long max_request_size;
@ -31,33 +33,33 @@ struct module {
int request_header_count;
char request_content_type[HTTP_MAX_HEADER_VALUE_LENGTH];
// resp size including headers!
/* resp size including headers! */
unsigned long max_response_size;
int response_header_count;
char response_content_type[HTTP_MAX_HEADER_VALUE_LENGTH];
char response_headers[HTTP_MAX_HEADER_COUNT][HTTP_MAX_HEADER_LENGTH];
// Equals the largest of either max_request_size or max_response_size
/* Equals the largest of either max_request_size or max_response_size */
unsigned long max_request_or_response_size;
// Functions to initialize aspects of sandbox
/* Functions to initialize aspects of sandbox */
mod_glb_fn_t initialize_globals;
mod_mem_fn_t initialize_memory;
mod_tbl_fn_t initialize_tables;
mod_libc_fn_t initialize_libc;
// Entry Function to invoke serverless function
/* Entry Function to invoke serverless function */
mod_main_fn_t main;
};
/***************************************
* Public Static Inlines
***************************************/
/*************************
* Public Static Inlines *
************************/
/**
* Increment a modules reference count
* @param module
**/
*/
static inline void
module_acquire(struct module *module)
{
@ -68,7 +70,7 @@ module_acquire(struct module *module)
* Get a module's argument count
* @param module
* @returns the number of arguments
**/
*/
static inline i32
module_get_argument_count(struct module *module)
{
@ -78,42 +80,42 @@ module_get_argument_count(struct module *module)
/**
* Invoke a module's initialize_globals
* @param module
**/
*/
static inline void
module_initialize_globals(struct module *module)
{
// called in a sandbox.
/* called in a sandbox. */
module->initialize_globals();
}
/**
* Invoke a module's initialize_tables
* @param module
**/
*/
static inline void
module_initialize_table(struct module *module)
{
// called at module creation time (once only per module).
/* called at module creation time (once only per module). */
module->initialize_tables();
}
/**
* Invoke a module's initialize_libc
* @param module
* @param env
* @param arguments
**/
* @param module - module whose libc we are initializing
* @param env - address?
* @param arguments - address?
*/
static inline void
module_initialize_libc(struct module *module, i32 env, i32 arguments)
{
// called in a sandbox.
/* called in a sandbox. */
module->initialize_libc(env, arguments);
}
/**
* Invoke a module's initialize_memory
* @param module
**/
* @param module - the module whose memory we are initializing
*/
static inline void
module_initialize_memory(struct module *module)
{
@ -123,9 +125,9 @@ module_initialize_memory(struct module *module)
/**
* Validate module, defined as having a non-NULL dynamic library handle and entry function pointer
* @param module
* @return 1 if valid. 0 if invalid
**/
* @param module - module to validate
* @return true if valid. false if invalid
*/
static inline bool
module_is_valid(struct module *module)
{
@ -137,7 +139,8 @@ module_is_valid(struct module *module)
* @param module
* @param argc standard UNIX count of arguments
* @param argv standard UNIX vector of arguments
**/
* @return return code of module's main function
*/
static inline i32
module_main(struct module *module, i32 argc, i32 argv)
{
@ -147,7 +150,7 @@ module_main(struct module *module, i32 argc, i32 argv)
/**
* Decrement a modules reference count
* @param module
**/
*/
static inline void
module_release(struct module *module)
{
@ -163,7 +166,7 @@ module_release(struct module *module)
* @param response_count
* @param response_headers
* @param response_content_type
**/
*/
static inline void
module_set_http_info(struct module *module, int request_count, char *request_headers, char request_content_type[],
int response_count, char *response_headers, char response_content_type[])
@ -177,9 +180,9 @@ module_set_http_info(struct module *module, int request_count, char *request_hea
strcpy(module->response_content_type, response_content_type);
}
/***************************************
* Public Methods from module.c
***************************************/
/********************************
* Public Methods from module.c *
*******************************/
void module_free(struct module *module);
struct module *module_new(char *mod_name, char *mod_path, i32 argument_count, u32 stack_sz, u32 max_heap,

@ -11,16 +11,15 @@ extern int module_database_free_offset;
/**
* Adds a module to the in-memory module DB
* Note: This was static inline, which I've unwound. I am unclear of the perf implications of this
* @param module module to add
* @return 0 on success. Aborts program on failure
**/
*/
static inline int
module_database_add(struct module *module)
{
assert(module->socket_descriptor == -1);
// __sync_fetch_and_add is provided by GCC
/* __sync_fetch_and_add is provided by GCC */
int f = __sync_fetch_and_add(&module_database_free_offset, 1);
assert(f < MODULE_MAX_MODULE_COUNT);
module_database[f] = module;
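__sync_fetch_and_add atomically adds to its operand and returns the value it held before the add, so concurrent callers of module_database_add each claim a distinct slot. A stand-alone illustration of that return-the-old-value behavior (not code from this repository):

#include <assert.h>

static int counter = 0;

static int
claim_next_slot(void)
{
	/* returns 0 on the first call, 1 on the second, and so on, even with concurrent callers */
	return __sync_fetch_and_add(&counter, 1);
}

int
main(void)
{
	assert(claim_next_slot() == 0);
	assert(claim_next_slot() == 1);
	return 0;
}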

@ -12,10 +12,10 @@
* priority element that can be used to maintain a read replica
* @param element
* @returns priority (a u64)
**/
*/
typedef uint64_t (*priority_queue_get_priority_t)(void *element);
// We assume that priority is expressed in terms of a 64 bit unsigned integral
/* We assume that priority is expressed in terms of a 64 bit unsigned integral */
struct priority_queue {
ck_spinlock_fas_t lock;
uint64_t highest_priority;

@ -1,7 +1,7 @@
#ifndef SFRT_RUNTIME_H
#define SFRT_RUNTIME_H
#include <sys/epoll.h> // for epoll_create1(), epoll_ctl(), struct epoll_event
#include <sys/epoll.h> /* for epoll_create1(), epoll_ctl(), struct epoll_event */
#include "types.h"
extern int runtime_epoll_file_descriptor;

@ -14,9 +14,9 @@
#include "ps_list.h"
#include "software_interrupt.h"
/**************************
* Structs and Types *
**************************/
/*********************
* Structs and Types *
********************/
struct sandbox_io_handle {
int file_descriptor;
@ -34,29 +34,29 @@ typedef enum
struct sandbox {
sandbox_state_t state;
u32 sandbox_size; // The struct plus enough buffer to hold the request or response (sized off largest)
u32 sandbox_size; /* The struct plus enough buffer to hold the request or response (sized off largest) */
void *linear_memory_start; // after sandbox struct
u32 linear_memory_size; // from after sandbox struct
void *linear_memory_start; /* after sandbox struct */
u32 linear_memory_size; /* from after sandbox struct */
u64 linear_memory_max_size;
void *stack_start; // guess we need a mechanism for stack allocation.
u32 stack_size; // and to set the size of it.
void *stack_start;
u32 stack_size;
arch_context_t ctxt; // register context for context switch.
arch_context_t ctxt; /* register context for context switch. */
u64 total_time;
u64 start_time;
u64 absolute_deadline;
struct module *module; // the module this is an instance of
struct module *module; /* the module this is an instance of */
i32 arguments_offset; // actual placement of arguments in the sandbox.
void *arguments; // arguments from request, must be of module->argument_count size.
i32 arguments_offset; /* actual placement of arguments in the sandbox. */
void *arguments; /* arguments from request, must be of module->argument_count size. */
i32 return_value;
struct sandbox_io_handle io_handles[SANDBOX_MAX_IO_HANDLE_COUNT];
struct sockaddr client_address; // client requesting connection!
struct sockaddr client_address; /* client requesting connection! */
int client_socket_descriptor;
uv_tcp_t client_libuv_stream;
uv_shutdown_t client_libuv_shutdown_request;
@ -68,12 +68,12 @@ struct sandbox {
char * read_buffer;
ssize_t read_length, read_size;
// Used for the scheduling runqueue as an in-place linked list data structure.
// The variable name "list" is used for ps_list's default name-based MACROS.
/* Used for the scheduling runqueue as an in-place linked list data structure. */
/* The variable name "list" is used for ps_list's default name-based MACROS. */
struct ps_list list;
ssize_t request_response_data_length; // <= max(module->max_request_or_response_size)
char request_response_data[1]; // of request_response_data_length, following sandbox mem..
ssize_t request_response_data_length; /* Should be <= module->max_request_or_response_size */
char request_response_data[1]; /* of request_response_data_length, following sandbox mem.. */
} PAGE_ALIGNED;
typedef struct sandbox sandbox_t;
@ -129,7 +129,7 @@ sandbox_get_arguments(struct sandbox *sandbox)
* Initializes and returns an IO handle on the current sandbox ready for use
* @param sandbox
* @return index of handle we preopened or -1 on error (sandbox is null or all io_handles are exhausted)
**/
*/
static inline int
sandbox_initialize_io_handle(struct sandbox *sandbox)
{
@ -150,16 +150,17 @@ sandbox_initialize_io_handle(struct sandbox *sandbox)
* @param sandbox
* @param file_descriptor what we'll set on the IO handle after initialization
* @return index of handle we preopened or -1 if all io_handles are exhausted
**/
*/
static inline int
sandbox_initialize_io_handle_and_set_file_descriptor(struct sandbox *sandbox, int file_descriptor)
{
if (!sandbox) return -1;
if (file_descriptor < 0) return file_descriptor;
int io_handle_index = sandbox_initialize_io_handle(sandbox);
if (io_handle_index != -1)
if (io_handle_index != -1) {
sandbox->io_handles[io_handle_index].file_descriptor =
file_descriptor; // well, per sandbox.. so synchronization necessary!
file_descriptor; /* per sandbox, so synchronization necessary! */
}
return io_handle_index;
}
@ -170,7 +171,7 @@ sandbox_initialize_io_handle_and_set_file_descriptor(struct sandbox *sandbox, in
* @param io_handle_index index of the sandbox io_handles we want to set
* @param file_descriptor the file descripter we want to set it to
* @returns the index that was set or -1 in case of error
**/
*/
static inline int
sandbox_set_file_descriptor(struct sandbox *sandbox, int io_handle_index, int file_descriptor)
{
@ -188,7 +189,7 @@ sandbox_set_file_descriptor(struct sandbox *sandbox, int io_handle_index, int fi
* @param sandbox
* @param io_handle_index index into the sandbox's io_handles table
* @returns file descriptor or -1 in case of error
**/
*/
static inline int
sandbox_get_file_descriptor(struct sandbox *sandbox, int io_handle_index)
{
@ -201,12 +202,12 @@ sandbox_get_file_descriptor(struct sandbox *sandbox, int io_handle_index)
* Close the sandbox's ith io_handle
* @param sandbox
* @param io_handle_index index of the handle to close
**/
*/
static inline void
sandbox_close_file_descriptor(struct sandbox *sandbox, int io_handle_index)
{
if (io_handle_index >= SANDBOX_MAX_IO_HANDLE_COUNT || io_handle_index < 0) return;
// TODO: Do we actually need to call some sort of close function here?
/* TODO: Do we actually need to call some sort of close function here? */
sandbox->io_handles[io_handle_index].file_descriptor = -1;
}
@ -215,7 +216,7 @@ sandbox_close_file_descriptor(struct sandbox *sandbox, int io_handle_index)
* @param sandbox
* @param io_handle_index index of the handle containing libuv_handle???
* @returns any libuv handle or a NULL pointer in case of error
**/
*/
static inline union uv_any_handle *
sandbox_get_libuv_handle(struct sandbox *sandbox, int io_handle_index)
{

@ -15,8 +15,8 @@ struct sandbox_request {
char * arguments;
int socket_descriptor;
struct sockaddr *socket_address;
u64 start_time; // cycles
u64 absolute_deadline; // cycles
u64 start_time; /* cycles */
u64 absolute_deadline; /* cycles */
};
typedef struct sandbox_request sandbox_request_t;
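Both start_time and absolute_deadline are kept in cycles, while a module's deadline is specified in microseconds. One plausible conversion, sketched here as an assumption rather than taken from this commit: a clock of X MHz ticks X times per microsecond, so the runtime's processor speed can scale the relative deadline directly.

static inline u64
absolute_deadline_in_cycles_sketch(u64 start_time_in_cycles, u32 relative_deadline_us, float processor_speed_MHz)
{
	/* illustrative only; the real computation lives outside this diff */
	return start_time_in_cycles + (u64)(relative_deadline_us * processor_speed_MHz);
}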
@ -31,7 +31,7 @@ DEQUE_PROTOTYPE(sandbox, sandbox_request_t *);
* @param socket_address
* @param start_time the timestamp of when we received the request from the network (in cycles)
* @return the new sandbox request
**/
*/
static inline sandbox_request_t *
sandbox_request_allocate(struct module *module, char *arguments, int socket_descriptor,
const struct sockaddr *socket_address, u64 start_time)

@ -3,7 +3,7 @@
#include <sandbox_request.h>
// Returns pointer back if successful, null otherwise
/* Returns pointer back if successful, null otherwise */
typedef sandbox_request_t *(*sandbox_request_scheduler_add_t)(void *);
typedef sandbox_request_t *(*sandbox_request_scheduler_remove_t)(void);
typedef uint64_t (*sandbox_request_scheduler_peek_t)(void);

@ -4,7 +4,7 @@
#include <stdbool.h>
#include <sandbox.h>
// Returns pointer back if successful, null otherwise
/* Returns pointer back if successful, null otherwise */
typedef struct sandbox *(*sandbox_run_queue_add_t)(struct sandbox *);
typedef bool (*sandbox_run_queue_is_empty_t)(void);
typedef void (*sandbox_run_queue_delete_t)(struct sandbox *sandbox);
@ -24,7 +24,7 @@ typedef struct sandbox_run_queue_config_t {
void sandbox_run_queue_initialize(sandbox_run_queue_config_t *config);
// This is currently only used by worker_thread_wakeup_sandbox
/* This is currently only used by worker_thread_wakeup_sandbox */
struct sandbox *sandbox_run_queue_add(struct sandbox *);
void sandbox_run_queue_delete(struct sandbox *);
bool sandbox_run_queue_is_empty();

@ -5,16 +5,16 @@
#include <assert.h>
#include <signal.h>
/***************************************
* Externs
***************************************/
/************
* Externs *
***********/
extern __thread volatile sig_atomic_t software_interrupt_is_disabled;
extern uint64_t SOFTWARE_INTERRUPT_INTERVAL_DURATION_IN_CYCLES;
/***************************************
* Public Static Inlines
***************************************/
/*************************
* Public Static Inlines *
************************/
static inline void
software_interrupt_disable(void)
@ -46,7 +46,7 @@ software_interrupt_is_enabled(void)
* Masks a signal on the current thread
* @param signal - the signal you want to mask
* @return 0 on success. Exits program otherwise
**/
*/
static inline int
software_interrupt_mask_signal(int signal)
{
@ -71,7 +71,7 @@ software_interrupt_mask_signal(int signal)
* Unmasks a signal on the current thread
* @param signal - the signal you want to unmask
* @return 0 on success. Exits program otherwise
**/
*/
static inline int
software_interrupt_unmask_signal(int signal)
{
@ -92,9 +92,9 @@ software_interrupt_unmask_signal(int signal)
return 0;
}
/***************************************
* Exports from module.c
***************************************/
/*************************
* Exports from module.c *
************************/
void software_interrupt_initialize(void);
void software_interrupt_arm_timer(void);

@ -37,7 +37,7 @@
#define round_to_page(x) round_to_pow2(x, PAGE_SIZE)
#define round_up_to_page(x) round_up_to_pow2(x, PAGE_SIZE)
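A worked example of the page-rounding helper that later sizes max_request_or_response_size, assuming the conventional 4 KB PAGE_SIZE and that round_up_to_pow2 rounds up to the next multiple (its definition is not part of this diff), the following assertions would hold:

assert(round_up_to_page(1)    == 4096);
assert(round_up_to_page(5000) == 8192);
assert(round_up_to_page(4096) == 4096); /* already page-aligned values are unchanged */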
// Type alias's so I don't have to write uint32_t a million times
/* Type aliases so I don't have to write uint32_t a million times */
typedef signed char i8;
typedef unsigned char u8;
typedef int16_t i16;
@ -47,24 +47,26 @@ typedef uint32_t u32;
typedef int64_t i64;
typedef uint64_t u64;
// FIXME: per-module configuration?
#define WASM_PAGE_SIZE (1024 * 64) // 64KB
#define WASM_START_PAGES (1 << 8) // 16MB
#define WASM_MAX_PAGES (1 << 15) // 4GB
#define WASM_STACK_SIZE (1 << 19) // 512KB.
#define SBOX_MAX_MEM (1L << 32) // 4GB
/* FIXME: per-module configuration? */
#define WASM_PAGE_SIZE (1024 * 64) /* 64KB */
#define WASM_START_PAGES (1 << 8) /* 16MB */
#define WASM_MAX_PAGES (1 << 15) /* 4GB */
#define WASM_STACK_SIZE (1 << 19) /* 512KB */
#define SBOX_MAX_MEM (1L << 32) /* 4GB */
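Spelling out the arithmetic behind these limits: WASM_START_PAGES is 1 << 8 = 256 pages, and at 64 KB per page that is 16 MB of initial linear memory, matching its comment; SBOX_MAX_MEM is 1L << 32 bytes = 4 GB, the full 32-bit range a sandbox may map. Note that (1 << 15) pages at 64 KB each works out to 2 GB, so WASM_MAX_PAGES and its 4 GB annotation appear to reflect different intents.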
// These are per module symbols and I'd need to dlsym for each module. instead just use global constants, see above
// macros. The code generator compiles in the starting number of wasm pages, and the maximum number of pages If we try
// and allocate more than max_pages, we should fault
/* These are per-module symbols and I'd need to dlsym for each module. Instead just use global constants, see above
macros. The code generator compiles in the starting number of wasm pages and the maximum number of pages. If we try
to allocate more than max_pages, we should fault */
// TODO: Should this be deleted?
// extern u32 starting_pages;
// extern u32 max_pages;
// The code generator also compiles in stubs that populate the linear memory and function table
/* The code generator also compiles in stubs that populate the linear memory and function table */
void populate_memory(void);
void populate_table(void);
// memory/* also provides the table access functions
/* the memory/ sources also provide the table access functions */
#define INDIRECT_TABLE_SIZE (1 << 10)
struct indirect_table_entry {
@ -74,12 +76,14 @@ struct indirect_table_entry {
extern __thread struct indirect_table_entry *module_indirect_table;
// for sandbox linear memory isolation
/* for sandbox linear memory isolation */
extern __thread void *sandbox_lmbase;
extern __thread u32 sandbox_lmbound;
extern i32 runtime_log_file_descriptor; // TODO: LOG_TO_FILE logic is untested
// functions in the module to lookup and call per sandbox.
/* TODO: LOG_TO_FILE logic is untested */
extern i32 runtime_log_file_descriptor;
/* functions in the module to lookup and call per sandbox. */
typedef i32 (*mod_main_fn_t)(i32 a, i32 b);
typedef void (*mod_glb_fn_t)(void);
typedef void (*mod_mem_fn_t)(void);
@ -91,60 +95,60 @@ typedef void (*mod_libc_fn_t)(i32, i32);
* If DEBUG is not set, debuglog does nothing
* If DEBUG is set and LOG_TO_FILE is set, debuglog prints to the logfile defined in runtime_log_file_descriptor
* If DEBUG is set and LOG_TO_FILE is not set, debuglog prints to STDOUT
**/
*/
#ifdef DEBUG
#ifdef LOG_TO_FILE
#define debuglog(fmt, ...) \
dprintf(runtime_log_file_descriptor, "(%d,%lu) %s: " fmt, sched_getcpu(), pthread_self(), __func__, \
##__VA_ARGS__)
#else // !LOG_TO_FILE
#else /* !LOG_TO_FILE */
#define debuglog(fmt, ...) printf("(%d,%lu) %s: " fmt, sched_getcpu(), pthread_self(), __func__, ##__VA_ARGS__)
#endif // LOG_TO_FILE
#else // !DEBUG
#endif /* LOG_TO_FILE */
#else /* !DEBUG */
#define debuglog(fmt, ...)
#endif // DEBUG
#endif /* DEBUG */
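A hypothetical call site, with the format string and arguments invented for illustration: debuglog takes a printf-style format plus arguments, prefixes each message with the CPU, pthread id, and calling function, and compiles away entirely when DEBUG is unset.

debuglog("sandbox of module %s returned %d\n", sandbox->module->name, sandbox->return_value);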
#define HTTP_MAX_HEADER_COUNT 16
#define HTTP_MAX_HEADER_LENGTH 32
#define HTTP_MAX_HEADER_VALUE_LENGTH 64
#define HTTP_RESPONSE_200_OK "HTTP/1.1 200 OK\r\n"
#define HTTP_RESPONSE_CONTENT_LENGTH "Content-length: \r\n\r\n" // content body follows this
#define HTTP_RESPONSE_CONTENT_LENGTH "Content-length: \r\n\r\n" /* content body follows this */
#define HTTP_RESPONSE_CONTENT_TYPE "Content-type: \r\n"
#define HTTP_RESPONSE_CONTENT_TYPE_PLAIN "text/plain"
#define JSON_MAX_ELEMENT_COUNT 16 // Max number of elements defined in JSON
#define JSON_MAX_ELEMENT_SIZE 1024 // Max size of a single module in JSON
#define JSON_MAX_ELEMENT_COUNT 16 /* Max number of elements defined in JSON */
#define JSON_MAX_ELEMENT_SIZE 1024 /* Max size of a single module in JSON */
#define LISTENER_THREAD_CORE_ID 0 // Dedicated Listener Core
#define LISTENER_THREAD_CORE_ID 0 /* Dedicated Listener Core */
#define LISTENER_THREAD_MAX_EPOLL_EVENTS 1024
#define MODULE_DEFAULT_REQUEST_RESPONSE_SIZE (PAGE_SIZE)
#define MODULE_INITIALIZE_GLOBALS "populate_globals" // From Silverfish
#define MODULE_INITIALIZE_MEMORY "populate_memory" // From Silverfish
#define MODULE_INITIALIZE_TABLE "populate_table" // From Silverfish
#define MODULE_INITIALIZE_LIBC "wasmf___init_libc" // From Silverfish
#define MODULE_MAIN "wasmf_main" // From Silverfish
#define MODULE_MAX_ARGUMENT_COUNT 16 // Max number of arguments
#define MODULE_MAX_ARGUMENT_SIZE 64 // Max size of a single argument
#define MODULE_MAX_MODULE_COUNT 128 // Max number of modules
#define MODULE_MAX_NAME_LENGTH 32 // Max module name length
#define MODULE_MAX_PATH_LENGTH 256 // Max length of path string
#define MODULE_INITIALIZE_GLOBALS "populate_globals" /* From Silverfish */
#define MODULE_INITIALIZE_MEMORY "populate_memory" /* From Silverfish */
#define MODULE_INITIALIZE_TABLE "populate_table" /* From Silverfish */
#define MODULE_INITIALIZE_LIBC "wasmf___init_libc" /* From Silverfish */
#define MODULE_MAIN "wasmf_main" /* From Silverfish */
#define MODULE_MAX_ARGUMENT_COUNT 16 /* Max number of arguments */
#define MODULE_MAX_ARGUMENT_SIZE 64 /* Max size of a single argument */
#define MODULE_MAX_MODULE_COUNT 128 /* Max number of modules */
#define MODULE_MAX_NAME_LENGTH 32 /* Max module name length */
#define MODULE_MAX_PATH_LENGTH 256 /* Max length of path string */
#define MODULE_MAX_PENDING_CLIENT_REQUESTS 1000
#define RUNTIME_LOG_FILE "awesome.log"
#define RUNTIME_READ_WRITE_VECTOR_LENGTH 16
#define RUNTIME_MAX_SANDBOX_REQUEST_COUNT (1 << 19) // random!
#define RUNTIME_MAX_SANDBOX_REQUEST_COUNT (1 << 19) /* random! */
#define SANDBOX_FILE_DESCRIPTOR_PREOPEN_MAGIC (707707707) // reads lol lol lol upside down
#define SANDBOX_FILE_DESCRIPTOR_PREOPEN_MAGIC (707707707) /* upside down LOLLOLLOL 🤣😂🤣*/
#define SANDBOX_MAX_IO_HANDLE_COUNT 32
#define SANDBOX_PULL_BATCH_SIZE 1 // Max # standboxes pulled onto the local runqueue in a single batch
#define SANDBOX_PULL_BATCH_SIZE 1 /* Max # sandboxes pulled onto the local runqueue in a single batch */
#define SOFTWARE_INTERRUPT_TIME_TO_START_IN_USEC (10 * 1000) // start timers 10 ms from now.
#define SOFTWARE_INTERRUPT_INTERVAL_DURATION_IN_USEC (1000 * 5) // and execute every 5ms
#define SOFTWARE_INTERRUPT_TIME_TO_START_IN_USEC (10 * 1000) /* start timers 10 ms from now. */
#define SOFTWARE_INTERRUPT_INTERVAL_DURATION_IN_USEC (1000 * 5) /* and execute every 5ms */
// If multicore, use all but the dedicated listener core
// If there are fewer cores than this, main dynamically overrides this and uses all available
/* If multicore, use all but the dedicated listener core
If there are fewer cores than this, main dynamically overrides this and uses all available */
#define WORKER_THREAD_CORE_COUNT (NCORES > 1 ? NCORES - 1 : NCORES)
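Evaluating the macro for two concrete machine sizes: with NCORES defined as 8, WORKER_THREAD_CORE_COUNT is 8 - 1 = 7, leaving core 0 dedicated to the listener; with NCORES defined as 1, the ternary falls through to NCORES, so the single core is shared by the listener and the lone worker, matching the single-core branch of runtime_allocate_available_cores() further down.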

@ -13,7 +13,7 @@ void *worker_thread_main(void *return_code);
* @param offset an offset into the WebAssembly linear memory
* @param bounds_check the size of the thing we are pointing to
* @return void pointer to something in WebAssembly linear memory
**/
*/
static inline void *
worker_thread_get_memory_ptr_void(u32 offset, u32 bounds_check)
{
@ -24,7 +24,7 @@ worker_thread_get_memory_ptr_void(u32 offset, u32 bounds_check)
* Get a single-byte extended ASCII character from WebAssembly linear memory
* @param offset an offset into the WebAssembly linear memory
* @return char at the offset
**/
*/
static inline char
worker_thread_get_memory_character(u32 offset)
{
@ -36,7 +36,7 @@ worker_thread_get_memory_character(u32 offset)
* @param offset an offset into the WebAssembly linear memory
* @param max_length the maximum expected length in characters
* @return pointer to the string or NULL if max_length is reached without finding null-terminator
**/
*/
static inline char *
worker_thread_get_memory_string(u32 offset, u32 max_length)
{
@ -50,7 +50,7 @@ worker_thread_get_memory_string(u32 offset, u32 max_length)
/**
* Get global libuv handle
**/
*/
static inline uv_loop_t *
worker_thread_get_libuv_handle(void)
{

@ -1,12 +1,12 @@
#include "current_sandbox.h"
// current sandbox that is active..
/* current sandbox that is active.. */
static __thread sandbox_t *worker_thread_current_sandbox = NULL;
/**
* Getter for the current sandbox executing on this thread
* @returns the current sandbox executing on this thread
**/
*/
struct sandbox *
current_sandbox_get(void)
{
@ -16,11 +16,11 @@ current_sandbox_get(void)
/**
* Setter for the current sandbox executing on this thread
* @param sandbox the sandbox we are setting this thread to run
**/
*/
void
current_sandbox_set(struct sandbox *sandbox)
{
// Unpack hierarchy to avoid pointer chasing
/* Unpack hierarchy to avoid pointer chasing */
if (sandbox == NULL) {
worker_thread_current_sandbox = NULL;
sandbox_lmbase = NULL;
@ -37,7 +37,7 @@ current_sandbox_set(struct sandbox *sandbox)
/**
* Initializes and returns an IO handle on the current sandbox ready for use
* @return index of handle we preopened or -1 if all io_handles are exhausted
**/
*/
int
current_sandbox_initialize_io_handle(void)
{
@ -53,7 +53,7 @@ int sandbox_parse_http_request(struct sandbox *sandbox, size_t l);
* @param io_handle_index index of the sandbox io_handle we want to set
* @param file_descriptor the file descripter we want to set it to
* @returns the index that was set or -1 in case of error
**/
*/
int
current_sandbox_set_file_descriptor(int io_handle_index, int file_descriptor)
{
@ -65,7 +65,7 @@ current_sandbox_set_file_descriptor(int io_handle_index, int file_descriptor)
* Get the file descriptor of the sandbox's ith io_handle
* @param io_handle_index index into the sandbox's io_handles table
* @returns file descriptor
**/
*/
int
current_sandbox_get_file_descriptor(int io_handle_index)
{
@ -76,7 +76,7 @@ current_sandbox_get_file_descriptor(int io_handle_index)
/**
* Close the sandbox's ith io_handle
* @param io_handle_index index of the handle to close
**/
*/
void
current_sandbox_close_file_descriptor(int io_handle_index)
{
@ -88,7 +88,7 @@ current_sandbox_close_file_descriptor(int io_handle_index)
* Get the Libuv handle located at idx of the sandbox ith io_handle
* @param io_handle_index index of the handle containing libuv_handle???
* @returns any libuv handle
**/
*/
union uv_any_handle *
current_sandbox_get_libuv_handle(int io_handle_index)
{

@ -22,7 +22,7 @@ env___syscall(i32 n, i32 a, i32 b, i32 c, i32 d, i32 e, i32 f)
void
env___unmapself(u32 base, u32 size)
{
// Just do some no op
/* Just do some no op */
}
i32
@ -135,7 +135,7 @@ env_do_barrier(i32 x)
ck_pr_barrier();
}
// Floating point routines
/* Floating point routines */
INLINE double
env_sin(double d)
{

@ -19,7 +19,7 @@ static http_parser_settings runtime_http_parser_settings;
* @param at the start of the URL
* @param length the length of the URL
* @returns 0
**/
*/
int
http_parser_settings_on_url(http_parser *parser, const char *at, size_t length)
{
@ -33,7 +33,7 @@ http_parser_settings_on_url(http_parser *parser, const char *at, size_t length)
* http-parser callback called when parsing of a new message begins
* Sets the HTTP Request's message_begin and last_was_value flags to true
* @param parser
**/
*/
int
http_parser_settings_on_message_begin(http_parser *parser)
{
@ -41,7 +41,7 @@ http_parser_settings_on_message_begin(http_parser *parser)
struct http_request *http_request = &sandbox->http_request;
http_request->message_begin = 1;
http_request->last_was_value = 1; // should always start with a header..
http_request->last_was_value = 1; /* should always start with a header */
return 0;
}
@ -56,7 +56,7 @@ http_parser_settings_on_message_begin(http_parser *parser)
* @param at start address of the header field
* @param length length of the header field
* @returns 0
**/
*/
int
http_parser_settings_on_header_field(http_parser *parser, const char *at, size_t length)
{
@ -67,9 +67,10 @@ http_parser_settings_on_header_field(http_parser *parser, const char *at, size_t
assert(http_request->header_count <= HTTP_MAX_HEADER_COUNT);
assert(length < HTTP_MAX_HEADER_LENGTH);
http_request->last_was_value = 0;
http_request->headers[http_request->header_count - 1].key = (char *)
at; // it is from the sandbox's request_response_data, should persist.
http_request->last_was_value = 0;
/* it is from the sandbox's request_response_data, should persist. */
http_request->headers[http_request->header_count - 1].key = (char *)at;
return 0;
}
@ -81,7 +82,7 @@ http_parser_settings_on_header_field(http_parser *parser, const char *at, size_t
* @param at start address of the header value
* @param length length of the header value
* @returns 0
**/
*/
int
http_parser_settings_on_header_value(http_parser *parser, const char *at, size_t length)
{
@ -92,8 +93,8 @@ http_parser_settings_on_header_value(http_parser *parser, const char *at, size_t
assert(http_request->header_count <= HTTP_MAX_HEADER_COUNT);
assert(length < HTTP_MAX_HEADER_VALUE_LENGTH);
http_request->headers[http_request->header_count - 1].value = (char *)
at; // it is from the sandbox's request_response_data, should persist.
/* it is from the sandbox's request_response_data, should persist. */
http_request->headers[http_request->header_count - 1].value = (char *)at;
return 0;
}
@ -102,7 +103,7 @@ http_parser_settings_on_header_value(http_parser *parser, const char *at, size_t
* http-parser callback called when header parsing is complete
* Just sets the HTTP Request's header_end flag to true
* @param parser
**/
*/
int
http_parser_settings_on_header_end(http_parser *parser)
{
@ -120,7 +121,7 @@ http_parser_settings_on_header_end(http_parser *parser)
* @param at
* @param length
* @returns 0
**/
*/
int
http_parser_settings_on_body(http_parser *parser, const char *at, size_t length)
{
@ -141,7 +142,8 @@ http_parser_settings_on_body(http_parser *parser, const char *at, size_t length)
/**
* Sets the HTTP Request's message_end flag to true
* @param parser
**/
* @returns 0
*/
int
http_parser_settings_on_msg_end(http_parser *parser)
{
@ -173,7 +175,7 @@ http_parser_settings_register_callbacks(http_parser_settings *settings)
/**
* This is really the only function that should have to be called to setup this structure
**/
*/
void
http_parser_settings_initialize()
{

@ -9,7 +9,7 @@
* @param http_request the request we want the body from
* @param body pointer that we'll assign to the http_request body
* @returns the length of the http_request's body
**/
*/
int
http_request_get_body(struct http_request *http_request, char **body)
{

@ -12,7 +12,7 @@
* Encodes a sandbox's HTTP Response as an array of buffers
* @param sandbox the sandbox containing the HTTP response we want to encode as buffers
* @returns the number of buffers used to store the HTTP Response
**/
*/
int
http_response_encode_as_vector(struct http_response *http_response)
{
@ -33,7 +33,7 @@ http_response_encode_as_vector(struct http_response *http_response)
buffer_count++;
http_response->bufs[buffer_count] = uv_buf_init(http_response->status + http_response->status_length
- 2,
2); // for crlf
2); /* for crlf */
buffer_count++;
}
#else
@ -67,7 +67,7 @@ http_response_encode_as_vector(struct http_response *http_response)
* @param body string of the body that we want to set
* @param length the length of the body string
* @returns 0 (abends program in case of error)
**/
*/
int
http_response_set_body(struct http_response *http_response, char *body, int length)
{
@ -84,7 +84,7 @@ http_response_set_body(struct http_response *http_response, char *body, int leng
* @param header string containing the header that we want to append
* @param length the length of the header string
* @returns 0 (abends program in case of error)
**/
*/
int
http_response_set_header(struct http_response *http_response, char *header, int length)
{
@ -102,7 +102,7 @@ http_response_set_header(struct http_response *http_response, char *header, int
* @param status string of the status we want to set
* @param length the length of the status
* @returns 0 (abends program in case of error)
**/
*/
int
http_response_set_status(struct http_response *http_response, char *status, int length)
{

@ -14,23 +14,23 @@
#include <software_interrupt.h>
#include <worker_thread.h>
// Conditionally used by debuglog when DEBUG is set
/* Conditionally used by debuglog when DEBUG is set */
#ifdef DEBUG
i32 runtime_log_file_descriptor = -1;
#endif
float runtime_processor_speed_MHz = 0;
u32 runtime_total_online_processors = 0;
u32 runtime_total_worker_processors = 0;
u32 runtime_first_worker_processor = 0;
int runtime_worker_threads_argument[WORKER_THREAD_CORE_COUNT] = { 0 }; // The worker sets its argument to -1 on error
float runtime_processor_speed_MHz = 0;
u32 runtime_total_online_processors = 0;
u32 runtime_total_worker_processors = 0;
u32 runtime_first_worker_processor = 0;
int runtime_worker_threads_argument[WORKER_THREAD_CORE_COUNT] = { 0 }; /* The worker sets its argument to -1 on error */
pthread_t runtime_worker_threads[WORKER_THREAD_CORE_COUNT];
/**
* Returns instructions on use of CLI if used incorrectly
* @param cmd - The command the user entered
**/
*/
static void
runtime_usage(char *cmd)
{
@ -41,7 +41,7 @@ runtime_usage(char *cmd)
/**
* Sets the process data segment (RLIMIT_DATA) and # file descriptors
* (RLIMIT_NOFILE) soft limit to its hard limit (see man getrlimit)
**/
*/
void
runtime_set_resource_limits_to_max()
{
@ -68,24 +68,24 @@ runtime_set_resource_limits_to_max()
/**
* Check the number of cores and the compiler flags and allocate available cores
**/
*/
void
runtime_allocate_available_cores()
{
// Find the number of processors currently online
/* Find the number of processors currently online */
runtime_total_online_processors = sysconf(_SC_NPROCESSORS_ONLN);
// If multicore, we'll pin one core as a listener and run sandbox threads on all others
/* If multicore, we'll pin one core as a listener and run sandbox threads on all others */
if (runtime_total_online_processors > 1) {
runtime_first_worker_processor = 1;
// WORKER_THREAD_CORE_COUNT can be used as a cap on the number of cores to use
// But if there are few cores that WORKER_THREAD_CORE_COUNT, just use what is available
/* WORKER_THREAD_CORE_COUNT can be used as a cap on the number of cores to use, but if there are fewer
* cores than WORKER_THREAD_CORE_COUNT, just use what is available */
u32 max_possible_workers = runtime_total_online_processors - 1;
runtime_total_worker_processors = (max_possible_workers >= WORKER_THREAD_CORE_COUNT)
? WORKER_THREAD_CORE_COUNT
: max_possible_workers;
} else {
// If single core, we'll do everything on CPUID 0
/* If single core, we'll do everything on CPUID 0 */
runtime_first_worker_processor = 0;
runtime_total_worker_processors = 1;
}
@ -99,7 +99,7 @@ runtime_allocate_available_cores()
* We are assuming all cores are the same clock speed, which is not true of many systems
* We are also assuming this value is static
* @return processor speed in MHz
**/
*/
static inline float
runtime_get_processor_speed_MHz(void)
{
@ -126,7 +126,7 @@ runtime_get_processor_speed_MHz(void)
* Controls the behavior of the debuglog macro defined in types.h
* If LOG_TO_FILE is defined, close stdin, stdout, stderr, and debuglog writes to a logfile named awesome.log.
* Otherwise, it writes to STDOUT
**/
*/
void
runtime_process_debug_log_behavior()
{
@ -141,13 +141,13 @@ runtime_process_debug_log_behavior()
}
#else
runtime_log_file_descriptor = 1;
#endif // LOG_TO_FILE
#endif /* LOG_TO_FILE */
}
#endif // DEBUG
#endif /* DEBUG */
/**
* Starts all worker threads and sleeps forever on pthread_join, which should never return
**/
*/
void
runtime_start_runtime_worker_threads()
{

@ -4,14 +4,14 @@ __thread struct indirect_table_entry *module_indirect_table = NULL;
__thread void * sandbox_lmbase = NULL;
__thread u32 sandbox_lmbound = 0;
// Region initialization helper function
/* Region initialization helper function */
EXPORT void
initialize_region(u32 offset, u32 data_count, char *data)
{
assert(sandbox_lmbound >= data_count);
assert(offset < sandbox_lmbound - data_count);
// FIXME: Hack around segmented and unsegmented access
/* FIXME: Hack around segmented and unsegmented access */
memcpy(get_memory_ptr_for_runtime(offset, data_count), data, data_count);
}
@ -20,15 +20,15 @@ add_function_to_table(u32 idx, u32 type_id, char *pointer)
{
assert(idx < INDIRECT_TABLE_SIZE);
// TODO: atomic for multiple concurrent invocations?
/* TODO: atomic for multiple concurrent invocations? */
if (module_indirect_table[idx].type_id == type_id && module_indirect_table[idx].func_pointer == pointer) return;
module_indirect_table[idx] = (struct indirect_table_entry){ .type_id = type_id, .func_pointer = pointer };
}
// If we are using runtime globals, we need to populate them
/* If we are using runtime globals, we need to populate them */
WEAK void
populate_globals()
{
assert(0); // FIXME: is this used in WASM as dynamic modules?
assert(0); /* FIXME: is this used in WASM as dynamic modules? */
}

@ -8,43 +8,44 @@
#include <module_database.h>
#include <runtime.h>
/***************************************
* Private Static Inline
***************************************/
/*************************
* Private Static Inline *
************************/
/**
* Start the module as a server listening at module->port
* @param module
**/
*/
static inline void
module_listen(struct module *module)
{
// Allocate a new socket
/* Allocate a new socket */
int socket_descriptor = socket(AF_INET, SOCK_STREAM, 0);
assert(socket_descriptor > 0);
// Configure socket address as [all addresses]:[module->port]
/* Configure socket address as [all addresses]:[module->port] */
module->socket_address.sin_family = AF_INET;
module->socket_address.sin_addr.s_addr = htonl(INADDR_ANY);
module->socket_address.sin_port = htons((unsigned short)module->port);
// Configure the socket to allow multiple sockets to bind to the same host and port
/* Configure the socket to allow multiple sockets to bind to the same host and port */
int optval = 1;
setsockopt(socket_descriptor, SOL_SOCKET, SO_REUSEPORT, &optval, sizeof(optval));
optval = 1;
setsockopt(socket_descriptor, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
// Bind to the interface
/* Bind to the interface */
if (bind(socket_descriptor, (struct sockaddr *)&module->socket_address, sizeof(module->socket_address)) < 0) {
perror("bind");
assert(0);
}
// Listen to the interface? Check that it is live?
/* Listen to the interface? Check that it is live? */
if (listen(socket_descriptor, MODULE_MAX_PENDING_CLIENT_REQUESTS) < 0) assert(0);
// Set the socket descriptor and register with our global epoll instance to monitor for incoming HTTP requests
/* Set the socket descriptor and register with our global epoll instance to monitor for incoming HTTP
requests */
module->socket_descriptor = socket_descriptor;
struct epoll_event accept_evt;
accept_evt.data.ptr = (void *)module;
@ -64,17 +65,17 @@ module_listen(struct module *module)
*
* TODO: Untested Functionality. Unsure if this will work
* @param module - the module to teardown
**/
*/
void
module_free(struct module *module)
{
if (module == NULL) return;
if (module->dynamic_library_handle == NULL) return;
// Do not free if we still have oustanding references
/* Do not free if we still have outstanding references */
if (module->reference_count) return;
// TODO: What about the module database? Do we need to do any cleanup there?
/* TODO: What about the module database? Do we need to do any cleanup there? */
close(module->socket_descriptor);
dlclose(module->dynamic_library_handle);
@ -96,7 +97,7 @@ module_free(struct module *module)
* @param port
* @param request_size
* @returns A new module or NULL in case of failure
**/
*/
struct module *
module_new(char *name, char *path, i32 argument_count, u32 stack_size, u32 max_memory, u32 relative_deadline_us,
int port, int request_size, int response_size)
@ -106,11 +107,11 @@ module_new(char *name, char *path, i32 argument_count, u32 stack_size, u32 max_m
memset(module, 0, sizeof(struct module));
// Load the dynamic library *.so file with lazy function call binding and deep binding
/* Load the dynamic library *.so file with lazy function call binding and deep binding */
module->dynamic_library_handle = dlopen(path, RTLD_LAZY | RTLD_DEEPBIND);
if (module->dynamic_library_handle == NULL) goto dl_open_error;
// Resolve the symbols in the dynamic library *.so file
/* Resolve the symbols in the dynamic library *.so file */
module->main = (mod_main_fn_t)dlsym(module->dynamic_library_handle, MODULE_MAIN);
if (module->main == NULL) goto dl_error;
@ -126,7 +127,7 @@ module_new(char *name, char *path, i32 argument_count, u32 stack_size, u32 max_m
module->initialize_libc = (mod_libc_fn_t)dlsym(module->dynamic_library_handle, MODULE_INITIALIZE_LIBC);
if (module->initialize_libc == NULL) goto dl_error;
// Set fields in the module struct
/* Set fields in the module struct */
strncpy(module->name, name, MODULE_MAX_NAME_LENGTH);
strncpy(module->path, path, MODULE_MAX_PATH_LENGTH);
@ -143,25 +144,25 @@ module_new(char *name, char *path, i32 argument_count, u32 stack_size, u32 max_m
module->max_request_or_response_size = round_up_to_page(request_size > response_size ? request_size
: response_size);
// module_indirect_table is a thread-local struct
/* module_indirect_table is a thread-local struct */
struct indirect_table_entry *cache_tbl = module_indirect_table;
// assumption: All modules are created at program start before we enable preemption or enable the execution of
// any worker threads We are checking that thread-local module_indirect_table is NULL to prove that we aren't
// yet preempting If we want to be able to do this later, we can possibly defer module_initialize_table until
// the first invocation
/* Assumption: All modules are created at program start before we enable preemption or enable the execution
of any worker threads. We are checking that the thread-local module_indirect_table is NULL to prove that we aren't
yet preempting. If we want to be able to do this later, we can possibly defer module_initialize_table until
the first invocation */
assert(cache_tbl == NULL);
// TODO: determine why we have to set the module_indirect_table state before calling table init and then restore
// the existing value What is the relationship between these things?
/* TODO: determine why we have to set the module_indirect_table state before calling table init and then
* restore the existing value. What is the relationship between these things? */
module_indirect_table = module->indirect_table;
module_initialize_table(module);
module_indirect_table = cache_tbl;
// Add the module to the in-memory module DB
/* Add the module to the in-memory module DB */
module_database_add(module);
// Start listening for requests
/* Start listening for requests */
module_listen(module);
return module;
@ -183,7 +184,7 @@ dl_open_error:
int
module_new_from_json(char *file_name)
{
// Use stat to get file attributes and make sure file is there and OK
/* Use stat to get file attributes and make sure file is there and OK */
struct stat stat_buffer;
memset(&stat_buffer, 0, sizeof(struct stat));
if (stat(file_name, &stat_buffer) < 0) {
@ -191,14 +192,15 @@ module_new_from_json(char *file_name)
return -1;
}
// Open the file
/* Open the file */
FILE *module_file = fopen(file_name, "r");
if (!module_file) {
perror("fopen");
return -1;
}
// Initialize a Buffer, Read the file into the buffer, and then check that the buffer size equals the file size
/* Initialize a Buffer, Read the file into the buffer, and then check that the buffer size equals the file
size */
char *file_buffer = malloc(stat_buffer.st_size);
memset(file_buffer, 0, stat_buffer.st_size);
int total_chars_read = fread(file_buffer, sizeof(char), stat_buffer.st_size, module_file);
@ -208,15 +210,15 @@ module_new_from_json(char *file_name)
return -1;
}
// Close the file
/* Close the file */
fclose(module_file);
// Initialize the Jasmine Parser and an array to hold the tokens
/* Initialize the Jasmine Parser and an array to hold the tokens */
jsmn_parser module_parser;
jsmn_init(&module_parser);
jsmntok_t tokens[JSON_MAX_ELEMENT_SIZE * JSON_MAX_ELEMENT_COUNT];
// Use Jasmine to parse the JSON
/* Use Jasmine to parse the JSON */
int total_tokens = jsmn_parse(&module_parser, file_buffer, strlen(file_buffer), tokens,
sizeof(tokens) / sizeof(tokens[0]));
if (total_tokens < 0) {
@ -308,10 +310,10 @@ module_new_from_json(char *file_name)
j += ntks;
}
i += ntoks;
// do not load if it is not active
/* do not load if it is not active */
if (is_active == 0) continue;
// Allocate a module based on the values from the JSON
/* Allocate a module based on the values from the JSON */
struct module *module = module_new(module_name, module_path, argument_count, 0, 0, relative_deadline_us,
port, request_size, response_size);
assert(module);

@ -1,20 +1,18 @@
#include <module_database.h>
/***************************************
* Module Database
***************************************/
/*******************
* Module Database *
******************/
// In-memory representation of all active modules
struct module *module_database[MODULE_MAX_MODULE_COUNT] = { NULL };
// First free in module
int module_database_free_offset = 0;
struct module *module_database[MODULE_MAX_MODULE_COUNT] = { NULL }; /* In-memory representation of all active modules */
int module_database_free_offset = 0; /* First free offset in the module database */
/**
* Given a name, find the associated module
* @param name
* @return module or NULL if no match found
**/
*/
struct module *
module_database_find_by_name(char *name)
{
@ -30,7 +28,7 @@ module_database_find_by_name(char *name)
* Given a socket_descriptor, find the associated module
* @param socket_descriptor
* @return module or NULL if no match found
**/
*/
struct module *
module_database_find_by_socket_descriptor(int socket_descriptor)
{

@ -8,14 +8,14 @@
/****************************
* Private Helper Functions *
****************************/
***************************/
/**
* Adds a value to the end of the binary heap
* @param self the priority queue
* @param new_item the value we are adding
* @return 0 on success. -1 when priority queue is full
**/
*/
static inline int
priority_queue_append(struct priority_queue *self, void *new_item)
{
@ -46,7 +46,7 @@ priority_queue_percolate_up(struct priority_queue *self)
void *temp = self->items[i / 2];
self->items[i / 2] = self->items[i];
self->items[i] = temp;
// If percolated to highest priority, update highest priority
/* If percolated to highest priority, update highest priority */
if (i / 2 == 1) self->highest_priority = self->get_priority(self->items[1]);
}
}
@ -69,14 +69,14 @@ priority_queue_find_smallest_child(struct priority_queue *self, int parent_index
int right_child_index = 2 * parent_index + 1;
assert(self->items[left_child_index] != NULL);
// If we don't have a right child or the left child is smaller, return it
/* If we don't have a right child or the left child is smaller, return it */
if (right_child_index == self->first_free) {
return left_child_index;
} else if (self->get_priority(self->items[left_child_index])
< self->get_priority(self->items[right_child_index])) {
return left_child_index;
} else {
// Otherwise, return the right child
/* Otherwise, return the right child */
return right_child_index;
}
}
@ -96,11 +96,11 @@ priority_queue_percolate_down(struct priority_queue *self, int parent_index)
int left_child_index = 2 * parent_index;
while (left_child_index >= 2 && left_child_index < self->first_free) {
int smallest_child_index = priority_queue_find_smallest_child(self, parent_index);
// Once the parent is equal to or less than its smallest child, break;
/* Once the parent is equal to or less than its smallest child, break; */
if (self->get_priority(self->items[parent_index])
<= self->get_priority(self->items[smallest_child_index]))
break;
// Otherwise, swap and continue down the tree
/* Otherwise, swap and continue down the tree */
void *temp = self->items[smallest_child_index];
self->items[smallest_child_index] = self->items[parent_index];
self->items[parent_index] = temp;
@ -112,14 +112,13 @@ priority_queue_percolate_down(struct priority_queue *self, int parent_index)
/*********************
* Public API *
*********************/
********************/
/**
* Initializes the Priority Queue Data structure
* @param self the priority_queue to initialize
* @param get_priority pointer to a function that returns the priority of an
*element
**/
* @param get_priority pointer to a function that returns the priority of an element
*/
void
priority_queue_initialize(struct priority_queue *self, priority_queue_get_priority_t get_priority)
{
@ -132,14 +131,14 @@ priority_queue_initialize(struct priority_queue *self, priority_queue_get_priori
self->first_free = 1;
self->get_priority = get_priority;
// We're assuming a min-heap implementation, so set to largest possible value
/* We're assuming a min-heap implementation, so set to largest possible value */
self->highest_priority = ULONG_MAX;
}
/**
* @param self the priority_queue
* @returns the number of elements in the priority queue
**/
*/
int
priority_queue_length(struct priority_queue *self)
{
@ -155,7 +154,7 @@ priority_queue_length(struct priority_queue *self)
* @param self - the priority queue we want to add to
* @param value - the value we want to add
* @returns 0 on success. -1 on full. -2 on unable to take lock
**/
*/
int
priority_queue_enqueue(struct priority_queue *self, void *value, char *name)
{
@ -164,7 +163,7 @@ priority_queue_enqueue(struct priority_queue *self, void *value, char *name)
int pre_length = self->first_free - 1;
// Start of Critical Section
/* Start of Critical Section */
if (priority_queue_append(self, value) == -1) {
printf("Priority Queue is full");
fflush(stdout);
@ -175,16 +174,16 @@ priority_queue_enqueue(struct priority_queue *self, void *value, char *name)
int post_length = self->first_free - 1;
// We should have appended here
/* We should have appended here */
assert(post_length == pre_length + 1);
// If this is the first element we add, update the highest priority
/* If this is the first element we add, update the highest priority */
if (self->first_free == 2) {
self->highest_priority = self->get_priority(value);
} else {
priority_queue_percolate_up(self);
}
// End of Critical Section
/* End of Critical Section */
ck_spinlock_fas_unlock(&self->lock);
return 0;
}
@ -192,7 +191,7 @@ priority_queue_enqueue(struct priority_queue *self, void *value, char *name)
* @param self - the priority queue we want to delete from
* @param value - the value we want to delete
* @returns 0 on success. -1 on not found. -2 on unable to take lock
**/
*/
int
priority_queue_delete(struct priority_queue *self, void *value, char *name)
{
@ -232,7 +231,7 @@ priority_queue_is_empty(struct priority_queue *self)
/**
* @param self - the priority queue we want to dequeue from
* @returns The head of the priority queue or NULL when empty
**/
*/
void *
priority_queue_dequeue(struct priority_queue *self, char *name)
{
@ -242,21 +241,21 @@ priority_queue_dequeue(struct priority_queue *self, char *name)
ck_spinlock_fas_lock(&self->lock);
assert(ck_spinlock_fas_locked(&self->lock));
// Start of Critical Section
/* Start of Critical Section */
void *min = NULL;
if (!priority_queue_is_empty(self)) {
min = self->items[1];
self->items[1] = self->items[--self->first_free];
self->items[self->first_free] = NULL;
// Because of 1-based indices, first_free is 2 when there is only one element
/* Because of 1-based indices, first_free is 2 when there is only one element */
if (self->first_free > 2) priority_queue_percolate_down(self, 1);
// Update the highest priority
/* Update the highest priority */
self->highest_priority = !priority_queue_is_empty(self) ? self->get_priority(self->items[1])
: ULONG_MAX;
}
ck_spinlock_fas_unlock(&self->lock);
// End of Critical Section
/* End of Critical Section */
return min;
}
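
The percolate helpers above lean on the arithmetic of a 1-based binary min-heap: the parent of index i is i / 2, its children are 2i and 2i + 1, and first_free points one past the last element. The following is a stripped-down sketch of that layout, assuming raw uint64_t priorities instead of the void * items and get_priority callback, and no locking.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define HEAP_CAPACITY 16

/* Minimal 1-based binary min-heap over raw priorities; index 0 is unused, as above */
static uint64_t heap[HEAP_CAPACITY + 1];
static int      first_free = 1;

static void
heap_push(uint64_t priority)
{
        assert(first_free <= HEAP_CAPACITY);
        int i   = first_free++;
        heap[i] = priority;
        /* Percolate up: swap with the parent (i / 2) while we are smaller than it */
        while (i > 1 && heap[i / 2] > heap[i]) {
                uint64_t temp = heap[i / 2];
                heap[i / 2]   = heap[i];
                heap[i]       = temp;
                i /= 2;
        }
}

static uint64_t
heap_pop(void)
{
        assert(first_free > 1);
        uint64_t min = heap[1];
        heap[1]      = heap[--first_free];
        /* Percolate down: swap with the smaller child (2i or 2i + 1) until ordered */
        int i = 1;
        while (2 * i < first_free) {
                int child = 2 * i;
                if (child + 1 < first_free && heap[child + 1] < heap[child]) child++;
                if (heap[i] <= heap[child]) break;
                uint64_t temp = heap[i];
                heap[i]       = heap[child];
                heap[child]   = temp;
                i = child;
        }
        return min;
}

int
main(void)
{
        heap_push(30);
        heap_push(10);
        heap_push(20);
        uint64_t first = heap_pop(), second = heap_pop(), third = heap_pop();
        printf("%" PRIu64 " %" PRIu64 " %" PRIu64 "\n", first, second, third); /* 10 20 30 */
        return 0;
}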

@ -1,15 +1,15 @@
// Something is not idempotent with this or some other include.
// If placed in Local Includes, error is triggered that memset was implicitly declared
/* Something is not idempotent with this or some other include. If placed in Local Includes, error is triggered that
* memset was implicitly declared */
#include <runtime.h>
/***************************
* External Includes *
**************************/
#include <pthread.h> // POSIX Threads
#include <signal.h> // POSIX Signals
#include <sched.h> // Wasmception. Included as submodule
#include <sys/mman.h> // Wasmception. Included as submodule
#include <uv.h> // Libuv
#include <pthread.h>
#include <signal.h>
#include <sched.h> /* Wasmception. Included as submodule */
#include <sys/mman.h> /* Wasmception. Included as submodule */
#include <uv.h>
/***************************
* Local Includes *
@ -31,7 +31,7 @@ int runtime_epoll_file_descriptor;
/******************************************
* Shared Process / Listener Thread Logic *
******************************************/
*****************************************/
/**
* Initialize runtime global state, mask signals, and init http parser
@ -39,25 +39,27 @@ int runtime_epoll_file_descriptor;
void
runtime_initialize(void)
{
// Setup epoll
/* Setup epoll */
runtime_epoll_file_descriptor = epoll_create1(0);
assert(runtime_epoll_file_descriptor >= 0);
// Allocate and Initialize the global deque
/* Allocate and Initialize the global deque
TODO: Improve to expose variant as a config
*/
// sandbox_request_scheduler_fifo_initialize();
sandbox_request_scheduler_ps_initialize();
// Mask Signals
/* Mask Signals */
software_interrupt_mask_signal(SIGUSR1);
software_interrupt_mask_signal(SIGALRM);
// Initialize http_parser_settings global
/* Initialize http_parser_settings global */
http_parser_settings_initialize();
}
/********************************
* Listener Thread Logic *
********************************/
/*************************
* Listener Thread Logic *
************************/
/**
* @brief Execution Loop of the listener core, handles HTTP requests, allocates sandbox request objects, and pushes
@ -80,7 +82,7 @@ listener_thread_main(void *dummy)
int request_count = epoll_wait(runtime_epoll_file_descriptor, epoll_events,
LISTENER_THREAD_MAX_EPOLL_EVENTS, -1);
// Capture Start Time to calculate absolute deadline
/* Capture Start Time to calculate absolute deadline */
u64 start_time = __getcycles();
for (int i = 0; i < request_count; i++) {
if (epoll_events[i].events & EPOLLERR) {
@ -88,7 +90,7 @@ listener_thread_main(void *dummy)
assert(false);
}
// Accept Client Request
/* Accept Client Request */
struct sockaddr_in client_address;
socklen_t client_length = sizeof(client_address);
struct module * module = (struct module *)epoll_events[i].data.ptr;
@ -101,13 +103,13 @@ listener_thread_main(void *dummy)
}
total_requests++;
// Allocate a Sandbox Request
/* Allocate a Sandbox Request */
sandbox_request_t *sandbox_request =
sandbox_request_allocate(module, module->name, socket_descriptor,
(const struct sockaddr *)&client_address, start_time);
assert(sandbox_request);
// Add to the Global Sandbox Request Scheduler
/* Add to the Global Sandbox Request Scheduler */
sandbox_request_scheduler_add(sandbox_request);
}
}
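
For reference, the accept loop above reduces to a standard epoll_wait pattern. Here is a minimal sketch under the assumption of a single listening TCP socket; the sandbox-request allocation is elided and the port is a placeholder.

#include <arpa/inet.h>
#include <assert.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

#define MAX_EPOLL_EVENTS 64

static void
listener_loop(int listen_fd)
{
        int epoll_fd = epoll_create1(0);
        assert(epoll_fd >= 0);

        /* Register the listening socket for readability events */
        struct epoll_event accept_event = { .events = EPOLLIN, .data.fd = listen_fd };
        epoll_ctl(epoll_fd, EPOLL_CTL_ADD, listen_fd, &accept_event);

        struct epoll_event epoll_events[MAX_EPOLL_EVENTS];
        while (1) {
                /* Block until at least one registered socket is readable */
                int request_count = epoll_wait(epoll_fd, epoll_events, MAX_EPOLL_EVENTS, -1);
                for (int i = 0; i < request_count; i++) {
                        struct sockaddr_in client_address;
                        socklen_t          client_length = sizeof(client_address);
                        int                client_socket = accept(epoll_events[i].data.fd,
                                                                  (struct sockaddr *)&client_address,
                                                                  &client_length);
                        if (client_socket < 0) {
                                perror("accept");
                                continue;
                        }
                        /* The real listener would allocate a sandbox request here */
                        close(client_socket);
                }
        }
}

int
main(void)
{
        int listen_fd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in address = { .sin_family = AF_INET, .sin_port = htons(10000),
                                       .sin_addr.s_addr = htonl(INADDR_ANY) };
        bind(listen_fd, (struct sockaddr *)&address, sizeof(address));
        listen(listen_fd, SOMAXCONN);
        listener_loop(listen_fd);
        return 0;
}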

@ -12,7 +12,7 @@
/**
* Takes the arguments from the sandbox struct and writes them into the WebAssembly linear memory
**/
*/
static inline void
sandbox_setup_arguments(struct sandbox *sandbox)
{
@ -20,7 +20,7 @@ sandbox_setup_arguments(struct sandbox *sandbox)
char *arguments = sandbox_get_arguments(sandbox);
i32 argument_count = module_get_argument_count(sandbox->module);
// whatever gregor has, to be able to pass arguments to a module!
/* whatever gregor has, to be able to pass arguments to a module! */
sandbox->arguments_offset = sandbox_lmbound;
assert(sandbox_lmbase == sandbox->linear_memory_start);
expand_memory();
@ -33,7 +33,7 @@ sandbox_setup_arguments(struct sandbox *sandbox)
size_t str_sz = strlen(arg) + 1;
array_ptr[i] = string_off;
// why get_memory_ptr_for_runtime??
/* why get_memory_ptr_for_runtime?? */
strncpy(get_memory_ptr_for_runtime(string_off, str_sz), arg, strlen(arg));
string_off += str_sz;
@ -47,14 +47,14 @@ sandbox_setup_arguments(struct sandbox *sandbox)
* @param length The size of the request_response_data that we want to parse
* @returns 0
*
**/
*/
int
sandbox_parse_http_request(struct sandbox *sandbox, size_t length)
{
assert(sandbox != NULL);
assert(length > 0);
// Why is our start address sandbox->request_response_data + sandbox->request_response_data_length?
// it's like a cursor to keep track of what we've read so far
/* Why is our start address sandbox->request_response_data + sandbox->request_response_data_length?
it's like a cursor to keep track of what we've read so far */
http_parser_execute(&sandbox->http_parser, http_parser_settings_get(),
sandbox->request_response_data + sandbox->request_response_data_length, length);
return 0;
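
The "cursor" comment above works because http_parser_execute keeps its own state across calls, so each receive can hand the parser only the newly read bytes. A minimal sketch assuming the bundled http-parser library (http_parser.h); the request string and the callback are illustrative only.

#include <stdio.h>
#include <string.h>
#include "http_parser.h"

static int
on_url(http_parser *parser, const char *at, size_t length)
{
        (void)parser;
        /* May fire more than once with partial data when the URL spans chunks */
        printf("url fragment: %.*s\n", (int)length, at);
        return 0;
}

int
main(void)
{
        const char *request = "GET /fibonacci HTTP/1.1\r\nHost: localhost\r\n\r\n";

        http_parser parser;
        http_parser_init(&parser, HTTP_REQUEST);

        http_parser_settings settings;
        memset(&settings, 0, sizeof(settings));
        settings.on_url = on_url;

        /* Feed the request in two chunks; the parser keeps state between calls,
         * which is why the runtime can pass only the newly received bytes each time */
        size_t half = strlen(request) / 2;
        http_parser_execute(&parser, &settings, request, half);
        http_parser_execute(&parser, &settings, request + half, strlen(request) - half);
        return 0;
}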
@ -63,7 +63,7 @@ sandbox_parse_http_request(struct sandbox *sandbox, size_t length)
/**
* Receive and Parse the Request for the current sandbox
* @return 1 on success, 0 if no context, < 0 on failure.
**/
*/
static inline int
sandbox_receive_and_parse_client_request(struct sandbox *sandbox)
{
@ -105,7 +105,7 @@ sandbox_receive_and_parse_client_request(struct sandbox *sandbox)
/**
* Sends Response Back to Client
* @return RC. -1 on Failure
**/
*/
static inline int
sandbox_build_and_send_client_response(struct sandbox *sandbox)
{
@ -195,25 +195,28 @@ sandbox_open_http(struct sandbox *sandbox)
http_parser_init(&sandbox->http_parser, HTTP_REQUEST);
// Set the sandbox as the data the http-parser has access to
/* Set the sandbox as the data the http-parser has access to */
sandbox->http_parser.data = sandbox;
#ifdef USE_HTTP_UVIO
// Initialize libuv TCP stream
/* Initialize libuv TCP stream */
int r = uv_tcp_init(worker_thread_get_libuv_handle(), (uv_tcp_t *)&sandbox->client_libuv_stream);
assert(r == 0);
// Set the current sandbox as the data the libuv callbacks have access to
/* Set the current sandbox as the data the libuv callbacks have access to */
sandbox->client_libuv_stream.data = sandbox;
// Open the libuv TCP stream
/* Open the libuv TCP stream */
r = uv_tcp_open((uv_tcp_t *)&sandbox->client_libuv_stream, sandbox->client_socket_descriptor);
assert(r == 0);
#endif
}
// Initialize file descriptors 0, 1, and 2 as io handles 0, 1, 2
/**
* Initialize file descriptors 0, 1, and 2 as io handles 0, 1, 2
* @param sandbox - the sandbox on which we are initializing file descriptors
*/
static inline void
sandbox_initialize_io_handles_and_file_descriptors(struct sandbox *sandbox)
{
@ -228,8 +231,8 @@ sandbox_initialize_io_handles_and_file_descriptors(struct sandbox *sandbox)
/**
* Sandbox execution logic
* Handles setup, request parsing, WebAssembly initialization, function execution, response building and sending, and
*cleanup
**/
* cleanup
*/
void
current_sandbox_main(void)
{
@ -246,28 +249,28 @@ current_sandbox_main(void)
sandbox_open_http(sandbox);
// Parse the request. 1 = Success
/* Parse the request. 1 = Success */
int rc = sandbox_receive_and_parse_client_request(sandbox);
if (rc != 1) goto err;
// Initialize the module
/* Initialize the module */
struct module *current_module = sandbox_get_module(sandbox);
int argument_count = module_get_argument_count(current_module);
// alloc_linear_memory();
module_initialize_globals(current_module);
module_initialize_memory(current_module);
// Copy the arguments into the WebAssembly sandbox
/* Copy the arguments into the WebAssembly sandbox */
sandbox_setup_arguments(sandbox);
// Executing the function
/* Executing the function */
sandbox->return_value = module_main(current_module, argument_count, sandbox->arguments_offset);
// Retrieve the result, construct the HTTP response, and send to client
/* Retrieve the result, construct the HTTP response, and send to client */
sandbox_build_and_send_client_response(sandbox);
done:
// Cleanup connection and exit sandbox
/* Cleanup connection and exit sandbox */
sandbox_close_http(sandbox);
worker_thread_on_sandbox_exit(sandbox);
err:
@ -279,23 +282,23 @@ err:
* struct sandbox | Buffer for HTTP Req/Resp | 4GB of Wasm Linear Memory | Guard Page
* @param module the module that we want to run
* @returns the resulting sandbox or NULL if mmap failed
**/
*/
static inline struct sandbox *
sandbox_allocate_memory(struct module *module)
{
assert(module != NULL);
char * error_message = NULL;
unsigned long linear_memory_size = WASM_PAGE_SIZE * WASM_START_PAGES; // The initial pages
unsigned long linear_memory_size = WASM_PAGE_SIZE * WASM_START_PAGES; /* The initial pages */
uint64_t linear_memory_max_size = (uint64_t)SBOX_MAX_MEM;
struct sandbox *sandbox = NULL;
unsigned long sandbox_size = sizeof(struct sandbox) + module->max_request_or_response_size;
// Control information should be page-aligned
// TODO: Should I use round_up_to_page when setting sandbox_page?
/* Control information should be page-aligned
TODO: Should I use round_up_to_page when setting sandbox_page? */
assert(round_up_to_page(sandbox_size) == sandbox_size);
// At an address of the system's choosing, allocate the memory, marking it as inaccessible
/* At an address of the system's choosing, allocate the memory, marking it as inaccessible */
errno = 0;
void *addr = mmap(NULL, sandbox_size + linear_memory_max_size + /* guard page */ PAGE_SIZE, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
@ -305,7 +308,7 @@ sandbox_allocate_memory(struct module *module)
}
sandbox = (struct sandbox *)addr;
// Set the struct sandbox, HTTP Req/Resp buffer, and the initial Wasm Pages as read/write
/* Set the struct sandbox, HTTP Req/Resp buffer, and the initial Wasm Pages as read/write */
errno = 0;
void *addr_rw = mmap(addr, sandbox_size + linear_memory_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
@ -314,7 +317,7 @@ sandbox_allocate_memory(struct module *module)
goto set_rw_failed;
}
// Populate Sandbox members
/* Populate Sandbox members */
sandbox->linear_memory_start = (char *)addr + sandbox_size;
sandbox->linear_memory_size = linear_memory_size;
sandbox->linear_memory_max_size = linear_memory_max_size;
@ -366,37 +369,37 @@ sandbox_allocate(sandbox_request_t *sandbox_request)
int rc;
struct sandbox *sandbox = NULL;
// Allocate Sandbox control structures, buffers, and linear memory in a 4GB address space
/* Allocate Sandbox control structures, buffers, and linear memory in a 4GB address space */
errno = 0;
sandbox = (struct sandbox *)sandbox_allocate_memory(sandbox_request->module);
if (!sandbox) goto err_memory_allocation_failed;
// Set state to initializing
/* Set state to initializing */
sandbox->state = INITIALIZING;
// Allocate the Stack
/* Allocate the Stack */
rc = sandbox_allocate_stack(sandbox);
if (rc != 0) goto err_stack_allocation_failed;
// Copy the socket descriptor, address, and arguments of the client invocation
/* Copy the socket descriptor, address, and arguments of the client invocation */
sandbox->absolute_deadline = sandbox_request->absolute_deadline;
sandbox->arguments = (void *)sandbox_request->arguments;
sandbox->client_socket_descriptor = sandbox_request->socket_descriptor;
sandbox->start_time = sandbox_request->start_time;
// Initialize the sandbox's context, stack, and instruction pointer
/* Initialize the sandbox's context, stack, and instruction pointer */
arch_context_init(&sandbox->ctxt, (reg_t)current_sandbox_main,
(reg_t)(sandbox->stack_start + sandbox->stack_size));
// What does it mean if there isn't a socket_address? Shouldn't this be a hard requirement?
// It seems that only the socket descriptor is used to send response
/* TODO: What does it mean if there isn't a socket_address? Shouldn't this be a hard requirement?
It seems that only the socket descriptor is used to send response */
const struct sockaddr *socket_address = sandbox_request->socket_address;
if (socket_address) memcpy(&sandbox->client_address, socket_address, sizeof(struct sockaddr));
// Initialize file descriptors to -1
/* Initialize file descriptors to -1 */
for (int i = 0; i < SANDBOX_MAX_IO_HANDLE_COUNT; i++) sandbox->io_handles[i].file_descriptor = -1;
// Initialize Parsec control structures (used by Completion Queue)
/* Initialize Parsec control structures (used by Completion Queue) */
ps_list_init_d(sandbox);
done:
@ -412,7 +415,7 @@ err:
/**
* Free stack and heap resources.. also any I/O handles.
* @param sandbox
**/
*/
void
sandbox_free(struct sandbox *sandbox)
{
@ -429,7 +432,7 @@ sandbox_free(struct sandbox *sandbox)
size_t stksz = sandbox->stack_size;
// Free Sandbox Stack
/* Free Sandbox Stack */
errno = 0;
rc = munmap(stkaddr, stksz);
if (rc == -1) {
@ -438,9 +441,9 @@ sandbox_free(struct sandbox *sandbox)
};
// Free Sandbox Linear Address Space
// struct sandbox | HTTP Buffer | 4GB of Wasm Linear Memory | Guard Page
// sandbox_size includes the struct and HTTP buffer
/* Free Sandbox Linear Address Space
struct sandbox | HTTP Buffer | 4GB of Wasm Linear Memory | Guard Page
sandbox_size includes the struct and HTTP buffer */
size_t sandbox_address_space_size = sandbox->sandbox_size + sandbox->linear_memory_max_size
+ /* guard page */ PAGE_SIZE;
@ -455,7 +458,7 @@ done:
return;
err_free_sandbox_failed:
err_free_stack_failed:
// Inability to free memory is a fatal error
/* Inability to free memory is a fatal error */
perror(error_message);
exit(EXIT_FAILURE);
err:
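
The address-space layout described in sandbox_allocate_memory above (struct sandbox | HTTP buffer | 4GB of Wasm linear memory | guard page) comes down to reserving one large PROT_NONE region and then remapping a read/write prefix over it. A minimal sketch assuming a 64-bit address space; the sizes here are placeholders.

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

#define EXAMPLE_PAGE_SIZE      4096UL
#define EXAMPLE_WASM_PAGE_SIZE (64UL * 1024)

int
main(void)
{
        unsigned long control_size       = EXAMPLE_PAGE_SIZE;      /* stand-in for struct sandbox + HTTP buffer */
        unsigned long linear_memory_size = EXAMPLE_WASM_PAGE_SIZE; /* the initial Wasm pages */
        uint64_t      linear_memory_max  = (uint64_t)1 << 32;      /* 4GB ceiling */

        /* Reserve the whole range, plus a guard page, as inaccessible */
        void *addr = mmap(NULL, control_size + linear_memory_max + EXAMPLE_PAGE_SIZE, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == MAP_FAILED) {
                perror("mmap reserve");
                return 1;
        }

        /* Remap only the control structures and the initial linear memory as read/write */
        void *addr_rw = mmap(addr, control_size + linear_memory_size, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        if (addr_rw == MAP_FAILED) {
                perror("mmap rw");
                return 1;
        }

        printf("sandbox region at %p, linear memory starts at %p\n", addr, (char *)addr + control_size);
        return 0;
}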

@ -17,8 +17,8 @@ sandbox_completion_queue_is_empty()
/**
* Adds sandbox to the completion queue
* @param sandbox
**/
* @param sandbox to add to completion queue
*/
void
sandbox_completion_queue_add(struct sandbox *sandbox)
{

@ -1,17 +1,24 @@
#include <sandbox_request_scheduler.h>
// The global of our polymorphic interface
/* The global of our polymorphic interface */
static sandbox_request_scheduler_config_t sandbox_request_scheduler;
// Initializes a concrete implementation of the sandbox request scheduler interface
/**
* Initializes the polymorphic interface with a concrete implementation
* @param config
*/
void
sandbox_request_scheduler_initialize(sandbox_request_scheduler_config_t *config)
{
memcpy(&sandbox_request_scheduler, config, sizeof(sandbox_request_scheduler_config_t));
}
// Adds a sandbox request
/**
* Adds a sandbox request to the request scheduler
* @param sandbox_request
*/
sandbox_request_t *
sandbox_request_scheduler_add(sandbox_request_t *sandbox_request)
{
@ -19,7 +26,10 @@ sandbox_request_scheduler_add(sandbox_request_t *sandbox_request)
return sandbox_request_scheduler.add(sandbox_request);
}
// Removes a sandbox request
/**
* Removes a sandbox request according to the scheduling policy of the variant
* @returns pointer to a sandbox request
*/
sandbox_request_t *
sandbox_request_scheduler_remove()
{
@ -27,6 +37,10 @@ sandbox_request_scheduler_remove()
return sandbox_request_scheduler.remove();
}
/**
* Peeks at the priority of the highest priority sandbox request
* @returns highest priority
*/
uint64_t
sandbox_request_scheduler_peek()
{
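
This file is the dispatch half of a function-pointer "polymorphic interface": each variant fills in a config struct and the rest of the runtime only calls through it. The following is a self-contained sketch of the same pattern using a hypothetical, much smaller scheduler interface; all names here are invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical interface: each scheduler variant supplies these operations */
typedef struct {
        int      (*add)(uint64_t deadline);
        uint64_t (*peek)(void);
} scheduler_config_t;

static scheduler_config_t scheduler; /* the global behind the dispatch functions */

static void
scheduler_initialize(scheduler_config_t *config)
{
        memcpy(&scheduler, config, sizeof(scheduler_config_t));
}

static int      scheduler_add(uint64_t deadline) { return scheduler.add(deadline); }
static uint64_t scheduler_peek(void) { return scheduler.peek(); }

/* One concrete variant: remembers only the minimum deadline seen */
static uint64_t minimum_deadline = UINT64_MAX;
static int      min_add(uint64_t deadline) { if (deadline < minimum_deadline) minimum_deadline = deadline; return 0; }
static uint64_t min_peek(void) { return minimum_deadline; }

int
main(void)
{
        scheduler_config_t config = { .add = min_add, .peek = min_peek };
        scheduler_initialize(&config);

        scheduler_add(500);
        scheduler_add(100);
        printf("earliest deadline: %lu\n", (unsigned long)scheduler_peek()); /* 100 */
        return 0;
}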

@ -1,6 +1,5 @@
#include <sandbox_request_scheduler.h>
// Local State
static struct deque_sandbox *runtime_global_deque;
static pthread_mutex_t runtime_global_deque_mutex = PTHREAD_MUTEX_INITIALIZER;
@ -8,15 +7,15 @@ static pthread_mutex_t runtime_global_deque_mutex = PTHREAD_MUTEX_INITIALI
* Pushes a sandbox request to the global deque
* @param sandbox_request
* @returns pointer to request if added. NULL otherwise
**/
*/
static sandbox_request_t *
sandbox_request_scheduler_fifo_add(void *sandbox_request_raw)
{
sandbox_request_t *sandbox_request = (sandbox_request_t *)sandbox_request_raw;
int return_code = 1;
// TODO: Running the runtime and listener cores on a single shared core is untested
// We are unsure if the locking behavior is correct, so there may be deadlocks
/* TODO: Running the runtime and listener cores on a single shared core is untested
We are unsure if the locking behavior is correct, so there may be deadlocks */
#if NCORES == 1
pthread_mutex_lock(&runtime_global_deque_mutex);
#endif
@ -40,7 +39,7 @@ sandbox_request_scheduler_fifo_add(void *sandbox_request_raw)
* and add an assert in "steal" function for NCORES == 1.
*
* @returns A Sandbox Request or NULL
**/
*/
static sandbox_request_t *
sandbox_request_scheduler_fifo_remove(void)
{
@ -60,13 +59,13 @@ sandbox_request_scheduler_fifo_remove(void)
void
sandbox_request_scheduler_fifo_initialize()
{
// Allocate and Initialize the global deque
/* Allocate and Initialize the global deque */
runtime_global_deque = (struct deque_sandbox *)malloc(sizeof(struct deque_sandbox));
assert(runtime_global_deque);
// Note: Below is a Macro
/* Note: Below is a Macro */
deque_init_sandbox(runtime_global_deque, RUNTIME_MAX_SANDBOX_REQUEST_COUNT);
// Register Function Pointers for Abstract Scheduling API
/* Register Function Pointers for Abstract Scheduling API */
sandbox_request_scheduler_config_t config = { .add = sandbox_request_scheduler_fifo_add,
.remove = sandbox_request_scheduler_fifo_remove };

@ -1,14 +1,13 @@
#include <sandbox_request_scheduler.h>
#include "priority_queue.h"
// Local State
static struct priority_queue sandbox_request_scheduler_ps;
/**
* Pushes a sandbox request to the global priority queue
* @param sandbox_request
* @returns pointer to request if added. NULL otherwise
**/
*/
static sandbox_request_t *
sandbox_request_scheduler_ps_add(void *sandbox_request)
{
@ -25,7 +24,7 @@ sandbox_request_scheduler_ps_add(void *sandbox_request)
/**
*
* @returns A Sandbox Request or NULL
**/
*/
static sandbox_request_t *
sandbox_request_scheduler_ps_remove(void)
{
@ -35,7 +34,7 @@ sandbox_request_scheduler_ps_remove(void)
/**
*
* @returns A Sandbox Request or NULL
**/
*/
static uint64_t
sandbox_request_scheduler_ps_peek(void)
{
@ -51,15 +50,13 @@ sandbox_request_get_priority(void *element)
/**
*
**/
* Initializes the variant and registers against the polymorphic interface
*/
void
sandbox_request_scheduler_ps_initialize()
{
// Initialize local state
priority_queue_initialize(&sandbox_request_scheduler_ps, sandbox_request_get_priority);
// Register Function Pointers for Abstract Scheduling API
sandbox_request_scheduler_config_t config = { .add = sandbox_request_scheduler_ps_add,
.remove = sandbox_request_scheduler_ps_remove,
.peek = sandbox_request_scheduler_ps_peek };

@ -1,16 +1,19 @@
#include <sandbox_run_queue.h>
// The global of our polymorphic interface
static sandbox_run_queue_config_t sandbox_run_queue;
// Initializes a concrete implementation of the sandbox request scheduler interface
/* Initializes a concrete implementation of the sandbox request scheduler interface */
void
sandbox_run_queue_initialize(sandbox_run_queue_config_t *config)
{
memcpy(&sandbox_run_queue, config, sizeof(sandbox_run_queue_config_t));
}
// Adds a sandbox request
/**
* Adds a sandbox request to the run queue
* @param sandbox to add
* @returns sandbox that was added (or NULL?)
*/
struct sandbox *
sandbox_run_queue_add(struct sandbox *sandbox)
{
@ -18,6 +21,10 @@ sandbox_run_queue_add(struct sandbox *sandbox)
return sandbox_run_queue.add(sandbox);
}
/**
* Delete a sandbox from the run queue
* @param sandbox to delete
*/
void
sandbox_run_queue_delete(struct sandbox *sandbox)
{
@ -25,7 +32,10 @@ sandbox_run_queue_delete(struct sandbox *sandbox)
sandbox_run_queue.delete(sandbox);
}
/**
* Checks if run queue is empty
* @returns true if empty
*/
bool
sandbox_run_queue_is_empty()
{
@ -33,6 +43,10 @@ sandbox_run_queue_is_empty()
return sandbox_run_queue.is_empty();
}
/**
* Get next sandbox from run queue, where next is defined by the scheduling policy of the variant
* @returns sandbox (or NULL?)
*/
struct sandbox *
sandbox_run_queue_get_next()
{
@ -40,6 +54,10 @@ sandbox_run_queue_get_next()
return sandbox_run_queue.get_next();
};
/**
* Preempt the current sandbox according to the scheduler variant
* @param context
*/
void
sandbox_run_queue_preempt(ucontext_t *context)
{

@ -10,7 +10,7 @@ sandbox_run_queue_fifo_is_empty()
return ps_list_head_empty(&sandbox_run_queue_fifo);
}
// Get the sandbox at the head of the thread local runqueue
/* Get the sandbox at the head of the thread local runqueue */
struct sandbox *
sandbox_run_queue_fifo_get_head()
{
@ -20,7 +20,7 @@ sandbox_run_queue_fifo_get_head()
/**
* Removes the thread from the thread-local runqueue
* @param sandbox sandbox
**/
*/
void
sandbox_run_queue_fifo_remove(struct sandbox *sandbox_to_remove)
{
@ -39,7 +39,7 @@ sandbox_run_queue_fifo_remove_and_return()
* Execute the sandbox at the head of the thread local runqueue
* If the runqueue is empty, pull a fresh batch of sandbox requests, instantiate them, and then execute the new head
* @return the sandbox to execute or NULL if none are available
**/
*/
struct sandbox *
sandbox_run_queue_fifo_get_next()
{
@ -54,7 +54,7 @@ sandbox_run_queue_fifo_get_next()
return sandbox;
}
// Execute Round Robin Scheduling Logic
/* Execute Round Robin Scheduling Logic */
struct sandbox *next_sandbox = sandbox_run_queue_fifo_remove_and_return();
assert(next_sandbox->state != RETURNED);
sandbox_run_queue_add(next_sandbox);
@ -64,7 +64,10 @@ sandbox_run_queue_fifo_get_next()
}
// Append a sandbox to the runqueue
/**
* Append a sandbox to the runqueue
* @returns the appended sandbox
*/
struct sandbox *
sandbox_run_queue_fifo_append(struct sandbox *sandbox_to_append)
{
@ -75,8 +78,9 @@ sandbox_run_queue_fifo_append(struct sandbox *sandbox_to_append)
return sandbox_to_append;
}
// Conditionally checks to see if current sandbox should be preempted
// FIFO doesn't preempt, so just return.
/**
* Conditionally checks to see if current sandbox should be preempted. FIFO doesn't preempt, so just return.
*/
void
sandbox_run_queue_fifo_preempt(ucontext_t *user_context)
{
@ -89,7 +93,7 @@ sandbox_run_queue_fifo_initialize()
{
ps_list_head_init(&sandbox_run_queue_fifo);
// Register Function Pointers for Abstract Scheduling API
/* Register Function Pointers for Abstract Scheduling API */
sandbox_run_queue_config_t config = { .add = sandbox_run_queue_fifo_append,
.is_empty = sandbox_run_queue_fifo_is_empty,
.delete = sandbox_run_queue_fifo_remove,

@ -6,9 +6,12 @@
#include <stdint.h>
// Local State
__thread static struct priority_queue sandbox_run_queue_ps;
/**
* Checks if the run queue is empty
* @returns true if empty. false otherwise
*/
bool
sandbox_run_queue_ps_is_empty()
{
@ -18,10 +21,10 @@ sandbox_run_queue_ps_is_empty()
}
/**
* Pushes a sandbox to the runqueue
* Adds a sandbox to the run queue
* @param sandbox
* @returns pointer to request if added. NULL otherwise
**/
*/
static struct sandbox *
sandbox_run_queue_ps_add(struct sandbox *sandbox)
{
@ -37,24 +40,24 @@ sandbox_run_queue_ps_add(struct sandbox *sandbox)
assert(final_length == original_length + 1);
// printf("Added Sandbox to runqueue\n");
assert(return_code == 0);
return sandbox;
}
/**
*
* @returns A Sandbox or NULL
**/
* Removes the highest priority sandbox from the run queue
* @returns A Sandbox or NULL if empty
*/
static struct sandbox *
sandbox_run_queue_ps_remove(void)
sandbox_run_queue_ps_remove()
{
return (struct sandbox *)priority_queue_dequeue(&sandbox_run_queue_ps, "Runqueue");
}
/**
* @returns A Sandbox or NULL
**/
* Deletes a sandbox from the runqueue
* @param sandbox to delete
*/
static void
sandbox_run_queue_ps_delete(struct sandbox *sandbox)
{
@ -70,19 +73,16 @@ sandbox_run_queue_ps_delete(struct sandbox *sandbox)
* Execute the sandbox at the head of the thread local runqueue
* If the runqueue is empty, pull a fresh batch of sandbox requests, instantiate them, and then execute the new head
* @return the sandbox to execute or NULL if none are available
**/
*/
struct sandbox *
sandbox_run_queue_ps_get_next()
{
if (sandbox_run_queue_is_empty()) {
// Try to pull a sandbox request and return NULL if we're unable to get one
/* Try to pull a sandbox request and return NULL if we're unable to get one */
sandbox_request_t *sandbox_request;
if ((sandbox_request = sandbox_request_scheduler_remove()) == NULL) {
// printf("Global Request Queue was empty!\n");
return NULL;
};
if ((sandbox_request = sandbox_request_scheduler_remove()) == NULL) { return NULL; };
// Otherwise, allocate the sandbox request as a runnable sandbox and place on the runqueue
/* Otherwise, allocate the sandbox request as a runnable sandbox and place on the runqueue */
struct sandbox *sandbox = sandbox_allocate(sandbox_request);
if (sandbox == NULL) return NULL;
assert(sandbox);
@ -92,27 +92,29 @@ sandbox_run_queue_ps_get_next()
return sandbox;
}
// Resume the sandbox at the top of the runqueue
/* Resume the sandbox at the top of the runqueue */
struct sandbox *sandbox = sandbox_run_queue_ps_remove();
sandbox_run_queue_ps_add(sandbox);
return sandbox;
}
// Conditionally checks to see if current sandbox should be preempted
/**
* Conditionally checks to see if current sandbox should be preempted
*/
void
sandbox_run_queue_ps_preempt(ucontext_t *user_context)
{
software_interrupt_disable(); // no nesting!
software_interrupt_disable(); /* no nesting! */
struct sandbox *current_sandbox = current_sandbox_get();
// If current_sandbox is null, there's nothing to preempt, so let the "main" scheduler run its course.
/* If current_sandbox is null, there's nothing to preempt, so let the "main" scheduler run its course. */
if (current_sandbox == NULL) {
software_interrupt_enable();
return;
};
// The current sandbox should be the head of the runqueue
/* The current sandbox should be the head of the runqueue */
assert(sandbox_run_queue_ps_is_empty() == false);
// TODO: Factor quantum and/or sandbox allocation time into decision
@ -123,35 +125,35 @@ sandbox_run_queue_ps_preempt(ucontext_t *user_context)
uint64_t local_deadline = priority_queue_peek(&sandbox_run_queue_ps);
uint64_t global_deadline = sandbox_request_scheduler_peek();
// Our local deadline should only be ULONG_MAX if our local runqueue is empty
/* Our local deadline should only be ULONG_MAX if our local runqueue is empty */
if (local_deadline == ULONG_MAX) { assert(sandbox_run_queue_ps.first_free == 1); };
// If we're able to get a sandbox request with a tighter deadline, preempt the current context and run it
/* If we're able to get a sandbox request with a tighter deadline, preempt the current context and run it */
sandbox_request_t *sandbox_request;
if (global_deadline < local_deadline && (sandbox_request = sandbox_request_scheduler_remove()) != NULL) {
printf("Thread %lu Preempted %lu for %lu\n", pthread_self(), local_deadline,
sandbox_request->absolute_deadline);
// Allocate the request
/* Allocate the request */
struct sandbox *next_sandbox = sandbox_allocate(sandbox_request);
assert(next_sandbox);
free(sandbox_request);
next_sandbox->state = RUNNABLE;
// Add it to the runqueue
/* Add it to the runqueue */
printf("adding new sandbox to runqueue\n");
sandbox_run_queue_add(next_sandbox);
debuglog("[%p: %s]\n", sandbox, sandbox->module->name);
// Save the context of the currently executing sandbox before switching from it
/* Save the context of the currently executing sandbox before switching from it */
arch_mcontext_save(&current_sandbox->ctxt, &user_context->uc_mcontext);
// Update current_sandbox to the next sandbox
/* Update current_sandbox to the next sandbox */
current_sandbox_set(next_sandbox);
// And load the context of this new sandbox
// RC of 1 indicates that sandbox was last in a user-level context switch state,
// so do not enable software interrupts.
/* And load the context of this new sandbox
RC of 1 indicates that sandbox was last in a user-level context switch state,
so do not enable software interrupts. */
if (arch_mcontext_restore(&user_context->uc_mcontext, &next_sandbox->ctxt) == 1)
should_enable_software_interrupt = false;
}
@ -166,13 +168,16 @@ sandbox_get_priority(void *element)
return sandbox->absolute_deadline;
};
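
The preemption test in sandbox_run_queue_ps_preempt above is a plain earliest-deadline-first comparison between the local runqueue and the global request scheduler, with ULONG_MAX standing in for an empty queue. A trivial sketch of just that decision, with hypothetical deadline values:

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
should_preempt(uint64_t local_deadline, uint64_t global_deadline)
{
        /* Earliest-deadline-first: a pending request preempts only if it is strictly
         * more urgent than everything on the local runqueue */
        return global_deadline < local_deadline;
}

int
main(void)
{
        printf("%d\n", should_preempt(ULONG_MAX, 4000)); /* 1: idle worker takes any request */
        printf("%d\n", should_preempt(1000, 4000));      /* 0: the local sandbox is more urgent */
        printf("%d\n", should_preempt(4000, 1000));      /* 1: the global request is more urgent */
        return 0;
}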
/**
* Registers the PS variant with the polymorphic interface
**/
void
sandbox_run_queue_ps_initialize()
{
// Initialize local state
/* Initialize local state */
priority_queue_initialize(&sandbox_run_queue_ps, sandbox_get_priority);
// Register Function Pointers for Abstract Scheduling API
/* Register Function Pointers for Abstract Scheduling API */
sandbox_run_queue_config_t config = { .add = sandbox_run_queue_ps_add,
.is_empty = sandbox_run_queue_ps_is_empty,
.delete = sandbox_run_queue_ps_delete,

@ -15,16 +15,16 @@
#include <current_sandbox.h>
#include "sandbox_run_queue.h"
/***************************************
* Process Globals
***************************************/
/*******************
* Process Globals *
******************/
static const int software_interrupt_supported_signals[] = { SIGALRM, SIGUSR1 };
uint64_t SOFTWARE_INTERRUPT_INTERVAL_DURATION_IN_CYCLES;
/***************************************
* Thread Globals
***************************************/
/******************
* Thread Globals *
*****************/
__thread static volatile sig_atomic_t software_interrupt_SIGALRM_count = 0;
__thread static volatile sig_atomic_t software_interrupt_SIGUSR_count = 0;
@ -32,13 +32,13 @@ __thread volatile sig_atomic_t software_interrupt_is_disabled = 0;
/***************************************
* Externs
***************************************/
**************************************/
extern pthread_t runtime_worker_threads[];
/***************************************
* Private Static Inlines
***************************************/
/**************************
* Private Static Inlines *
*************************/
static inline void software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void *user_context_raw);
@ -49,7 +49,7 @@ static inline void software_interrupt_handle_signals(int signal_type, siginfo_t
* @param signal_type
* @param signal_info data structure containing signal info
* @param user_context_raw void* to a user_context struct
**/
*/
static inline void
software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void *user_context_raw)
{
@ -61,37 +61,36 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
switch (signal_type) {
case SIGALRM: {
// SIGALRM is the preemption signal that occurs every quantum of execution
/* SIGALRM is the preemption signal that occurs every quantum of execution */
// A POSIX signal is delivered to one of the threads in our process. If sent by the kernel, "broadcast"
// by forwarding to all all threads
/* A POSIX signal is delivered to one of the threads in our process. If sent by the kernel, "broadcast"
 * by forwarding to all threads */
if (signal_info->si_code == SI_KERNEL) {
for (int i = 0; i < WORKER_THREAD_CORE_COUNT; i++) {
if (pthread_self() == runtime_worker_threads[i]) continue;
pthread_kill(runtime_worker_threads[i], SIGALRM);
}
} else {
// If not sent by the kernel, this should be a signal forwarded from another thread
/* If not sent by the kernel, this should be a signal forwarded from another thread */
assert(signal_info->si_code == SI_TKILL);
}
// debuglog("alrm:%d\n", software_interrupt_SIGALRM_count);
software_interrupt_SIGALRM_count++;
// if the current sandbox is NULL or not in a RETURNED state
/* if the current sandbox is NULL or not in a RETURNED state */
if (current_sandbox && current_sandbox->state == RETURNED) return;
// and the next context is NULL
/* and the next context is NULL */
if (worker_thread_next_context) return;
// and software interrupts are not disabled
/* and software interrupts are not disabled */
if (!software_interrupt_is_enabled()) return;
// Preempt
/* Preempt */
sandbox_run_queue_preempt(user_context);
return;
}
case SIGUSR1: {
// SIGUSR1 restores the preempted sandbox stored in worker_thread_next_context
// make sure sigalrm doesn't mess this up if nested..
case SIGUSR1: { /* SIGUSR1 restores the preempted sandbox stored in worker_thread_next_context. */
/* Make sure SIGALRM doesn't mess this up if nested.. */
assert(!software_interrupt_is_enabled());
/* we set current before calling pthread_kill! */
assert(worker_thread_next_context && (&current_sandbox->ctxt == worker_thread_next_context));
@ -99,10 +98,10 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
// debuglog("usr1:%d\n", software_interrupt_SIGUSR_count);
software_interrupt_SIGUSR_count++;
// do not save current sandbox.. it is in co-operative switch..
// pick the next from "worker_thread_next_context"..
// assert its "sp" to be zero in regs..
// memcpy from next context..
/* do not save current sandbox.. it is in co-operative switch..
pick the next from "worker_thread_next_context"..
assert its "sp" to be zero in regs..
memcpy from next context.. */
arch_mcontext_restore(&user_context->uc_mcontext, &current_sandbox->ctxt);
worker_thread_next_context = NULL;
software_interrupt_enable();
@ -114,13 +113,13 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
#endif
}
/***************************************
* Public Functions
***************************************/
/********************
* Public Functions *
*******************/
/**
* Arms the Interval Timer to start in 10ms and then trigger a SIGALRM every 5ms
**/
*/
void
software_interrupt_arm_timer(void)
{
@ -141,7 +140,7 @@ software_interrupt_arm_timer(void)
/**
* Disarm the Interval Timer
**/
*/
void
software_interrupt_disarm_timer(void)
{
@ -162,7 +161,7 @@ software_interrupt_disarm_timer(void)
/**
* Initialize software Interrupts
* Register the software interrupt handler to execute on SIGALRM and SIGUSR1
**/
*/
void
software_interrupt_initialize(void)
{
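
The timer described above maps onto setitimer: it_value sets the first expiry and it_interval the period. A minimal sketch, assuming a SIGALRM handler that only counts ticks (the real handler decides whether to preempt):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t alarm_count = 0;

static void
on_sigalrm(int signal_type)
{
        (void)signal_type;
        alarm_count++; /* the real handler decides here whether to preempt the current sandbox */
}

int
main(void)
{
        struct sigaction action;
        memset(&action, 0, sizeof(action));
        action.sa_handler = on_sigalrm;
        sigaction(SIGALRM, &action, NULL);

        /* First expiry after 10ms, then a SIGALRM every 5ms, mirroring the comment above */
        struct itimerval interval_timer;
        memset(&interval_timer, 0, sizeof(interval_timer));
        interval_timer.it_value.tv_usec    = 10 * 1000;
        interval_timer.it_interval.tv_usec = 5 * 1000;
        setitimer(ITIMER_REAL, &interval_timer, NULL);

        while (alarm_count < 100) pause(); /* roughly half a second of 5ms ticks */
        printf("received %d SIGALRMs\n", (int)alarm_count);
        return 0;
}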

@ -1,15 +1,15 @@
// Something is not idempotent with this or some other include.
// If placed in Local Includes, error is triggered that memset was implicitly declared
/* Something is not idempotent with this or some other include.
If placed in Local Includes, error is triggered that memset was implicitly declared */
#include <runtime.h>
/***************************
* External Includes *
**************************/
#include <pthread.h> // POSIX Threads
#include <signal.h> // POSIX Signals
#include <sched.h> // Wasmception. Included as submodule
#include <sys/mman.h> // Wasmception. Included as submodule
#include <uv.h> // Libuv
#include <pthread.h>
#include <signal.h>
#include <sched.h> /* Wasmception. Included as submodule */
#include <sys/mman.h> /* Wasmception. Included as submodule */
#include <uv.h>
/***************************
* Local Includes *
@ -27,21 +27,21 @@
* Worker Thread State *
**************************/
// context pointer used to store and restore a preempted sandbox. SIGUSR1
/* context pointer used to store and restore a preempted sandbox. SIGUSR1 */
__thread arch_context_t *worker_thread_next_context = NULL;
// context of the runtime thread before running sandboxes or to resume its "main".
/* context of the runtime thread before running sandboxes or to resume its "main". */
__thread arch_context_t worker_thread_base_context;
// libuv i/o loop handle per sandboxing thread!
/* libuv i/o loop handle per sandboxing thread! */
__thread uv_loop_t worker_thread_uvio_handle;
// Flag to signify if the thread is currently running callbacks in the libuv event loop
/* Flag to signify if the thread is currently running callbacks in the libuv event loop */
static __thread bool worker_thread_is_in_callback;
/**************************************************
* Worker Thread Logic
*************************************************/
/***********************
* Worker Thread Logic *
**********************/
/**
* @brief Switches to the next sandbox, placing the current sandbox on the completion queue if in RETURNED state
@ -55,22 +55,22 @@ worker_thread_switch_to_sandbox(struct sandbox *next_sandbox)
software_interrupt_disable();
// Get the old sandbox we're switching from
/* Get the old sandbox we're switching from */
struct sandbox *previous_sandbox = current_sandbox_get();
arch_context_t *previous_register_context = previous_sandbox == NULL ? NULL : &previous_sandbox->ctxt;
// Set the current sandbox to the next
/* Set the current sandbox to the next */
current_sandbox_set(next_sandbox);
// and switch to the associated context.
// Save the context pointer to worker_thread_next_context in case of preemption
/* ...and switch to the associated context.
Save the context pointer to worker_thread_next_context in case of preemption */
worker_thread_next_context = next_register_context;
arch_context_switch(previous_register_context, next_register_context);
assert(previous_sandbox == NULL || previous_sandbox->state == RUNNABLE || previous_sandbox->state == BLOCKED
|| previous_sandbox->state == RETURNED);
// If the current sandbox we're switching from is in a RETURNED state, add to completion queue
/* If the current sandbox we're switching from is in a RETURNED state, add to completion queue */
if (previous_sandbox != NULL && previous_sandbox->state == RETURNED) {
sandbox_completion_queue_add(previous_sandbox);
} else if (previous_sandbox != NULL) {
@ -83,7 +83,7 @@ worker_thread_switch_to_sandbox(struct sandbox *next_sandbox)
/**
* Mark a blocked sandbox as runnable and add it to the runqueue
* @param sandbox the sandbox to check and update if blocked
**/
*/
void
worker_thread_wakeup_sandbox(sandbox_t *sandbox)
{
@ -100,20 +100,20 @@ worker_thread_wakeup_sandbox(sandbox_t *sandbox)
/**
* Mark the currently executing sandbox as blocked, remove it from the local runqueue, and pull the sandbox at the head
*of the runqueue
**/
* of the runqueue
*/
void
worker_thread_block_current_sandbox(void)
{
assert(worker_thread_is_in_callback == false);
software_interrupt_disable();
// Remove the sandbox we were just executing from the runqueue and mark as blocked
/* Remove the sandbox we were just executing from the runqueue and mark as blocked */
struct sandbox *previous_sandbox = current_sandbox_get();
sandbox_run_queue_delete(previous_sandbox);
previous_sandbox->state = BLOCKED;
// Switch to the next sandbox
/* Switch to the next sandbox */
struct sandbox *next_sandbox = sandbox_run_queue_get_next();
debuglog("[%p: %next_sandbox, %p: %next_sandbox]\n", previous_sandbox, previous_sandbox->module->name,
next_sandbox, next_sandbox ? next_sandbox->module->name : "");
@ -124,41 +124,41 @@ worker_thread_block_current_sandbox(void)
/**
* Execute I/O
**/
*/
void
worker_thread_process_io(void)
{
#ifdef USE_HTTP_UVIO
#ifdef USE_HTTP_SYNC
// realistically, we're processing all async I/O on this core when a sandbox blocks on http processing, not
// great! if there is a way (TODO), perhaps RUN_ONCE and check if your I/O is processed, if yes, return else do
// async block!
/* realistically, we're processing all async I/O on this core when a sandbox blocks on http processing, not
* great! if there is a way (TODO), perhaps RUN_ONCE and check if your I/O is processed, if yes, return else do
* async block! */
uv_run(worker_thread_get_libuv_handle(), UV_RUN_DEFAULT);
#else /* USE_HTTP_SYNC */
worker_thread_block_current_sandbox();
#endif /* USE_HTTP_UVIO */
#else
assert(false);
// it should not be called if not using uvio for http
/* it should not be called if not using uvio for http */
#endif
}
/**
* Sends the current thread a SIGUSR1, causing a preempted sandbox to be restored
* Invoked by asm during a context switch
**/
*/
void __attribute__((noinline)) __attribute__((noreturn)) worker_thread_sandbox_switch_preempt(void)
{
pthread_kill(pthread_self(), SIGUSR1);
assert(false); // should not get here..
assert(false); /* should not get here.. */
while (true)
;
}
/**
* Run all outstanding events in the local thread's libuv event loop
**/
*/
void
worker_thread_execute_libuv_event_loop(void)
{
@ -175,11 +175,11 @@ worker_thread_execute_libuv_event_loop(void)
* The entry function for sandbox worker threads
* Initializes thread-local state, unmasks signals, sets up libuv loop and
* @param return_code - argument provided by pthread API. We set to -1 on error
**/
*/
void *
worker_thread_main(void *return_code)
{
// Initialize Worker Infrastructure
/* Initialize Worker Infrastructure */
arch_context_init(&worker_thread_base_context, 0, 0);
// sandbox_run_queue_fifo_initialize();
sandbox_run_queue_ps_initialize();
@ -193,11 +193,11 @@ worker_thread_main(void *return_code)
uv_loop_init(&worker_thread_uvio_handle);
worker_thread_is_in_callback = false;
// Begin Worker Execution Loop
/* Begin Worker Execution Loop */
struct sandbox *next_sandbox;
while (true) {
assert(current_sandbox_get() == NULL);
// If "in a callback", the libuv event loop is triggering this, so we don't need to start it
/* If "in a callback", the libuv event loop is triggering this, so we don't need to start it */
if (!worker_thread_is_in_callback) worker_thread_execute_libuv_event_loop();
software_interrupt_disable();
@ -218,20 +218,20 @@ worker_thread_main(void *return_code)
* Removes the sandbox from the thread-local runqueue, sets its state to RETURNED,
* releases the linear memory, and then switches to the sandbox at the head of the runqueue
* TODO: Consider moving this to a future current_sandbox file. This has thus far proven difficult to move
**/
*/
void
worker_thread_on_sandbox_exit(sandbox_t *exiting_sandbox)
{
assert(exiting_sandbox);
// TODO: I do not understand when software interrupts must be disabled?
/* TODO: I do not understand when software interrupts must be disabled? */
software_interrupt_disable();
sandbox_run_queue_delete(exiting_sandbox);
exiting_sandbox->state = RETURNED;
software_interrupt_enable();
// Because the stack is still in use, only unmap linear memory and defer free resources until "main
// function execution"
/* Because the stack is still in use, only unmap linear memory and defer free resources until "main
function execution" */
int rc = munmap(exiting_sandbox->linear_memory_start, SBOX_MAX_MEM + PAGE_SIZE);
if (rc == -1) {
perror("worker_thread_on_sandbox_exit - munmap failed");
@ -240,6 +240,6 @@ worker_thread_on_sandbox_exit(sandbox_t *exiting_sandbox)
sandbox_completion_queue_add(exiting_sandbox);
// This should force return to main event loop
/* This should force return to main event loop */
worker_thread_switch_to_sandbox(NULL);
}
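
worker_thread_sandbox_switch_preempt above relies on pthread_kill delivering a signal to one specific thread rather than to the whole process. Below is a small sketch of that delivery (compile with -pthread); the handler body is only a placeholder for the context-restore logic.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t sigusr1_seen = 0;

static void
on_sigusr1(int signal_type)
{
        (void)signal_type;
        sigusr1_seen = 1; /* the runtime would restore worker_thread_next_context here */
}

int
main(void)
{
        struct sigaction action;
        memset(&action, 0, sizeof(action));
        action.sa_handler = on_sigusr1;
        sigaction(SIGUSR1, &action, NULL);

        /* pthread_kill targets one specific thread; kill() would target the whole process */
        pthread_kill(pthread_self(), SIGUSR1);

        /* An unblocked signal sent to the calling thread is delivered before pthread_kill returns */
        printf("handler ran: %s\n", sigusr1_seen ? "yes" : "not yet");
        return 0;
}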
