From 2311ec1ccf63b08afc0cdbef5f24a455336a924b Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Wed, 8 Dec 2021 22:34:14 +0000 Subject: [PATCH 01/15] feat: wasm_memory pool --- runtime/include/module.h | 53 +++++++++++-- runtime/include/pool.h | 119 ++++++++++++++++++++++++++++ runtime/include/sandbox_functions.h | 4 +- runtime/include/wasm_memory.h | 18 ++++- runtime/src/module.c | 10 +-- runtime/src/sandbox.c | 12 +-- 6 files changed, 190 insertions(+), 26 deletions(-) create mode 100644 runtime/include/pool.h diff --git a/runtime/include/module.h b/runtime/include/module.h index 4b92c24..82df353 100644 --- a/runtime/include/module.h +++ b/runtime/include/module.h @@ -10,6 +10,7 @@ #include "awsm_abi.h" #include "http.h" #include "panic.h" +#include "pool.h" #include "types.h" #define MODULE_DEFAULT_REQUEST_RESPONSE_SIZE (PAGE_SIZE) @@ -17,6 +18,8 @@ #define MODULE_MAX_NAME_LENGTH 32 #define MODULE_MAX_PATH_LENGTH 256 +extern thread_local int worker_thread_idx; + /* * Defines the listen backlog, the queue length for completely established socketeds waiting to be accepted * If this value is greater than the value in /proc/sys/net/core/somaxconn (typically 128), then it is silently @@ -32,12 +35,18 @@ "MODULE_MAX_PENDING_CLIENT_REQUESTS likely exceeds the value in /proc/sys/net/core/somaxconn and thus may be silently truncated"; #endif +/* TODO: Dynamically size based on number of threads */ +#define MAX_WORKER_THREADS 64 + +struct module_pools { + struct pool memory[MAX_WORKER_THREADS]; +}; + struct module { /* Metadata from JSON Config */ char name[MODULE_MAX_NAME_LENGTH]; char path[MODULE_MAX_PATH_LENGTH]; uint32_t stack_size; /* a specification? */ - uint64_t max_memory; /* perhaps a specification of the module. (max 4GB) */ uint32_t relative_deadline_us; int port; struct admissions_info admissions_info; @@ -55,6 +64,8 @@ struct module { _Atomic uint32_t reference_count; /* ref count how many instances exist here. 
*/ struct wasm_table *indirect_table; + + struct module_pools pools; }; /************************* @@ -141,12 +152,42 @@ module_release(struct module *module) return; } +static inline struct wasm_memory * +module_allocate_linear_memory(struct module *module) +{ + assert(module != NULL); + + char *error_message = NULL; + + size_t initial = (size_t)module->abi.starting_pages * WASM_PAGE_SIZE; + size_t max = (size_t)module->abi.max_pages * WASM_PAGE_SIZE; + + assert(initial <= (size_t)UINT32_MAX + 1); + assert(max <= (size_t)UINT32_MAX + 1); + + struct wasm_memory *linear_memory = (struct wasm_memory *)pool_remove_nolock( + &module->pools.memory[worker_thread_idx]); + if (linear_memory == NULL) { + linear_memory = wasm_memory_allocate(initial, max); + if (unlikely(linear_memory == NULL)) return NULL; + } + + return linear_memory; +} + +static inline void +module_free_linear_memory(struct module *module, struct wasm_memory *memory) +{ + wasm_memory_wipe(memory); + wasm_memory_reinit(memory, module->abi.starting_pages * WASM_PAGE_SIZE); + pool_add_nolock(&module->pools.memory[worker_thread_idx], memory); +} + /******************************** * Public Methods from module.c * *******************************/ -void module_free(struct module *module); -struct module * - module_new(char *mod_name, char *mod_path, uint32_t stack_sz, uint32_t max_heap, uint32_t relative_deadline_us, - int port, int req_sz, int resp_sz, int admissions_percentile, uint32_t expected_execution_us); -int module_new_from_json(char *filename); +void module_free(struct module *module); +struct module *module_new(char *mod_name, char *mod_path, uint32_t stack_sz, uint32_t relative_deadline_us, int port, + int req_sz, int resp_sz, int admissions_percentile, uint32_t expected_execution_us); +int module_new_from_json(char *filename); diff --git a/runtime/include/pool.h b/runtime/include/pool.h new file mode 100644 index 0000000..c529037 --- /dev/null +++ b/runtime/include/pool.h @@ -0,0 +1,119 @@ 
+#pragma once + +#include +#include +#include + +#include "generic_thread.h" +#include "lock.h" +#include "ps_list.h" +#include "wasm_memory.h" + +struct pool { + bool use_lock; + lock_t lock; + struct ps_list_head list; +}; + +static inline bool +pool_is_empty(struct pool *self) +{ + assert(self != NULL); + + return ps_list_head_empty(&self->list); +} + +static inline void +pool_init(struct pool *self, bool use_lock) +{ + ps_list_head_init(&self->list); + self->use_lock = use_lock; + if (use_lock) LOCK_INIT(&self->lock); +} + +static inline void +pool_deinit(struct pool *self) +{ + if (pool_is_empty(self)) return; + + struct wasm_memory *iterator = NULL; + struct wasm_memory *buffer = NULL; + + ps_list_foreach_del_d(&self->list, iterator, buffer) + { + ps_list_rem_d(iterator); + wasm_memory_free(iterator); + } +} + +static inline struct wasm_memory * +pool_remove_nolock(struct pool *self) +{ + assert(self != NULL); + assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); + + struct wasm_memory *obj = NULL; + + if (pool_is_empty(self)) return obj; + + obj = ps_list_head_first_d(&self->list, struct wasm_memory); + assert(obj); + ps_list_rem_d(obj); + + return obj; +} + +static inline struct wasm_memory * +pool_remove(struct pool *self) +{ + assert(self != NULL); + assert(self->use_lock); + + struct wasm_memory *obj = NULL; + + if (pool_is_empty(self)) return obj; + + LOCK_LOCK(&self->lock); + if (pool_is_empty(self)) { + LOCK_UNLOCK(&self->lock); + return obj; + } + + obj = ps_list_head_first_d(&self->list, struct wasm_memory); + assert(obj); + ps_list_rem_d(obj); + LOCK_UNLOCK(&self->lock); + return obj; +} + +static inline int +pool_add_nolock(struct pool *self, struct wasm_memory *obj) +{ + assert(self != NULL); + assert(obj != NULL); + assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); + + ps_list_head_add_d(&self->list, obj); + return 0; +} + +static inline int +pool_add(struct pool *self, struct wasm_memory *obj) +{ + assert(self != NULL); + assert(obj 
!= NULL); + assert(self->use_lock); + + LOCK_LOCK(&self->lock); + ps_list_head_add_d(&self->list, obj); + LOCK_UNLOCK(&self->lock); + return 0; +} + +static inline void +pool_free(struct pool *self) +{ + while (!pool_is_empty(self)) free(pool_remove(self)); + + free(self); +} diff --git a/runtime/include/sandbox_functions.h b/runtime/include/sandbox_functions.h index 6b1900d..2f8fac9 100644 --- a/runtime/include/sandbox_functions.h +++ b/runtime/include/sandbox_functions.h @@ -36,7 +36,9 @@ sandbox_close_http(struct sandbox *sandbox) static inline void sandbox_free_linear_memory(struct sandbox *sandbox) { - wasm_memory_free(sandbox->memory); + assert(sandbox != NULL); + assert(sandbox->memory != NULL); + module_free_linear_memory(sandbox->module, sandbox->memory); sandbox->memory = NULL; } diff --git a/runtime/include/wasm_memory.h b/runtime/include/wasm_memory.h index a142491..d092d2b 100644 --- a/runtime/include/wasm_memory.h +++ b/runtime/include/wasm_memory.h @@ -7,16 +7,18 @@ #include #include +#include "ps_list.h" #include "types.h" /* PAGE_SIZE */ #include "wasm_types.h" #define WASM_MEMORY_MAX (size_t) UINT32_MAX + 1 struct wasm_memory { - size_t size; /* Initial Size in bytes */ - size_t capacity; /* Size backed by actual pages */ - size_t max; /* Soft cap in bytes. Defaults to 4GB */ - uint8_t data[]; + struct ps_list list; /* Linked List Node used for object pool */ + size_t size; /* Initial Size in bytes */ + size_t capacity; /* Size backed by actual pages */ + size_t max; /* Soft cap in bytes. 
Defaults to 4GB */ + uint8_t data[]; }; static inline struct wasm_memory * @@ -48,6 +50,7 @@ wasm_memory_allocate(size_t initial, size_t max) return NULL; } + ps_list_init_d(self); self->size = initial; self->capacity = initial; self->max = max; @@ -67,6 +70,13 @@ wasm_memory_wipe(struct wasm_memory *self) memset(self->data, 0, self->size); } +static inline void +wasm_memory_reinit(struct wasm_memory *self, size_t initial) +{ + wasm_memory_wipe(self); + self->size = initial; +} + static inline int wasm_memory_expand(struct wasm_memory *self, size_t size_to_expand) { diff --git a/runtime/src/module.c b/runtime/src/module.c index f4fe23a..ebcd028 100644 --- a/runtime/src/module.c +++ b/runtime/src/module.c @@ -135,7 +135,6 @@ module_free(struct module *module) * @param name * @param path * @param stack_size - * @param max_memory * @param relative_deadline_us * @param port * @param request_size @@ -143,8 +142,8 @@ module_free(struct module *module) */ struct module * -module_new(char *name, char *path, uint32_t stack_size, uint32_t max_memory, uint32_t relative_deadline_us, int port, - int request_size, int response_size, int admissions_percentile, uint32_t expected_execution_us) +module_new(char *name, char *path, uint32_t stack_size, uint32_t relative_deadline_us, int port, int request_size, + int response_size, int admissions_percentile, uint32_t expected_execution_us) { int rc = 0; @@ -165,7 +164,6 @@ module_new(char *name, char *path, uint32_t stack_size, uint32_t max_memory, uin module->stack_size = ((uint32_t)(round_up_to_page(stack_size == 0 ? WASM_STACK_SIZE : stack_size))); debuglog("Stack Size: %u", module->stack_size); - module->max_memory = max_memory == 0 ? 
((uint64_t)WASM_PAGE_SIZE * WASM_MEMORY_PAGES_MAX) : max_memory; module->socket_descriptor = -1; module->port = port; @@ -212,6 +210,8 @@ module_new(char *name, char *path, uint32_t stack_size, uint32_t max_memory, uin module_initialize_table(module); current_wasm_module_instance.table = NULL; + for (int i = 0; i < MAX_WORKER_THREADS; i++) { pool_init(&module->pools.memory[i], false); } + /* Start listening for requests */ rc = module_listen(module); if (rc < 0) goto err_listen; @@ -421,7 +421,7 @@ module_new_from_json(char *file_name) #endif /* Allocate a module based on the values from the JSON */ - struct module *module = module_new(module_name, module_path, 0, 0, relative_deadline_us, port, + struct module *module = module_new(module_name, module_path, 0, relative_deadline_us, port, request_size, response_size, admissions_percentile, expected_execution_us); if (module == NULL) goto module_new_err; diff --git a/runtime/src/sandbox.c b/runtime/src/sandbox.c index c160a42..c46cefa 100644 --- a/runtime/src/sandbox.c +++ b/runtime/src/sandbox.c @@ -5,6 +5,7 @@ #include "current_sandbox.h" #include "debuglog.h" #include "panic.h" +#include "pool.h" #include "runtime.h" #include "sandbox_functions.h" #include "sandbox_set_as_error.h" @@ -34,16 +35,7 @@ static inline int sandbox_allocate_linear_memory(struct sandbox *sandbox) { assert(sandbox != NULL); - - char *error_message = NULL; - - size_t initial = (size_t)sandbox->module->abi.starting_pages * WASM_PAGE_SIZE; - size_t max = (size_t)sandbox->module->abi.max_pages * WASM_PAGE_SIZE; - - assert(initial <= (size_t)UINT32_MAX + 1); - assert(max <= (size_t)UINT32_MAX + 1); - - sandbox->memory = wasm_memory_allocate(initial, max); + sandbox->memory = module_allocate_linear_memory(sandbox->module); if (unlikely(sandbox->memory == NULL)) return -1; return 0; From b3dc7db31c467684a64b8bd0f84cf2cf8f2f321a Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Thu, 9 Dec 2021 02:51:41 +0000 Subject: [PATCH 02/15] feat: wasm_stack 
object pool --- .vscode/settings.json | 1 + runtime/include/module.h | 38 +++++- runtime/include/pool.h | 199 +++++++++++++++----------------- runtime/include/sandbox_types.h | 2 +- runtime/include/wasm_memory.h | 8 +- runtime/include/wasm_stack.h | 102 ++++++++++++---- runtime/src/module.c | 5 +- runtime/src/sandbox.c | 11 +- 8 files changed, 217 insertions(+), 149 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 52765fc..fc29845 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -102,6 +102,7 @@ "software_interrupt_counts.h": "c", "sandbox_set_as_running_sys.h": "c", "wasm_module_instance.h": "c", + "wasm_stack.h": "c" }, "files.exclude": { "**/.git": true, diff --git a/runtime/include/module.h b/runtime/include/module.h index 82df353..20b1915 100644 --- a/runtime/include/module.h +++ b/runtime/include/module.h @@ -12,6 +12,8 @@ #include "panic.h" #include "pool.h" #include "types.h" +#include "wasm_stack.h" +#include "wasm_memory.h" #define MODULE_DEFAULT_REQUEST_RESPONSE_SIZE (PAGE_SIZE) @@ -20,6 +22,9 @@ extern thread_local int worker_thread_idx; +INIT_POOL(wasm_memory, wasm_memory_delete) +INIT_POOL(wasm_stack, wasm_stack_delete) + /* * Defines the listen backlog, the queue length for completely established socketeds waiting to be accepted * If this value is greater than the value in /proc/sys/net/core/somaxconn (typically 128), then it is silently @@ -39,7 +44,8 @@ extern thread_local int worker_thread_idx; #define MAX_WORKER_THREADS 64 struct module_pools { - struct pool memory[MAX_WORKER_THREADS]; + struct wasm_memory_pool memory[MAX_WORKER_THREADS]; + struct wasm_stack_pool stack[MAX_WORKER_THREADS]; }; struct module { @@ -152,12 +158,33 @@ module_release(struct module *module) return; } +static inline struct wasm_stack * +module_allocate_stack(struct module *self) +{ + assert(self != NULL); + + struct wasm_stack *stack = wasm_stack_pool_remove_nolock(&self->pools.stack[worker_thread_idx]); + + if (stack == 
NULL) { + stack = wasm_stack_new(self->stack_size); + if (unlikely(stack == NULL)) return NULL; + } + + return stack; +} + +static inline void +module_free_stack(struct module *self, struct wasm_stack *stack) +{ + wasm_stack_reinit(stack); + wasm_stack_pool_add_nolock(&self->pools.stack[worker_thread_idx], stack); +} + static inline struct wasm_memory * module_allocate_linear_memory(struct module *module) { assert(module != NULL); - char *error_message = NULL; size_t initial = (size_t)module->abi.starting_pages * WASM_PAGE_SIZE; size_t max = (size_t)module->abi.max_pages * WASM_PAGE_SIZE; @@ -165,10 +192,9 @@ module_allocate_linear_memory(struct module *module) assert(initial <= (size_t)UINT32_MAX + 1); assert(max <= (size_t)UINT32_MAX + 1); - struct wasm_memory *linear_memory = (struct wasm_memory *)pool_remove_nolock( - &module->pools.memory[worker_thread_idx]); + struct wasm_memory *linear_memory = wasm_memory_pool_remove_nolock(&module->pools.memory[worker_thread_idx]); if (linear_memory == NULL) { - linear_memory = wasm_memory_allocate(initial, max); + linear_memory = wasm_memory_new(initial, max); if (unlikely(linear_memory == NULL)) return NULL; } @@ -180,7 +206,7 @@ module_free_linear_memory(struct module *module, struct wasm_memory *memory) { wasm_memory_wipe(memory); wasm_memory_reinit(memory, module->abi.starting_pages * WASM_PAGE_SIZE); - pool_add_nolock(&module->pools.memory[worker_thread_idx], memory); + wasm_memory_pool_add_nolock(&module->pools.memory[worker_thread_idx], memory); } /******************************** diff --git a/runtime/include/pool.h b/runtime/include/pool.h index c529037..26d7527 100644 --- a/runtime/include/pool.h +++ b/runtime/include/pool.h @@ -7,113 +7,96 @@ #include "generic_thread.h" #include "lock.h" #include "ps_list.h" -#include "wasm_memory.h" -struct pool { - bool use_lock; - lock_t lock; - struct ps_list_head list; -}; - -static inline bool -pool_is_empty(struct pool *self) -{ - assert(self != NULL); - - return 
ps_list_head_empty(&self->list); -} - -static inline void -pool_init(struct pool *self, bool use_lock) -{ - ps_list_head_init(&self->list); - self->use_lock = use_lock; - if (use_lock) LOCK_INIT(&self->lock); -} - -static inline void -pool_deinit(struct pool *self) -{ - if (pool_is_empty(self)) return; - - struct wasm_memory *iterator = NULL; - struct wasm_memory *buffer = NULL; - - ps_list_foreach_del_d(&self->list, iterator, buffer) - { - ps_list_rem_d(iterator); - wasm_memory_free(iterator); +#define INIT_POOL(STRUCT_NAME, DTOR_FN) \ + struct STRUCT_NAME##_pool { \ + bool use_lock; \ + lock_t lock; \ + struct ps_list_head list; \ + }; \ + \ + static inline bool STRUCT_NAME##_pool_is_empty(struct STRUCT_NAME##_pool *self) \ + { \ + assert(self != NULL); \ + \ + return ps_list_head_empty(&self->list); \ + } \ + \ + static inline void STRUCT_NAME##_pool_init(struct STRUCT_NAME##_pool *self, bool use_lock) \ + { \ + ps_list_head_init(&self->list); \ + self->use_lock = use_lock; \ + if (use_lock) LOCK_INIT(&self->lock); \ + } \ + \ + static inline void STRUCT_NAME##_pool_deinit(struct STRUCT_NAME##_pool *self) \ + { \ + if (STRUCT_NAME##_pool_is_empty(self)) return; \ + struct STRUCT_NAME *iterator = NULL; \ + struct STRUCT_NAME *buffer = NULL; \ + ps_list_foreach_del_d(&self->list, iterator, buffer) \ + { \ + ps_list_rem_d(iterator); \ + DTOR_FN(iterator); \ + } \ + } \ + \ + static inline struct STRUCT_NAME *STRUCT_NAME##_pool_remove_nolock(struct STRUCT_NAME##_pool *self) \ + { \ + assert(self != NULL); \ + assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); \ + \ + struct STRUCT_NAME *obj = NULL; \ + \ + if (STRUCT_NAME##_pool_is_empty(self)) return obj; \ + \ + obj = ps_list_head_first_d(&self->list, struct STRUCT_NAME); \ + assert(obj); \ + ps_list_rem_d(obj); \ + \ + return obj; \ + } \ + \ + static inline struct STRUCT_NAME *STRUCT_NAME##_pool_remove(struct STRUCT_NAME##_pool *self) \ + { \ + assert(self != NULL); \ + assert(self->use_lock); \ + \ + 
struct STRUCT_NAME *obj = NULL; \ + \ + if (STRUCT_NAME##_pool_is_empty(self)) return obj; \ + \ + LOCK_LOCK(&self->lock); \ + if (STRUCT_NAME##_pool_is_empty(self)) { \ + LOCK_UNLOCK(&self->lock); \ + return obj; \ + } \ + \ + obj = ps_list_head_first_d(&self->list, struct STRUCT_NAME); \ + assert(obj); \ + ps_list_rem_d(obj); \ + LOCK_UNLOCK(&self->lock); \ + return obj; \ + } \ + \ + static inline int STRUCT_NAME##_pool_add_nolock(struct STRUCT_NAME##_pool *self, struct STRUCT_NAME *obj) \ + { \ + assert(self != NULL); \ + assert(obj != NULL); \ + assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); \ + \ + ps_list_head_add_d(&self->list, obj); \ + return 0; \ + } \ + \ + static inline int STRUCT_NAME##_pool_add(struct STRUCT_NAME##_pool *self, struct STRUCT_NAME *obj) \ + { \ + assert(self != NULL); \ + assert(obj != NULL); \ + assert(self->use_lock); \ + \ + LOCK_LOCK(&self->lock); \ + ps_list_head_add_d(&self->list, obj); \ + LOCK_UNLOCK(&self->lock); \ + return 0; \ } -} - -static inline struct wasm_memory * -pool_remove_nolock(struct pool *self) -{ - assert(self != NULL); - assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); - - struct wasm_memory *obj = NULL; - - if (pool_is_empty(self)) return obj; - - obj = ps_list_head_first_d(&self->list, struct wasm_memory); - assert(obj); - ps_list_rem_d(obj); - - return obj; -} - -static inline struct wasm_memory * -pool_remove(struct pool *self) -{ - assert(self != NULL); - assert(self->use_lock); - - struct wasm_memory *obj = NULL; - - if (pool_is_empty(self)) return obj; - - LOCK_LOCK(&self->lock); - if (pool_is_empty(self)) { - LOCK_UNLOCK(&self->lock); - return obj; - } - - obj = ps_list_head_first_d(&self->list, struct wasm_memory); - assert(obj); - ps_list_rem_d(obj); - LOCK_UNLOCK(&self->lock); - return obj; -} - -static inline int -pool_add_nolock(struct pool *self, struct wasm_memory *obj) -{ - assert(self != NULL); - assert(obj != NULL); - assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); - - 
ps_list_head_add_d(&self->list, obj); - return 0; -} - -static inline int -pool_add(struct pool *self, struct wasm_memory *obj) -{ - assert(self != NULL); - assert(obj != NULL); - assert(self->use_lock); - - LOCK_LOCK(&self->lock); - ps_list_head_add_d(&self->list, obj); - LOCK_UNLOCK(&self->lock); - return 0; -} - -static inline void -pool_free(struct pool *self) -{ - while (!pool_is_empty(self)) free(pool_remove(self)); - - free(self); -} diff --git a/runtime/include/sandbox_types.h b/runtime/include/sandbox_types.h index 6267e8f..05265ab 100644 --- a/runtime/include/sandbox_types.h +++ b/runtime/include/sandbox_types.h @@ -58,7 +58,7 @@ struct sandbox { /* WebAssembly Instance State */ struct arch_context ctxt; - struct wasm_stack stack; + struct wasm_stack * stack; struct wasm_memory *memory; /* Scheduling and Temporal State */ diff --git a/runtime/include/wasm_memory.h b/runtime/include/wasm_memory.h index d092d2b..dd5ec07 100644 --- a/runtime/include/wasm_memory.h +++ b/runtime/include/wasm_memory.h @@ -22,7 +22,7 @@ struct wasm_memory { }; static inline struct wasm_memory * -wasm_memory_allocate(size_t initial, size_t max) +wasm_memory_new(size_t initial, size_t max) { assert(initial > 0); assert(initial <= (size_t)UINT32_MAX + 1); @@ -33,7 +33,7 @@ wasm_memory_allocate(size_t initial, size_t max) size_t size_to_alloc = sizeof(struct wasm_memory) + WASM_MEMORY_MAX + /* guard page */ PAGE_SIZE; void * temp = mmap(NULL, size_to_alloc, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (temp == MAP_FAILED) { - fprintf(stderr, "wasm_memory_allocate - allocation failed, (size: %lu) %s\n", size_to_alloc, + fprintf(stderr, "wasm_memory_new - allocation failed, (size: %lu) %s\n", size_to_alloc, strerror(errno)); return NULL; } @@ -44,7 +44,7 @@ wasm_memory_allocate(size_t initial, size_t max) int rc = mprotect(self, size_to_read_write, PROT_READ | PROT_WRITE); if (rc != 0) { - perror("wasm_memory_allocate - prot r/w failed"); + perror("wasm_memory_new - prot r/w 
failed"); munmap(self, size_to_alloc); assert(0); return NULL; @@ -58,7 +58,7 @@ wasm_memory_allocate(size_t initial, size_t max) } static inline void -wasm_memory_free(struct wasm_memory *self) +wasm_memory_delete(struct wasm_memory *self) { size_t size_to_free = sizeof(struct wasm_memory) + WASM_MEMORY_MAX + /* guard page */ PAGE_SIZE; munmap(self, size_to_free); diff --git a/runtime/include/wasm_stack.h b/runtime/include/wasm_stack.h index dd6c809..1752586 100644 --- a/runtime/include/wasm_stack.h +++ b/runtime/include/wasm_stack.h @@ -4,15 +4,22 @@ #include #include -#include "sandbox_types.h" - +/* This structure is not suitable for a flexible array member because it allocates a guard page beneath the buffer. This + * negates the benefit of tight locality */ struct wasm_stack { - size_t capacity; /* Usable capacity. Excludes size of guard page that we need to free */ - uint8_t *high; /* The highest address of the stack. Grows down from here */ - uint8_t *low; /* The address of the lowest usabe address. Above guard page */ - uint8_t *buffer; /* Points to Guard Page */ + struct ps_list list; /* Linked List Node used for object pool */ + size_t capacity; /* Usable capacity. Excludes size of guard page that we need to free */ + uint8_t * high; /* The highest address of the stack. Grows down from here */ + uint8_t * low; /* The address of the lowest usabe address. 
Above guard page */ + uint8_t * buffer; /* Points to Guard Page */ }; +static inline struct wasm_stack * +wasm_stack_allocate(void) +{ + return calloc(1, sizeof(struct wasm_stack)); +} + /** * Allocates a static sized stack for a sandbox with a guard page underneath * Because a stack grows down, this protects against stack overflow @@ -21,48 +28,93 @@ struct wasm_stack { * @returns 0 on success, -1 on error */ static inline int -wasm_stack_allocate(struct wasm_stack *stack, size_t capacity) +wasm_stack_init(struct wasm_stack *self, size_t capacity) { - assert(stack); + assert(self); int rc = 0; - stack->buffer = (uint8_t *)mmap(NULL, /* guard page */ PAGE_SIZE + capacity, PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (unlikely(stack->buffer == MAP_FAILED)) { + self->buffer = (uint8_t *)mmap(NULL, /* guard page */ PAGE_SIZE + capacity, PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (unlikely(self->buffer == MAP_FAILED)) { perror("sandbox allocate stack"); goto err_stack_allocation_failed; } - stack->low = (uint8_t *)mmap(stack->buffer + /* guard page */ PAGE_SIZE, capacity, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); - if (unlikely(stack->low == MAP_FAILED)) { + self->low = (uint8_t *)mmap(self->buffer + /* guard page */ PAGE_SIZE, capacity, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); + if (unlikely(self->low == MAP_FAILED)) { perror("sandbox set stack read/write"); goto err_stack_prot_failed; } - stack->capacity = capacity; - stack->high = stack->low + capacity; + ps_list_init_d(self); + self->capacity = capacity; + self->high = self->low + capacity; rc = 0; done: return rc; err_stack_prot_failed: - rc = munmap(stack->buffer, PAGE_SIZE + capacity); + rc = munmap(self->buffer, PAGE_SIZE + capacity); if (rc == -1) perror("munmap"); err_stack_allocation_failed: - stack->buffer = NULL; - rc = -1; + self->buffer = NULL; + rc = -1; goto done; } static inline void -wasm_stack_free(struct 
wasm_stack *stack) +wasm_stack_free(struct wasm_stack *self) { - assert(stack != NULL); - assert(stack->buffer != NULL); + free(self); +} + + +static struct wasm_stack * +wasm_stack_new(size_t capacity) +{ + struct wasm_stack *self = wasm_stack_allocate(); + int rc = wasm_stack_init(self, capacity); + if (rc < 0) { + wasm_stack_free(self); + return NULL; + } + + return self; +} + +static inline void +wasm_stack_deinit(struct wasm_stack *self) +{ + assert(self != NULL); + assert(self->buffer != NULL); + /* The stack start is the bottom of the usable stack, but we allocated a guard page below this */ - int rc = munmap(stack->buffer, stack->capacity + PAGE_SIZE); - stack->buffer = NULL; - if (unlikely(rc == -1)) perror("munmap"); + munmap(self->buffer, self->capacity + PAGE_SIZE); + self->buffer = NULL; + self->high = NULL; + self->low = NULL; +} + +static inline void +wasm_stack_delete(struct wasm_stack *self) +{ + assert(self != NULL); + assert(self->buffer != NULL); + wasm_stack_deinit(self); + wasm_stack_free(self); +} + +static inline void +wasm_stack_reinit(struct wasm_stack *self) +{ + assert(self != NULL); + assert(self->buffer != NULL); + + self->low = self->buffer + /* guard page */ PAGE_SIZE; + + memset(self->low, 0, self->capacity); + ps_list_init_d(self); + self->high = self->low + self->capacity; } diff --git a/runtime/src/module.c b/runtime/src/module.c index ebcd028..dbed165 100644 --- a/runtime/src/module.c +++ b/runtime/src/module.c @@ -210,7 +210,10 @@ module_new(char *name, char *path, uint32_t stack_size, uint32_t relative_deadli module_initialize_table(module); current_wasm_module_instance.table = NULL; - for (int i = 0; i < MAX_WORKER_THREADS; i++) { pool_init(&module->pools.memory[i], false); } + for (int i = 0; i < MAX_WORKER_THREADS; i++) { + wasm_memory_pool_init(&module->pools.memory[i], false); + wasm_stack_pool_init(&module->pools.stack[i], false); + } /* Start listening for requests */ rc = module_listen(module); diff --git 
a/runtime/src/sandbox.c b/runtime/src/sandbox.c index c46cefa..f766f22 100644 --- a/runtime/src/sandbox.c +++ b/runtime/src/sandbox.c @@ -47,7 +47,10 @@ sandbox_allocate_stack(struct sandbox *sandbox) assert(sandbox); assert(sandbox->module); - return wasm_stack_allocate(&sandbox->stack, sandbox->module->stack_size); + sandbox->stack = module_allocate_stack(sandbox->module); + if (sandbox->stack == NULL) return -1; + + return 0; } static inline void @@ -55,7 +58,7 @@ sandbox_free_stack(struct sandbox *sandbox) { assert(sandbox); - return wasm_stack_free(&sandbox->stack); + return module_free_stack(sandbox->module, sandbox->stack); } /** @@ -122,7 +125,7 @@ sandbox_prepare_execution_environemnt(struct sandbox *sandbox) /* Initialize the sandbox's context, stack, and instruction pointer */ /* stack grows down, so set to high address */ - arch_context_init(&sandbox->ctxt, (reg_t)current_sandbox_start, (reg_t)sandbox->stack.high); + arch_context_init(&sandbox->ctxt, (reg_t)current_sandbox_start, (reg_t)sandbox->stack->high); rc = 0; done: @@ -210,7 +213,7 @@ sandbox_free(struct sandbox *sandbox) /* Free Sandbox Struct and HTTP Request and Response Buffers */ - if (likely(sandbox->stack.buffer != NULL)) sandbox_free_stack(sandbox); + if (likely(sandbox->stack->buffer != NULL)) sandbox_free_stack(sandbox); free(sandbox); if (rc == -1) { From e1cb478262888b214f9af3562adea6a6bb82b359 Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Fri, 10 Dec 2021 20:59:29 +0000 Subject: [PATCH 03/15] fix: Correct sandbox free stack logic --- runtime/src/sandbox.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/sandbox.c b/runtime/src/sandbox.c index fbf2b88..61e62cc 100644 --- a/runtime/src/sandbox.c +++ b/runtime/src/sandbox.c @@ -214,7 +214,7 @@ sandbox_free(struct sandbox *sandbox) /* Free Sandbox Struct and HTTP Request and Response Buffers */ - if (likely(sandbox->stack->buffer != NULL)) sandbox_free_stack(sandbox); + if (likely(sandbox->stack != 
NULL)) sandbox_free_stack(sandbox); free(sandbox); if (rc == -1) { From b652613ecef5537003333bf6fcfe56f2a4e7434d Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Fri, 10 Dec 2021 21:41:47 +0000 Subject: [PATCH 04/15] refactor: Invert pool structure --- runtime/include/module.h | 16 ++++++++-------- runtime/src/module.c | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/runtime/include/module.h b/runtime/include/module.h index 20b1915..6fb6a8f 100644 --- a/runtime/include/module.h +++ b/runtime/include/module.h @@ -44,9 +44,9 @@ INIT_POOL(wasm_stack, wasm_stack_delete) #define MAX_WORKER_THREADS 64 struct module_pools { - struct wasm_memory_pool memory[MAX_WORKER_THREADS]; - struct wasm_stack_pool stack[MAX_WORKER_THREADS]; -}; + struct wasm_memory_pool memory; + struct wasm_stack_pool stack; +} __attribute__((aligned(8))); struct module { /* Metadata from JSON Config */ @@ -71,7 +71,7 @@ struct module { _Atomic uint32_t reference_count; /* ref count how many instances exist here. 
*/ struct wasm_table *indirect_table; - struct module_pools pools; + struct module_pools pools[MAX_WORKER_THREADS]; }; /************************* @@ -163,7 +163,7 @@ module_allocate_stack(struct module *self) { assert(self != NULL); - struct wasm_stack *stack = wasm_stack_pool_remove_nolock(&self->pools.stack[worker_thread_idx]); + struct wasm_stack *stack = wasm_stack_pool_remove_nolock(&self->pools[worker_thread_idx].stack); if (stack == NULL) { stack = wasm_stack_new(self->stack_size); @@ -177,7 +177,7 @@ static inline void module_free_stack(struct module *self, struct wasm_stack *stack) { wasm_stack_reinit(stack); - wasm_stack_pool_add_nolock(&self->pools.stack[worker_thread_idx], stack); + wasm_stack_pool_add_nolock(&self->pools[worker_thread_idx].stack, stack); } static inline struct wasm_memory * @@ -192,7 +192,7 @@ module_allocate_linear_memory(struct module *module) assert(initial <= (size_t)UINT32_MAX + 1); assert(max <= (size_t)UINT32_MAX + 1); - struct wasm_memory *linear_memory = wasm_memory_pool_remove_nolock(&module->pools.memory[worker_thread_idx]); + struct wasm_memory *linear_memory = wasm_memory_pool_remove_nolock(&module->pools[worker_thread_idx].memory); if (linear_memory == NULL) { linear_memory = wasm_memory_new(initial, max); if (unlikely(linear_memory == NULL)) return NULL; @@ -206,7 +206,7 @@ module_free_linear_memory(struct module *module, struct wasm_memory *memory) { wasm_memory_wipe(memory); wasm_memory_reinit(memory, module->abi.starting_pages * WASM_PAGE_SIZE); - wasm_memory_pool_add_nolock(&module->pools.memory[worker_thread_idx], memory); + wasm_memory_pool_add_nolock(&module->pools[worker_thread_idx].memory, memory); } /******************************** diff --git a/runtime/src/module.c b/runtime/src/module.c index fa3e2f6..45a96f2 100644 --- a/runtime/src/module.c +++ b/runtime/src/module.c @@ -211,8 +211,8 @@ module_new(char *name, char *path, uint32_t stack_size, uint32_t relative_deadli current_wasm_module_instance.table = 
NULL; for (int i = 0; i < MAX_WORKER_THREADS; i++) { - wasm_memory_pool_init(&module->pools.memory[i], false); - wasm_stack_pool_init(&module->pools.stack[i], false); + wasm_memory_pool_init(&module->pools[i].memory, false); + wasm_stack_pool_init(&module->pools[i].stack, false); } /* Start listening for requests */ From 773dbf3f70587bd3c42e90f305c615db56c92db0 Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Thu, 16 Dec 2021 09:47:57 -0500 Subject: [PATCH 05/15] refactor: Align memory pools on cache lines --- runtime/include/module.h | 2 +- runtime/include/types.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/runtime/include/module.h b/runtime/include/module.h index 6fb6a8f..9e35598 100644 --- a/runtime/include/module.h +++ b/runtime/include/module.h @@ -46,7 +46,7 @@ INIT_POOL(wasm_stack, wasm_stack_delete) struct module_pools { struct wasm_memory_pool memory; struct wasm_stack_pool stack; -} __attribute__((aligned(8))); +} __attribute__((aligned(CACHE_LINE))); struct module { /* Metadata from JSON Config */ diff --git a/runtime/include/types.h b/runtime/include/types.h index a660bab..1b14366 100644 --- a/runtime/include/types.h +++ b/runtime/include/types.h @@ -17,6 +17,7 @@ #define PAGE_ALIGNED __attribute__((aligned(PAGE_SIZE))) #define PAGE_SIZE (unsigned long)(1 << 12) #define WEAK __attribute__((weak)) +#define CACHE_LINE 64 #ifndef unlikely #define unlikely(x) __builtin_expect(!!(x), 0) From ba05a924d3b08d7cc97fa243f2f7d2d5755feb17 Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Thu, 16 Dec 2021 09:53:45 -0500 Subject: [PATCH 06/15] refactor: Align on double cache line --- runtime/include/module.h | 2 +- runtime/include/types.h | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/runtime/include/module.h b/runtime/include/module.h index 9e35598..38279fa 100644 --- a/runtime/include/module.h +++ b/runtime/include/module.h @@ -46,7 +46,7 @@ INIT_POOL(wasm_stack, wasm_stack_delete) struct module_pools { struct
wasm_memory_pool memory; struct wasm_stack_pool stack; -} __attribute__((aligned(CACHE_LINE))); +} __attribute__((aligned(CACHE_PAD))); struct module { /* Metadata from JSON Config */ diff --git a/runtime/include/types.h b/runtime/include/types.h index 1b14366..4c1d80c 100644 --- a/runtime/include/types.h +++ b/runtime/include/types.h @@ -17,7 +17,11 @@ #define PAGE_ALIGNED __attribute__((aligned(PAGE_SIZE))) #define PAGE_SIZE (unsigned long)(1 << 12) #define WEAK __attribute__((weak)) -#define CACHE_LINE 64 + +#define CACHE_LINE 64 +/* This might be Intel specific. ARM and x64 both have the same CACHE_LINE size, but Intel uses a double + * cache-line as a coherency unit */ +#define CACHE_PAD (CACHE_LINE * 2) #ifndef unlikely #define unlikely(x) __builtin_expect(!!(x), 0) From 2a21db9a348d2bea2fd16b1f978e75490c8c9571 Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Thu, 16 Dec 2021 09:58:17 -0500 Subject: [PATCH 07/15] style: remove extra newline --- runtime/include/module.h | 1 - 1 file changed, 1 deletion(-) diff --git a/runtime/include/module.h b/runtime/include/module.h index 38279fa..5ca8440 100644 --- a/runtime/include/module.h +++ b/runtime/include/module.h @@ -185,7 +185,6 @@ module_allocate_linear_memory(struct module *module) { assert(module != NULL); - size_t initial = (size_t)module->abi.starting_pages * WASM_PAGE_SIZE; size_t max = (size_t)module->abi.max_pages * WASM_PAGE_SIZE; From a044f8ec3786fec8521e8f2e015a36b4a4513115 Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Thu, 16 Dec 2021 10:02:19 -0500 Subject: [PATCH 08/15] fix: redundant wasm memory wipe --- runtime/include/module.h | 1 - 1 file changed, 1 deletion(-) diff --git a/runtime/include/module.h b/runtime/include/module.h index 5ca8440..37747f2 100644 --- a/runtime/include/module.h +++ b/runtime/include/module.h @@ -203,7 +203,6 @@ module_allocate_linear_memory(struct module *module) static inline void module_free_linear_memory(struct module *module, struct wasm_memory *memory)
{ - wasm_memory_wipe(memory); wasm_memory_reinit(memory, module->abi.starting_pages * WASM_PAGE_SIZE); wasm_memory_pool_add_nolock(&module->pools[worker_thread_idx].memory, memory); } From 7d91a9cfc013635a129b9309f1390cc7c0fa39ea Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Thu, 16 Dec 2021 10:30:58 -0500 Subject: [PATCH 09/15] refactor: rename self --- runtime/include/admissions_info.h | 4 +- runtime/include/http_request.h | 2 +- runtime/include/module.h | 12 +- runtime/include/perf_window.h | 102 ++++----- runtime/include/priority_queue.h | 283 ++++++++++++------------ runtime/include/sandbox_state_history.h | 10 +- runtime/include/vec_u8.h | 70 +++--- runtime/include/wasm_memory.h | 208 ++++++++--------- runtime/include/wasm_stack.h | 72 +++--- runtime/include/wasm_table.h | 74 +++---- runtime/src/admissions_info.c | 38 ++-- runtime/src/http_request.c | 18 +- runtime/src/sandbox.c | 8 +- 13 files changed, 459 insertions(+), 442 deletions(-) diff --git a/runtime/include/admissions_info.h b/runtime/include/admissions_info.h index d0c2786..0a941c5 100644 --- a/runtime/include/admissions_info.h +++ b/runtime/include/admissions_info.h @@ -10,6 +10,6 @@ struct admissions_info { uint64_t relative_deadline; /* Relative deadline in cycles. 
This is duplicated state */ }; -void admissions_info_initialize(struct admissions_info *self, int percentile, uint64_t expected_execution, +void admissions_info_initialize(struct admissions_info *admissions_info, int percentile, uint64_t expected_execution, uint64_t relative_deadline); -void admissions_info_update(struct admissions_info *self, uint64_t execution_duration); +void admissions_info_update(struct admissions_info *admissions_info, uint64_t execution_duration); diff --git a/runtime/include/http_request.h b/runtime/include/http_request.h index 53beeae..f6ac30c 100644 --- a/runtime/include/http_request.h +++ b/runtime/include/http_request.h @@ -27,4 +27,4 @@ struct http_request { bool message_end; /* boolean flag set when body processing is complete */ }; -void http_request_print(struct http_request *self); +void http_request_print(struct http_request *http_request); diff --git a/runtime/include/module.h b/runtime/include/module.h index 37747f2..2d4afd5 100644 --- a/runtime/include/module.h +++ b/runtime/include/module.h @@ -159,14 +159,14 @@ module_release(struct module *module) } static inline struct wasm_stack * -module_allocate_stack(struct module *self) +module_allocate_stack(struct module *module) { - assert(self != NULL); + assert(module != NULL); - struct wasm_stack *stack = wasm_stack_pool_remove_nolock(&self->pools[worker_thread_idx].stack); + struct wasm_stack *stack = wasm_stack_pool_remove_nolock(&module->pools[worker_thread_idx].stack); if (stack == NULL) { - stack = wasm_stack_new(self->stack_size); + stack = wasm_stack_new(module->stack_size); if (unlikely(stack == NULL)) return NULL; } @@ -174,10 +174,10 @@ module_allocate_stack(struct module *self) } static inline void -module_free_stack(struct module *self, struct wasm_stack *stack) +module_free_stack(struct module *module, struct wasm_stack *stack) { wasm_stack_reinit(stack); - wasm_stack_pool_add_nolock(&self->pools[worker_thread_idx].stack, stack); + 
wasm_stack_pool_add_nolock(&module->pools[worker_thread_idx].stack, stack); } static inline struct wasm_memory * diff --git a/runtime/include/perf_window.h b/runtime/include/perf_window.h index d93a117..0c0ec9e 100644 --- a/runtime/include/perf_window.h +++ b/runtime/include/perf_window.h @@ -12,122 +12,122 @@ /** * Initializes perf window - * @param self + * @param perf_window */ static inline void -perf_window_initialize(struct perf_window *self) +perf_window_initialize(struct perf_window *perf_window) { - assert(self != NULL); + assert(perf_window != NULL); - LOCK_INIT(&self->lock); - self->count = 0; - memset(&self->by_duration, 0, sizeof(struct execution_node) * PERF_WINDOW_BUFFER_SIZE); - memset(&self->by_termination, 0, sizeof(uint16_t) * PERF_WINDOW_BUFFER_SIZE); + LOCK_INIT(&perf_window->lock); + perf_window->count = 0; + memset(&perf_window->by_duration, 0, sizeof(struct execution_node) * PERF_WINDOW_BUFFER_SIZE); + memset(&perf_window->by_termination, 0, sizeof(uint16_t) * PERF_WINDOW_BUFFER_SIZE); } /** * Swaps two execution nodes in the by_duration array, including updating the indices in the by_termination circular * buffer - * @param self + * @param perf_window * @param first_by_duration_idx * @param second_by_duration_idx */ static inline void -perf_window_swap(struct perf_window *self, uint16_t first_by_duration_idx, uint16_t second_by_duration_idx) +perf_window_swap(struct perf_window *perf_window, uint16_t first_by_duration_idx, uint16_t second_by_duration_idx) { - assert(LOCK_IS_LOCKED(&self->lock)); - assert(self != NULL); + assert(LOCK_IS_LOCKED(&perf_window->lock)); + assert(perf_window != NULL); assert(first_by_duration_idx >= 0 && first_by_duration_idx < PERF_WINDOW_BUFFER_SIZE); assert(second_by_duration_idx >= 0 && second_by_duration_idx < PERF_WINDOW_BUFFER_SIZE); - uint16_t first_by_termination_idx = self->by_duration[first_by_duration_idx].by_termination_idx; - uint16_t second_by_termination_idx = 
self->by_duration[second_by_duration_idx].by_termination_idx; + uint16_t first_by_termination_idx = perf_window->by_duration[first_by_duration_idx].by_termination_idx; + uint16_t second_by_termination_idx = perf_window->by_duration[second_by_duration_idx].by_termination_idx; /* The execution node's by_termination_idx points to a by_termination cell equal to its own by_duration index */ - assert(self->by_termination[first_by_termination_idx] == first_by_duration_idx); - assert(self->by_termination[second_by_termination_idx] == second_by_duration_idx); + assert(perf_window->by_termination[first_by_termination_idx] == first_by_duration_idx); + assert(perf_window->by_termination[second_by_termination_idx] == second_by_duration_idx); - uint64_t first_execution_time = self->by_duration[first_by_duration_idx].execution_time; - uint64_t second_execution_time = self->by_duration[second_by_duration_idx].execution_time; + uint64_t first_execution_time = perf_window->by_duration[first_by_duration_idx].execution_time; + uint64_t second_execution_time = perf_window->by_duration[second_by_duration_idx].execution_time; /* Swap Indices in Buffer*/ - self->by_termination[first_by_termination_idx] = second_by_duration_idx; - self->by_termination[second_by_termination_idx] = first_by_duration_idx; + perf_window->by_termination[first_by_termination_idx] = second_by_duration_idx; + perf_window->by_termination[second_by_termination_idx] = first_by_duration_idx; /* Swap by_termination_idx */ - struct execution_node tmp_node = self->by_duration[first_by_duration_idx]; - self->by_duration[first_by_duration_idx] = self->by_duration[second_by_duration_idx]; - self->by_duration[second_by_duration_idx] = tmp_node; + struct execution_node tmp_node = perf_window->by_duration[first_by_duration_idx]; + perf_window->by_duration[first_by_duration_idx] = perf_window->by_duration[second_by_duration_idx]; + perf_window->by_duration[second_by_duration_idx] = tmp_node; /* The circular by_termination 
indices should always point to the same execution times across all swaps */ - assert(self->by_duration[self->by_termination[first_by_termination_idx]].execution_time + assert(perf_window->by_duration[perf_window->by_termination[first_by_termination_idx]].execution_time == first_execution_time); - assert(self->by_duration[self->by_termination[second_by_termination_idx]].execution_time + assert(perf_window->by_duration[perf_window->by_termination[second_by_termination_idx]].execution_time == second_execution_time); } /** * Adds a new value to the perf window * Not intended to be called directly! - * @param self + * @param perf_window * @param value */ static inline void -perf_window_add(struct perf_window *self, uint64_t value) +perf_window_add(struct perf_window *perf_window, uint64_t value) { - assert(self != NULL); + assert(perf_window != NULL); uint16_t idx_of_oldest; bool check_up; - if (unlikely(!LOCK_IS_LOCKED(&self->lock))) panic("lock not held when calling perf_window_add\n"); + if (unlikely(!LOCK_IS_LOCKED(&perf_window->lock))) panic("lock not held when calling perf_window_add\n"); /* A successful invocation should run for a non-zero amount of time */ assert(value > 0); /* If count is 0, then fill entire array with initial execution times */ - if (self->count == 0) { + if (perf_window->count == 0) { for (int i = 0; i < PERF_WINDOW_BUFFER_SIZE; i++) { - self->by_termination[i] = i; - self->by_duration[i] = (struct execution_node){ .execution_time = value, + perf_window->by_termination[i] = i; + perf_window->by_duration[i] = (struct execution_node){ .execution_time = value, .by_termination_idx = i }; } - self->count = PERF_WINDOW_BUFFER_SIZE; + perf_window->count = PERF_WINDOW_BUFFER_SIZE; goto done; } /* Otherwise, replace the oldest value, and then sort */ - idx_of_oldest = self->by_termination[self->count % PERF_WINDOW_BUFFER_SIZE]; - check_up = value > self->by_duration[idx_of_oldest].execution_time; + idx_of_oldest = 
perf_window->by_termination[perf_window->count % PERF_WINDOW_BUFFER_SIZE]; + check_up = value > perf_window->by_duration[idx_of_oldest].execution_time; - self->by_duration[idx_of_oldest].execution_time = value; + perf_window->by_duration[idx_of_oldest].execution_time = value; if (check_up) { for (uint16_t i = idx_of_oldest; i + 1 < PERF_WINDOW_BUFFER_SIZE - && self->by_duration[i + 1].execution_time < self->by_duration[i].execution_time; + && perf_window->by_duration[i + 1].execution_time < perf_window->by_duration[i].execution_time; i++) { - perf_window_swap(self, i, i + 1); + perf_window_swap(perf_window, i, i + 1); } } else { for (int i = idx_of_oldest; - i - 1 >= 0 && self->by_duration[i - 1].execution_time > self->by_duration[i].execution_time; i--) { - perf_window_swap(self, i, i - 1); + i - 1 >= 0 && perf_window->by_duration[i - 1].execution_time > perf_window->by_duration[i].execution_time; i--) { + perf_window_swap(perf_window, i, i - 1); } } /* The idx that we replaces should still point to the same value */ - assert(self->by_duration[self->by_termination[self->count % PERF_WINDOW_BUFFER_SIZE]].execution_time == value); + assert(perf_window->by_duration[perf_window->by_termination[perf_window->count % PERF_WINDOW_BUFFER_SIZE]].execution_time == value); /* The by_duration array should be ordered by execution time */ #ifndef NDEBUG for (int i = 1; i < PERF_WINDOW_BUFFER_SIZE; i++) { - assert(self->by_duration[i - 1].execution_time <= self->by_duration[i].execution_time); + assert(perf_window->by_duration[i - 1].execution_time <= perf_window->by_duration[i].execution_time); } #endif - self->count++; + perf_window->count++; done: return; @@ -135,22 +135,22 @@ done: /** * Returns pXX execution time - * @param self + * @param perf_window * @param percentile represented by int between 50 and 99 * @param precomputed_index memoized index for quick lookup when by_duration is full * @returns execution time */ static inline uint64_t -perf_window_get_percentile(struct 
perf_window *self, int percentile, int precomputed_index) +perf_window_get_percentile(struct perf_window *perf_window, int percentile, int precomputed_index) { - assert(self != NULL); + assert(perf_window != NULL); assert(percentile >= 50 && percentile <= 99); - int size = self->count; + int size = perf_window->count; assert(size > 0); - if (likely(size >= PERF_WINDOW_BUFFER_SIZE)) return self->by_duration[precomputed_index].execution_time; + if (likely(size >= PERF_WINDOW_BUFFER_SIZE)) return perf_window->by_duration[precomputed_index].execution_time; - return self->by_duration[size * percentile / 100].execution_time; + return perf_window->by_duration[size * percentile / 100].execution_time; } /** @@ -158,9 +158,9 @@ perf_window_get_percentile(struct perf_window *self, int percentile, int precomp * @returns total count */ static inline uint64_t -perf_window_get_count(struct perf_window *self) +perf_window_get_count(struct perf_window *perf_window) { - assert(self != NULL); + assert(perf_window != NULL); - return self->count; + return perf_window->count; } diff --git a/runtime/include/priority_queue.h b/runtime/include/priority_queue.h index ad7541b..ae7f647 100644 --- a/runtime/include/priority_queue.h +++ b/runtime/include/priority_queue.h @@ -37,36 +37,36 @@ struct priority_queue { * @returns value of highest priority value in queue or ULONG_MAX if empty */ static inline uint64_t -priority_queue_peek(struct priority_queue *self) +priority_queue_peek(struct priority_queue *priority_queue) { - return self->highest_priority; + return priority_queue->highest_priority; } static inline void -priority_queue_update_highest_priority(struct priority_queue *self, const uint64_t priority) +priority_queue_update_highest_priority(struct priority_queue *priority_queue, const uint64_t priority) { - self->highest_priority = priority; + priority_queue->highest_priority = priority; } /** * Adds a value to the end of the binary heap - * @param self the priority queue + * @param 
priority_queue the priority queue * @param new_item the value we are adding * @return 0 on success. -ENOSPC when priority queue is full */ static inline int -priority_queue_append(struct priority_queue *self, void *new_item) +priority_queue_append(struct priority_queue *priority_queue, void *new_item) { - assert(self != NULL); + assert(priority_queue != NULL); assert(new_item != NULL); - assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); + assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock)); int rc; - if (unlikely(self->size + 1 > self->capacity)) panic("PQ overflow"); - if (unlikely(self->size + 1 == self->capacity)) goto err_enospc; - self->items[++self->size] = new_item; + if (unlikely(priority_queue->size + 1 > priority_queue->capacity)) panic("PQ overflow"); + if (unlikely(priority_queue->size + 1 == priority_queue->capacity)) goto err_enospc; + priority_queue->items[++priority_queue->size] = new_item; rc = 0; done: @@ -78,71 +78,76 @@ err_enospc: /** * Checks if a priority queue is empty - * @param self the priority queue to check + * @param priority_queue the priority queue to check * @returns true if empty, else otherwise */ static inline bool -priority_queue_is_empty(struct priority_queue *self) +priority_queue_is_empty(struct priority_queue *priority_queue) { - assert(self != NULL); - assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); + assert(priority_queue != NULL); + assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock)); - return self->size == 0; + return priority_queue->size == 0; } /** * Shifts an appended value upwards to restore heap structure property - * @param self the priority queue + * @param priority_queue the priority queue */ static inline void -priority_queue_percolate_up(struct priority_queue *self) +priority_queue_percolate_up(struct priority_queue *priority_queue) { - assert(self != NULL); - assert(self->get_priority_fn != NULL); - assert(!self->use_lock || 
LOCK_IS_LOCKED(&self->lock)); + assert(priority_queue != NULL); + assert(priority_queue->get_priority_fn != NULL); + assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock)); /* If there's only one element, set memoized lookup and early out */ - if (self->size == 1) { - priority_queue_update_highest_priority(self, self->get_priority_fn(self->items[1])); + if (priority_queue->size == 1) { + priority_queue_update_highest_priority(priority_queue, + priority_queue->get_priority_fn(priority_queue->items[1])); return; } - for (int i = self->size; - i / 2 != 0 && self->get_priority_fn(self->items[i]) < self->get_priority_fn(self->items[i / 2]); i /= 2) { - assert(self->get_priority_fn(self->items[i]) != ULONG_MAX); - void *temp = self->items[i / 2]; - self->items[i / 2] = self->items[i]; - self->items[i] = temp; + for (int i = priority_queue->size; i / 2 != 0 + && priority_queue->get_priority_fn(priority_queue->items[i]) + < priority_queue->get_priority_fn(priority_queue->items[i / 2]); + i /= 2) { + assert(priority_queue->get_priority_fn(priority_queue->items[i]) != ULONG_MAX); + void *temp = priority_queue->items[i / 2]; + priority_queue->items[i / 2] = priority_queue->items[i]; + priority_queue->items[i] = temp; /* If percolated to highest priority, update highest priority */ - if (i / 2 == 1) priority_queue_update_highest_priority(self, self->get_priority_fn(self->items[1])); + if (i / 2 == 1) + priority_queue_update_highest_priority(priority_queue, priority_queue->get_priority_fn( + priority_queue->items[1])); } } /** * Returns the index of a node's smallest child - * @param self the priority queue + * @param priority_queue the priority queue * @param parent_index * @returns the index of the smallest child */ static inline int -priority_queue_find_smallest_child(struct priority_queue *self, const int parent_index) +priority_queue_find_smallest_child(struct priority_queue *priority_queue, const int parent_index) { - assert(self != NULL); - 
assert(parent_index >= 1 && parent_index <= self->size); - assert(self->get_priority_fn != NULL); - assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); + assert(priority_queue != NULL); + assert(parent_index >= 1 && parent_index <= priority_queue->size); + assert(priority_queue->get_priority_fn != NULL); + assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock)); int left_child_index = 2 * parent_index; int right_child_index = 2 * parent_index + 1; - assert(self->items[left_child_index] != NULL); + assert(priority_queue->items[left_child_index] != NULL); int smallest_child_idx; /* If we don't have a right child or the left child is smaller, return it */ - if (right_child_index > self->size) { + if (right_child_index > priority_queue->size) { smallest_child_idx = left_child_index; - } else if (self->get_priority_fn(self->items[left_child_index]) - < self->get_priority_fn(self->items[right_child_index])) { + } else if (priority_queue->get_priority_fn(priority_queue->items[left_child_index]) + < priority_queue->get_priority_fn(priority_queue->items[right_child_index])) { smallest_child_idx = left_child_index; } else { /* Otherwise, return the right child */ @@ -155,29 +160,29 @@ priority_queue_find_smallest_child(struct priority_queue *self, const int parent /** * Shifts the top of the heap downwards. 
Used after placing the last value at * the top - * @param self the priority queue + * @param priority_queue the priority queue */ static inline void -priority_queue_percolate_down(struct priority_queue *self, int parent_index) +priority_queue_percolate_down(struct priority_queue *priority_queue, int parent_index) { - assert(self != NULL); - assert(self->get_priority_fn != NULL); - assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); + assert(priority_queue != NULL); + assert(priority_queue->get_priority_fn != NULL); + assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock)); assert(!listener_thread_is_running()); bool update_highest_value = parent_index == 1; int left_child_index = 2 * parent_index; - while (left_child_index >= 2 && left_child_index <= self->size) { - int smallest_child_index = priority_queue_find_smallest_child(self, parent_index); + while (left_child_index >= 2 && left_child_index <= priority_queue->size) { + int smallest_child_index = priority_queue_find_smallest_child(priority_queue, parent_index); /* Once the parent is equal to or less than its smallest child, break; */ - if (self->get_priority_fn(self->items[parent_index]) - <= self->get_priority_fn(self->items[smallest_child_index])) + if (priority_queue->get_priority_fn(priority_queue->items[parent_index]) + <= priority_queue->get_priority_fn(priority_queue->items[smallest_child_index])) break; /* Otherwise, swap and continue down the tree */ - void *temp = self->items[smallest_child_index]; - self->items[smallest_child_index] = self->items[parent_index]; - self->items[parent_index] = temp; + void *temp = priority_queue->items[smallest_child_index]; + priority_queue->items[smallest_child_index] = priority_queue->items[parent_index]; + priority_queue->items[parent_index] = temp; parent_index = smallest_child_index; left_child_index = 2 * parent_index; @@ -185,10 +190,11 @@ priority_queue_percolate_down(struct priority_queue *self, int parent_index) /* Update memoized 
value if we touched the head */ if (update_highest_value) { - if (!priority_queue_is_empty(self)) { - priority_queue_update_highest_priority(self, self->get_priority_fn(self->items[1])); + if (!priority_queue_is_empty(priority_queue)) { + priority_queue_update_highest_priority(priority_queue, priority_queue->get_priority_fn( + priority_queue->items[1])); } else { - priority_queue_update_highest_priority(self, ULONG_MAX); + priority_queue_update_highest_priority(priority_queue, ULONG_MAX); } } } @@ -198,30 +204,32 @@ priority_queue_percolate_down(struct priority_queue *self, int parent_index) ********************/ /** - * @param self - the priority queue we want to add to + * @param priority_queue - the priority queue we want to add to * @param dequeued_element a pointer to set to the dequeued element * @param target_deadline the deadline that the request must be earlier than in order to dequeue * @returns RC 0 if successfully set dequeued_element, -ENOENT if empty or if none meet target_deadline */ static inline int -priority_queue_dequeue_if_earlier_nolock(struct priority_queue *self, void **dequeued_element, uint64_t target_deadline) +priority_queue_dequeue_if_earlier_nolock(struct priority_queue *priority_queue, void **dequeued_element, + uint64_t target_deadline) { - assert(self != NULL); + assert(priority_queue != NULL); assert(dequeued_element != NULL); - assert(self->get_priority_fn != NULL); + assert(priority_queue->get_priority_fn != NULL); assert(!listener_thread_is_running()); - assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); + assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock)); int return_code; /* If the dequeue is not higher priority (earlier timestamp) than targed_deadline, return immediately */ - if (priority_queue_is_empty(self) || self->highest_priority >= target_deadline) goto err_enoent; + if (priority_queue_is_empty(priority_queue) || priority_queue->highest_priority >= target_deadline) + goto err_enoent; - 
*dequeued_element = self->items[1]; - self->items[1] = self->items[self->size]; - self->items[self->size--] = NULL; + *dequeued_element = priority_queue->items[1]; + priority_queue->items[1] = priority_queue->items[priority_queue->size]; + priority_queue->items[priority_queue->size--] = NULL; - priority_queue_percolate_down(self, 1); + priority_queue_percolate_down(priority_queue, 1); return_code = 0; done: @@ -232,19 +240,20 @@ err_enoent: } /** - * @param self - the priority queue we want to add to + * @param priority_queue - the priority queue we want to add to * @param dequeued_element a pointer to set to the dequeued element * @param target_deadline the deadline that the request must be earlier than in order to dequeue * @returns RC 0 if successfully set dequeued_element, -ENOENT if empty or if none meet target_deadline */ static inline int -priority_queue_dequeue_if_earlier(struct priority_queue *self, void **dequeued_element, uint64_t target_deadline) +priority_queue_dequeue_if_earlier(struct priority_queue *priority_queue, void **dequeued_element, + uint64_t target_deadline) { int return_code; - LOCK_LOCK(&self->lock); - return_code = priority_queue_dequeue_if_earlier_nolock(self, dequeued_element, target_deadline); - LOCK_UNLOCK(&self->lock); + LOCK_LOCK(&priority_queue->lock); + return_code = priority_queue_dequeue_if_earlier_nolock(priority_queue, dequeued_element, target_deadline); + LOCK_UNLOCK(&priority_queue->lock); return return_code; } @@ -264,79 +273,79 @@ priority_queue_initialize(size_t capacity, bool use_lock, priority_queue_get_pri /* Add one to capacity because this data structure ignores the element at 0 */ size_t one_based_capacity = capacity + 1; - struct priority_queue *self = (struct priority_queue *)calloc(sizeof(struct priority_queue) - + sizeof(void *) * one_based_capacity, - 1); + struct priority_queue *priority_queue = (struct priority_queue *)calloc(sizeof(struct priority_queue) + + sizeof(void *) * one_based_capacity, + 1); /* 
We're assuming a min-heap implementation, so set to larget possible value */ - priority_queue_update_highest_priority(self, ULONG_MAX); - self->size = 0; - self->capacity = one_based_capacity; // Add one because we skip element 0 - self->get_priority_fn = get_priority_fn; - self->use_lock = use_lock; + priority_queue_update_highest_priority(priority_queue, ULONG_MAX); + priority_queue->size = 0; + priority_queue->capacity = one_based_capacity; // Add one because we skip element 0 + priority_queue->get_priority_fn = get_priority_fn; + priority_queue->use_lock = use_lock; - if (use_lock) LOCK_INIT(&self->lock); + if (use_lock) LOCK_INIT(&priority_queue->lock); - return self; + return priority_queue; } /** * Free the Priority Queue Data structure - * @param self the priority_queue to initialize + * @param priority_queue the priority_queue to initialize */ static inline void -priority_queue_free(struct priority_queue *self) +priority_queue_free(struct priority_queue *priority_queue) { - assert(self != NULL); + assert(priority_queue != NULL); - free(self); + free(priority_queue); } /** - * @param self the priority_queue + * @param priority_queue the priority_queue * @returns the number of elements in the priority queue */ static inline int -priority_queue_length_nolock(struct priority_queue *self) +priority_queue_length_nolock(struct priority_queue *priority_queue) { - assert(self != NULL); + assert(priority_queue != NULL); assert(!listener_thread_is_running()); - assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); + assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock)); - return self->size; + return priority_queue->size; } /** - * @param self the priority_queue + * @param priority_queue the priority_queue * @returns the number of elements in the priority queue */ static inline int -priority_queue_length(struct priority_queue *self) +priority_queue_length(struct priority_queue *priority_queue) { - LOCK_LOCK(&self->lock); - int size = 
priority_queue_length_nolock(self); - LOCK_UNLOCK(&self->lock); + LOCK_LOCK(&priority_queue->lock); + int size = priority_queue_length_nolock(priority_queue); + LOCK_UNLOCK(&priority_queue->lock); return size; } /** - * @param self - the priority queue we want to add to + * @param priority_queue - the priority queue we want to add to * @param value - the value we want to add * @returns 0 on success. -ENOSPC on full. */ static inline int -priority_queue_enqueue_nolock(struct priority_queue *self, void *value) +priority_queue_enqueue_nolock(struct priority_queue *priority_queue, void *value) { - assert(self != NULL); + assert(priority_queue != NULL); assert(value != NULL); - assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); + assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock)); int rc; - if (unlikely(priority_queue_append(self, value) == -ENOSPC)) goto err_enospc; + if (unlikely(priority_queue_append(priority_queue, value) == -ENOSPC)) goto err_enospc; - priority_queue_percolate_up(self); + priority_queue_percolate_up(priority_queue); rc = 0; done: @@ -347,40 +356,40 @@ err_enospc: } /** - * @param self - the priority queue we want to add to + * @param priority_queue - the priority queue we want to add to * @param value - the value we want to add * @returns 0 on success. -ENOSPC on full. */ static inline int -priority_queue_enqueue(struct priority_queue *self, void *value) +priority_queue_enqueue(struct priority_queue *priority_queue, void *value) { int rc; - LOCK_LOCK(&self->lock); - rc = priority_queue_enqueue_nolock(self, value); - LOCK_UNLOCK(&self->lock); + LOCK_LOCK(&priority_queue->lock); + rc = priority_queue_enqueue_nolock(priority_queue, value); + LOCK_UNLOCK(&priority_queue->lock); return rc; } /** - * @param self - the priority queue we want to delete from + * @param priority_queue - the priority queue we want to delete from * @param value - the value we want to delete * @returns 0 on success. 
-1 on not found */ static inline int -priority_queue_delete_nolock(struct priority_queue *self, void *value) +priority_queue_delete_nolock(struct priority_queue *priority_queue, void *value) { - assert(self != NULL); + assert(priority_queue != NULL); assert(value != NULL); assert(!listener_thread_is_running()); - assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); + assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock)); - for (int i = 1; i <= self->size; i++) { - if (self->items[i] == value) { - self->items[i] = self->items[self->size]; - self->items[self->size--] = NULL; - priority_queue_percolate_down(self, i); + for (int i = 1; i <= priority_queue->size; i++) { + if (priority_queue->items[i] == value) { + priority_queue->items[i] = priority_queue->items[priority_queue->size]; + priority_queue->items[priority_queue->size--] = NULL; + priority_queue_percolate_down(priority_queue, i); return 0; } } @@ -389,64 +398,64 @@ priority_queue_delete_nolock(struct priority_queue *self, void *value) } /** - * @param self - the priority queue we want to delete from + * @param priority_queue - the priority queue we want to delete from * @param value - the value we want to delete * @returns 0 on success. 
-1 on not found */ static inline int -priority_queue_delete(struct priority_queue *self, void *value) +priority_queue_delete(struct priority_queue *priority_queue, void *value) { int rc; - LOCK_LOCK(&self->lock); - rc = priority_queue_delete_nolock(self, value); - LOCK_UNLOCK(&self->lock); + LOCK_LOCK(&priority_queue->lock); + rc = priority_queue_delete_nolock(priority_queue, value); + LOCK_UNLOCK(&priority_queue->lock); return rc; } /** - * @param self - the priority queue we want to add to + * @param priority_queue - the priority queue we want to add to * @param dequeued_element a pointer to set to the dequeued element * @returns RC 0 if successfully set dequeued_element, -ENOENT if empty */ static inline int -priority_queue_dequeue(struct priority_queue *self, void **dequeued_element) +priority_queue_dequeue(struct priority_queue *priority_queue, void **dequeued_element) { - return priority_queue_dequeue_if_earlier(self, dequeued_element, UINT64_MAX); + return priority_queue_dequeue_if_earlier(priority_queue, dequeued_element, UINT64_MAX); } /** - * @param self - the priority queue we want to add to + * @param priority_queue - the priority queue we want to add to * @param dequeued_element a pointer to set to the dequeued element * @returns RC 0 if successfully set dequeued_element, -ENOENT if empty */ static inline int -priority_queue_dequeue_nolock(struct priority_queue *self, void **dequeued_element) +priority_queue_dequeue_nolock(struct priority_queue *priority_queue, void **dequeued_element) { - return priority_queue_dequeue_if_earlier_nolock(self, dequeued_element, UINT64_MAX); + return priority_queue_dequeue_if_earlier_nolock(priority_queue, dequeued_element, UINT64_MAX); } /** * Returns the top of the priority queue without removing it - * @param self - the priority queue we want to add to + * @param priority_queue - the priority queue we want to add to * @param dequeued_element a pointer to set to the top element * @returns RC 0 if successfully set 
dequeued_element, -ENOENT if empty */ static inline int -priority_queue_top_nolock(struct priority_queue *self, void **dequeued_element) +priority_queue_top_nolock(struct priority_queue *priority_queue, void **dequeued_element) { - assert(self != NULL); + assert(priority_queue != NULL); assert(dequeued_element != NULL); - assert(self->get_priority_fn != NULL); + assert(priority_queue->get_priority_fn != NULL); assert(!listener_thread_is_running()); - assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); + assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock)); int return_code; - if (priority_queue_is_empty(self)) goto err_enoent; + if (priority_queue_is_empty(priority_queue)) goto err_enoent; - *dequeued_element = self->items[1]; + *dequeued_element = priority_queue->items[1]; return_code = 0; done: @@ -458,18 +467,18 @@ err_enoent: /** * Returns the top of the priority queue without removing it - * @param self - the priority queue we want to add to + * @param priority_queue - the priority queue we want to add to * @param dequeued_element a pointer to set to the top element * @returns RC 0 if successfully set dequeued_element, -ENOENT if empty */ static inline int -priority_queue_top(struct priority_queue *self, void **dequeued_element) +priority_queue_top(struct priority_queue *priority_queue, void **dequeued_element) { int return_code; - LOCK_LOCK(&self->lock); - return_code = priority_queue_top_nolock(self, dequeued_element); - LOCK_UNLOCK(&self->lock); + LOCK_LOCK(&priority_queue->lock); + return_code = priority_queue_top_nolock(priority_queue, dequeued_element); + LOCK_UNLOCK(&priority_queue->lock); return return_code; } diff --git a/runtime/include/sandbox_state_history.h b/runtime/include/sandbox_state_history.h index fa2e1d3..d1d61b9 100644 --- a/runtime/include/sandbox_state_history.h +++ b/runtime/include/sandbox_state_history.h @@ -15,18 +15,20 @@ struct sandbox_state_history { }; static inline void 
-sandbox_state_history_init(struct sandbox_state_history *self) +sandbox_state_history_init(struct sandbox_state_history *sandbox_state_history) { #ifdef LOG_STATE_CHANGES - memset(self, 0, + memset(sandbox_state_history, 0, sizeof(struct sandbox_state_history) + SANDBOX_STATE_HISTORY_CAPACITY * sizeof(sandbox_state_t)); #endif } static inline void -sandbox_state_history_append(struct sandbox_state_history *self, sandbox_state_t state) +sandbox_state_history_append(struct sandbox_state_history *sandbox_state_history, sandbox_state_t state) { #ifdef LOG_STATE_CHANGES - if (likely(self->size < SANDBOX_STATE_HISTORY_CAPACITY)) { self->buffer[self->size++] = state; } + if (likely(sandbox_state_history->size < SANDBOX_STATE_HISTORY_CAPACITY)) { + sandbox_state_history->buffer[sandbox_state_history->size++] = state; + } #endif } diff --git a/runtime/include/vec_u8.h b/runtime/include/vec_u8.h index 39b8f54..681e53a 100644 --- a/runtime/include/vec_u8.h +++ b/runtime/include/vec_u8.h @@ -10,11 +10,11 @@ struct vec_u8 { }; static inline struct vec_u8 *vec_u8_alloc(void); -static inline int vec_u8_init(struct vec_u8 *self, size_t capacity); +static inline int vec_u8_init(struct vec_u8 *vec_u8, size_t capacity); static inline struct vec_u8 *vec_u8_new(size_t capacity); -static inline void vec_u8_deinit(struct vec_u8 *self); -static inline void vec_u8_free(struct vec_u8 *self); -static inline void vec_u8_delete(struct vec_u8 *self); +static inline void vec_u8_deinit(struct vec_u8 *vec_u8); +static inline void vec_u8_free(struct vec_u8 *vec_u8); +static inline void vec_u8_delete(struct vec_u8 *vec_u8); /** * Allocates an uninitialized vec on the heap' @@ -28,22 +28,22 @@ vec_u8_alloc(void) /** * Initializes a vec, allocating a backing buffer for the provided capcity - * @param self pointer to an uninitialized vec + * @param vec_u8 pointer to an uninitialized vec * @param capacity * @returns 0 on success, -1 on failure */ static inline int -vec_u8_init(struct vec_u8 *self, 
size_t capacity) +vec_u8_init(struct vec_u8 *vec_u8, size_t capacity) { if (capacity == 0) { - self->buffer = NULL; + vec_u8->buffer = NULL; } else { - self->buffer = calloc(capacity, sizeof(uint8_t)); - if (self->buffer == NULL) return -1; + vec_u8->buffer = calloc(capacity, sizeof(uint8_t)); + if (vec_u8->buffer == NULL) return -1; } - self->length = 0; - self->capacity = capacity; + vec_u8->length = 0; + vec_u8->capacity = capacity; return 0; } @@ -56,36 +56,36 @@ vec_u8_init(struct vec_u8 *self, size_t capacity) static inline struct vec_u8 * vec_u8_new(size_t capacity) { - struct vec_u8 *self = vec_u8_alloc(); - if (self == NULL) return self; + struct vec_u8 *vec_u8 = vec_u8_alloc(); + if (vec_u8 == NULL) return vec_u8; - int rc = vec_u8_init(self, capacity); + int rc = vec_u8_init(vec_u8, capacity); if (rc < 0) { - vec_u8_free(self); + vec_u8_free(vec_u8); return NULL; } - return self; + return vec_u8; } /** * Deinitialize a vec, clearing out members and releasing the backing buffer - * @param self + * @param vec_u8 */ static inline void -vec_u8_deinit(struct vec_u8 *self) +vec_u8_deinit(struct vec_u8 *vec_u8) { - if (self->capacity == 0) { - assert(self->buffer == NULL); - assert(self->length == 0); + if (vec_u8->capacity == 0) { + assert(vec_u8->buffer == NULL); + assert(vec_u8->length == 0); return; } - assert(self->buffer != NULL); - free(self->buffer); - self->buffer = NULL; - self->length = 0; - self->capacity = 0; + assert(vec_u8->buffer != NULL); + free(vec_u8->buffer); + vec_u8->buffer = NULL; + vec_u8->length = 0; + vec_u8->capacity = 0; } /** @@ -93,21 +93,21 @@ vec_u8_deinit(struct vec_u8 *self) * Assumes that the vec has already been deinitialized */ static inline void -vec_u8_free(struct vec_u8 *self) +vec_u8_free(struct vec_u8 *vec_u8) { - assert(self->buffer == NULL); - assert(self->length == 0); - assert(self->capacity == 0); - free(self); + assert(vec_u8->buffer == NULL); + assert(vec_u8->length == 0); + assert(vec_u8->capacity == 0); + 
free(vec_u8); } /** * Deinitializes and frees a vec allocated to the heap - * @param self + * @param vec_u8 */ static inline void -vec_u8_delete(struct vec_u8 *self) +vec_u8_delete(struct vec_u8 *vec_u8) { - vec_u8_deinit(self); - vec_u8_free(self); + vec_u8_deinit(vec_u8); + vec_u8_free(vec_u8); } diff --git a/runtime/include/wasm_memory.h b/runtime/include/wasm_memory.h index 5045aec..438d561 100644 --- a/runtime/include/wasm_memory.h +++ b/runtime/include/wasm_memory.h @@ -23,11 +23,11 @@ struct wasm_memory { }; static INLINE struct wasm_memory *wasm_memory_alloc(void); -static INLINE int wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max); +static INLINE int wasm_memory_init(struct wasm_memory *wasm_memory, size_t initial, size_t max); static INLINE struct wasm_memory *wasm_memory_new(size_t initial, size_t max); -static INLINE void wasm_memory_deinit(struct wasm_memory *self); -static INLINE void wasm_memory_free(struct wasm_memory *self); -static INLINE void wasm_memory_delete(struct wasm_memory *self); +static INLINE void wasm_memory_deinit(struct wasm_memory *wasm_memory); +static INLINE void wasm_memory_free(struct wasm_memory *wasm_memory); +static INLINE void wasm_memory_delete(struct wasm_memory *wasm_memory); static INLINE struct wasm_memory * @@ -37,9 +37,9 @@ wasm_memory_alloc(void) } static INLINE int -wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max) +wasm_memory_init(struct wasm_memory *wasm_memory, size_t initial, size_t max) { - assert(self != NULL); + assert(wasm_memory != NULL); /* We assume WASI modules, which are required to declare and export a linear memory with a non-zero size to * allow a standard lib to initialize. 
Technically, a WebAssembly module that exports pure functions may not use @@ -50,20 +50,20 @@ wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max) assert(max <= (size_t)UINT32_MAX + 1); /* Allocate buffer of contiguous virtual addresses for full wasm32 linear memory and guard page */ - self->buffer = mmap(NULL, WASM_MEMORY_SIZE_TO_ALLOC, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (self->buffer == MAP_FAILED) return -1; + wasm_memory->buffer = mmap(NULL, WASM_MEMORY_SIZE_TO_ALLOC, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (wasm_memory->buffer == MAP_FAILED) return -1; /* Set the initial bytes to read / write */ - int rc = mprotect(self->buffer, initial, PROT_READ | PROT_WRITE); + int rc = mprotect(wasm_memory->buffer, initial, PROT_READ | PROT_WRITE); if (rc != 0) { - munmap(self->buffer, WASM_MEMORY_SIZE_TO_ALLOC); + munmap(wasm_memory->buffer, WASM_MEMORY_SIZE_TO_ALLOC); return -1; } - ps_list_init_d(self); - self->size = initial; - self->capacity = initial; - self->max = max; + ps_list_init_d(wasm_memory); + wasm_memory->size = initial; + wasm_memory->capacity = initial; + wasm_memory->max = max; return 0; } @@ -71,70 +71,70 @@ wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max) static INLINE struct wasm_memory * wasm_memory_new(size_t initial, size_t max) { - struct wasm_memory *self = wasm_memory_alloc(); - if (self == NULL) return self; + struct wasm_memory *wasm_memory = wasm_memory_alloc(); + if (wasm_memory == NULL) return wasm_memory; - int rc = wasm_memory_init(self, initial, max); + int rc = wasm_memory_init(wasm_memory, initial, max); if (rc < 0) { assert(0); - wasm_memory_free(self); + wasm_memory_free(wasm_memory); return NULL; } - return self; + return wasm_memory; } static INLINE void -wasm_memory_deinit(struct wasm_memory *self) +wasm_memory_deinit(struct wasm_memory *wasm_memory) { - assert(self != NULL); - assert(self->buffer != NULL); - - munmap(self->buffer, WASM_MEMORY_SIZE_TO_ALLOC); - 
self->buffer = NULL; - self->size = 0; - self->capacity = 0; - self->max = 0; + assert(wasm_memory != NULL); + assert(wasm_memory->buffer != NULL); + + munmap(wasm_memory->buffer, WASM_MEMORY_SIZE_TO_ALLOC); + wasm_memory->buffer = NULL; + wasm_memory->size = 0; + wasm_memory->capacity = 0; + wasm_memory->max = 0; } static INLINE void -wasm_memory_free(struct wasm_memory *self) +wasm_memory_free(struct wasm_memory *wasm_memory) { - assert(self != NULL); + assert(wasm_memory != NULL); /* Assume prior deinitialization so we don't leak buffers */ - assert(self->buffer == NULL); + assert(wasm_memory->buffer == NULL); - free(self); + free(wasm_memory); } static INLINE void -wasm_memory_delete(struct wasm_memory *self) +wasm_memory_delete(struct wasm_memory *wasm_memory) { - assert(self != NULL); + assert(wasm_memory != NULL); - wasm_memory_deinit(self); - wasm_memory_free(self); + wasm_memory_deinit(wasm_memory); + wasm_memory_free(wasm_memory); } static INLINE void -wasm_memory_wipe(struct wasm_memory *self) +wasm_memory_wipe(struct wasm_memory *wasm_memory) { - memset(self->buffer, 0, self->size); + memset(wasm_memory->buffer, 0, wasm_memory->size); } static INLINE void -wasm_memory_reinit(struct wasm_memory *self, size_t initial) +wasm_memory_reinit(struct wasm_memory *wasm_memory, size_t initial) { - wasm_memory_wipe(self); - self->size = initial; + wasm_memory_wipe(wasm_memory); + wasm_memory->size = initial; } static INLINE int -wasm_memory_expand(struct wasm_memory *self, size_t size_to_expand) +wasm_memory_expand(struct wasm_memory *wasm_memory, size_t size_to_expand) { - size_t target_size = self->size + size_to_expand; - if (unlikely(target_size > self->max)) { - fprintf(stderr, "wasm_memory_expand - Out of Memory!. %lu out of %lu\n", self->size, self->max); + size_t target_size = wasm_memory->size + size_to_expand; + if (unlikely(target_size > wasm_memory->max)) { + fprintf(stderr, "wasm_memory_expand - Out of Memory!. 
%lu out of %lu\n", wasm_memory->size, wasm_memory->max); return -1; } @@ -143,37 +143,37 @@ wasm_memory_expand(struct wasm_memory *self, size_t size_to_expand) * size is less than this "high water mark," we just need to update size for accounting purposes. Otherwise, we * need to actually issue an mprotect syscall. The goal of these optimizations is to reduce mmap and demand * paging overhead for repeated instantiations of a WebAssembly module. */ - if (target_size > self->capacity) { - int rc = mprotect(self->buffer, target_size, PROT_READ | PROT_WRITE); + if (target_size > wasm_memory->capacity) { + int rc = mprotect(wasm_memory->buffer, target_size, PROT_READ | PROT_WRITE); if (rc != 0) { perror("wasm_memory_expand mprotect"); return -1; } - self->capacity = target_size; + wasm_memory->capacity = target_size; } - self->size = target_size; + wasm_memory->size = target_size; return 0; } static INLINE void -wasm_memory_set_size(struct wasm_memory *self, size_t size) +wasm_memory_set_size(struct wasm_memory *wasm_memory, size_t size) { - self->size = size; + wasm_memory->size = size; } static INLINE size_t -wasm_memory_get_size(struct wasm_memory *self) +wasm_memory_get_size(struct wasm_memory *wasm_memory) { - return self->size; + return wasm_memory->size; } static INLINE void -wasm_memory_initialize_region(struct wasm_memory *self, uint32_t offset, uint32_t region_size, uint8_t region[]) +wasm_memory_initialize_region(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t region_size, uint8_t region[]) { - assert((size_t)offset + region_size <= self->size); - memcpy(&self->buffer[offset], region, region_size); + assert((size_t)offset + region_size <= wasm_memory->size); + memcpy(&wasm_memory->buffer[offset], region, region_size); } /* NOTE: These wasm_memory functions require pointer dereferencing. 
For this reason, they are not directly by wasm32 @@ -186,10 +186,10 @@ wasm_memory_initialize_region(struct wasm_memory *self, uint32_t offset, uint32_ * @return void pointer to something in WebAssembly linear memory */ static INLINE void * -wasm_memory_get_ptr_void(struct wasm_memory *self, uint32_t offset, uint32_t size) +wasm_memory_get_ptr_void(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t size) { - assert(offset + size <= self->size); - return (void *)&self->buffer[offset]; + assert(offset + size <= wasm_memory->size); + return (void *)&wasm_memory->buffer[offset]; } /** @@ -198,10 +198,10 @@ wasm_memory_get_ptr_void(struct wasm_memory *self, uint32_t offset, uint32_t siz * @return char at the offset */ static INLINE char -wasm_memory_get_char(struct wasm_memory *self, uint32_t offset) +wasm_memory_get_char(struct wasm_memory *wasm_memory, uint32_t offset) { - assert(offset + sizeof(char) <= self->size); - return *(char *)&self->buffer[offset]; + assert(offset + sizeof(char) <= wasm_memory->size); + return *(char *)&wasm_memory->buffer[offset]; } /** @@ -210,10 +210,10 @@ wasm_memory_get_char(struct wasm_memory *self, uint32_t offset) * @return float at the offset */ static INLINE float -wasm_memory_get_f32(struct wasm_memory *self, uint32_t offset) +wasm_memory_get_f32(struct wasm_memory *wasm_memory, uint32_t offset) { - assert(offset + sizeof(float) <= self->size); - return *(float *)&self->buffer[offset]; + assert(offset + sizeof(float) <= wasm_memory->size); + return *(float *)&wasm_memory->buffer[offset]; } /** @@ -222,10 +222,10 @@ wasm_memory_get_f32(struct wasm_memory *self, uint32_t offset) * @return double at the offset */ static INLINE double -wasm_memory_get_f64(struct wasm_memory *self, uint32_t offset) +wasm_memory_get_f64(struct wasm_memory *wasm_memory, uint32_t offset) { - assert(offset + sizeof(double) <= self->size); - return *(double *)&self->buffer[offset]; + assert(offset + sizeof(double) <= wasm_memory->size); + return 
*(double *)&wasm_memory->buffer[offset]; } /** @@ -234,10 +234,10 @@ wasm_memory_get_f64(struct wasm_memory *self, uint32_t offset) * @return int8_t at the offset */ static INLINE int8_t -wasm_memory_get_i8(struct wasm_memory *self, uint32_t offset) +wasm_memory_get_i8(struct wasm_memory *wasm_memory, uint32_t offset) { - assert(offset + sizeof(int8_t) <= self->size); - return *(int8_t *)&self->buffer[offset]; + assert(offset + sizeof(int8_t) <= wasm_memory->size); + return *(int8_t *)&wasm_memory->buffer[offset]; } /** @@ -246,10 +246,10 @@ wasm_memory_get_i8(struct wasm_memory *self, uint32_t offset) * @return int16_t at the offset */ static INLINE int16_t -wasm_memory_get_i16(struct wasm_memory *self, uint32_t offset) +wasm_memory_get_i16(struct wasm_memory *wasm_memory, uint32_t offset) { - assert(offset + sizeof(int16_t) <= self->size); - return *(int16_t *)&self->buffer[offset]; + assert(offset + sizeof(int16_t) <= wasm_memory->size); + return *(int16_t *)&wasm_memory->buffer[offset]; } /** @@ -258,10 +258,10 @@ wasm_memory_get_i16(struct wasm_memory *self, uint32_t offset) * @return int32_t at the offset */ static INLINE int32_t -wasm_memory_get_i32(struct wasm_memory *self, uint32_t offset) +wasm_memory_get_i32(struct wasm_memory *wasm_memory, uint32_t offset) { - assert(offset + sizeof(int32_t) <= self->size); - return *(int32_t *)&self->buffer[offset]; + assert(offset + sizeof(int32_t) <= wasm_memory->size); + return *(int32_t *)&wasm_memory->buffer[offset]; } /** @@ -270,16 +270,16 @@ wasm_memory_get_i32(struct wasm_memory *self, uint32_t offset) * @return int32_t at the offset */ static INLINE int64_t -wasm_memory_get_i64(struct wasm_memory *self, uint32_t offset) +wasm_memory_get_i64(struct wasm_memory *wasm_memory, uint32_t offset) { - assert(offset + sizeof(int64_t) <= self->size); - return *(int64_t *)&self->buffer[offset]; + assert(offset + sizeof(int64_t) <= wasm_memory->size); + return *(int64_t *)&wasm_memory->buffer[offset]; } static INLINE 
uint32_t -wasm_memory_get_page_count(struct wasm_memory *self) +wasm_memory_get_page_count(struct wasm_memory *wasm_memory) { - return (uint32_t)(self->size / WASM_PAGE_SIZE); + return (uint32_t)(wasm_memory->size / WASM_PAGE_SIZE); } /** @@ -289,12 +289,12 @@ wasm_memory_get_page_count(struct wasm_memory *self) * @return pointer to the string or NULL if max_length is reached without finding null-terminator */ static INLINE char * -wasm_memory_get_string(struct wasm_memory *self, uint32_t offset, uint32_t size) +wasm_memory_get_string(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t size) { - assert(offset + (sizeof(char) * size) <= self->size); + assert(offset + (sizeof(char) * size) <= wasm_memory->size); - if (strnlen((const char *)&self->buffer[offset], size) < size) { - return (char *)&self->buffer[offset]; + if (strnlen((const char *)&wasm_memory->buffer[offset], size) < size) { + return (char *)&wasm_memory->buffer[offset]; } else { return NULL; } @@ -306,10 +306,10 @@ wasm_memory_get_string(struct wasm_memory *self, uint32_t offset, uint32_t size) * @return float at the offset */ static INLINE void -wasm_memory_set_f32(struct wasm_memory *self, uint32_t offset, float value) +wasm_memory_set_f32(struct wasm_memory *wasm_memory, uint32_t offset, float value) { - assert(offset + sizeof(float) <= self->size); - *(float *)&self->buffer[offset] = value; + assert(offset + sizeof(float) <= wasm_memory->size); + *(float *)&wasm_memory->buffer[offset] = value; } /** @@ -318,10 +318,10 @@ wasm_memory_set_f32(struct wasm_memory *self, uint32_t offset, float value) * @return double at the offset */ static INLINE void -wasm_memory_set_f64(struct wasm_memory *self, uint32_t offset, double value) +wasm_memory_set_f64(struct wasm_memory *wasm_memory, uint32_t offset, double value) { - assert(offset + sizeof(double) <= self->size); - *(double *)&self->buffer[offset] = value; + assert(offset + sizeof(double) <= wasm_memory->size); + *(double 
*)&wasm_memory->buffer[offset] = value; } /** @@ -330,10 +330,10 @@ wasm_memory_set_f64(struct wasm_memory *self, uint32_t offset, double value) * @return int8_t at the offset */ static INLINE void -wasm_memory_set_i8(struct wasm_memory *self, uint32_t offset, int8_t value) +wasm_memory_set_i8(struct wasm_memory *wasm_memory, uint32_t offset, int8_t value) { - assert(offset + sizeof(int8_t) <= self->size); - *(int8_t *)&self->buffer[offset] = value; + assert(offset + sizeof(int8_t) <= wasm_memory->size); + *(int8_t *)&wasm_memory->buffer[offset] = value; } /** @@ -342,10 +342,10 @@ wasm_memory_set_i8(struct wasm_memory *self, uint32_t offset, int8_t value) * @return int16_t at the offset */ static INLINE void -wasm_memory_set_i16(struct wasm_memory *self, uint32_t offset, int16_t value) +wasm_memory_set_i16(struct wasm_memory *wasm_memory, uint32_t offset, int16_t value) { - assert(offset + sizeof(int16_t) <= self->size); - *(int16_t *)&self->buffer[offset] = value; + assert(offset + sizeof(int16_t) <= wasm_memory->size); + *(int16_t *)&wasm_memory->buffer[offset] = value; } /** @@ -354,10 +354,10 @@ wasm_memory_set_i16(struct wasm_memory *self, uint32_t offset, int16_t value) * @return int32_t at the offset */ static INLINE void -wasm_memory_set_i32(struct wasm_memory *self, uint32_t offset, int32_t value) +wasm_memory_set_i32(struct wasm_memory *wasm_memory, uint32_t offset, int32_t value) { - assert(offset + sizeof(int32_t) <= self->size); - *(int32_t *)&self->buffer[offset] = value; + assert(offset + sizeof(int32_t) <= wasm_memory->size); + *(int32_t *)&wasm_memory->buffer[offset] = value; } /** @@ -366,8 +366,8 @@ wasm_memory_set_i32(struct wasm_memory *self, uint32_t offset, int32_t value) * @return int64_t at the offset */ static INLINE void -wasm_memory_set_i64(struct wasm_memory *self, uint64_t offset, int64_t value) +wasm_memory_set_i64(struct wasm_memory *wasm_memory, uint64_t offset, int64_t value) { - assert(offset + sizeof(int64_t) <= self->size); - 
*(int64_t *)&self->buffer[offset] = value; + assert(offset + sizeof(int64_t) <= wasm_memory->size); + *(int64_t *)&wasm_memory->buffer[offset] = value; } diff --git a/runtime/include/wasm_stack.h b/runtime/include/wasm_stack.h index 4832d24..87deff8 100644 --- a/runtime/include/wasm_stack.h +++ b/runtime/include/wasm_stack.h @@ -29,93 +29,93 @@ wasm_stack_allocate(void) * @returns 0 on success, -1 on error */ static inline int -wasm_stack_init(struct wasm_stack *self, size_t capacity) +wasm_stack_init(struct wasm_stack *wasm_stack, size_t capacity) { - assert(self); + assert(wasm_stack); int rc = 0; - self->buffer = (uint8_t *)mmap(NULL, /* guard page */ PAGE_SIZE + capacity, PROT_NONE, + wasm_stack->buffer = (uint8_t *)mmap(NULL, /* guard page */ PAGE_SIZE + capacity, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (unlikely(self->buffer == MAP_FAILED)) { + if (unlikely(wasm_stack->buffer == MAP_FAILED)) { perror("sandbox allocate stack"); goto err_stack_allocation_failed; } - self->low = (uint8_t *)mmap(self->buffer + /* guard page */ PAGE_SIZE, capacity, PROT_READ | PROT_WRITE, + wasm_stack->low = (uint8_t *)mmap(wasm_stack->buffer + /* guard page */ PAGE_SIZE, capacity, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); - if (unlikely(self->low == MAP_FAILED)) { + if (unlikely(wasm_stack->low == MAP_FAILED)) { perror("sandbox set stack read/write"); goto err_stack_prot_failed; } - ps_list_init_d(self); - self->capacity = capacity; - self->high = self->low + capacity; + ps_list_init_d(wasm_stack); + wasm_stack->capacity = capacity; + wasm_stack->high = wasm_stack->low + capacity; rc = 0; done: return rc; err_stack_prot_failed: - rc = munmap(self->buffer, PAGE_SIZE + capacity); + rc = munmap(wasm_stack->buffer, PAGE_SIZE + capacity); if (rc == -1) perror("munmap"); err_stack_allocation_failed: - self->buffer = NULL; + wasm_stack->buffer = NULL; rc = -1; goto done; } static INLINE void -wasm_stack_free(struct wasm_stack *self) 
+wasm_stack_free(struct wasm_stack *wasm_stack) { - free(self); + free(wasm_stack); } static struct wasm_stack * wasm_stack_new(size_t capacity) { - struct wasm_stack *self = wasm_stack_allocate(); - int rc = wasm_stack_init(self, capacity); + struct wasm_stack *wasm_stack = wasm_stack_allocate(); + int rc = wasm_stack_init(wasm_stack, capacity); if (rc < 0) { - wasm_stack_free(self); + wasm_stack_free(wasm_stack); return NULL; } - return self; + return wasm_stack; } static inline void -wasm_stack_deinit(struct wasm_stack *self) +wasm_stack_deinit(struct wasm_stack *wasm_stack) { - assert(self != NULL); - assert(self->buffer != NULL); + assert(wasm_stack != NULL); + assert(wasm_stack->buffer != NULL); /* The stack start is the bottom of the usable stack, but we allocated a guard page below this */ - munmap(self->buffer, self->capacity + PAGE_SIZE); - self->buffer = NULL; - self->high = NULL; - self->low = NULL; + munmap(wasm_stack->buffer, wasm_stack->capacity + PAGE_SIZE); + wasm_stack->buffer = NULL; + wasm_stack->high = NULL; + wasm_stack->low = NULL; } static inline void -wasm_stack_delete(struct wasm_stack *self) +wasm_stack_delete(struct wasm_stack *wasm_stack) { - assert(self != NULL); - assert(self->buffer != NULL); - wasm_stack_deinit(self); - wasm_stack_free(self); + assert(wasm_stack != NULL); + assert(wasm_stack->buffer != NULL); + wasm_stack_deinit(wasm_stack); + wasm_stack_free(wasm_stack); } static inline void -wasm_stack_reinit(struct wasm_stack *self) +wasm_stack_reinit(struct wasm_stack *wasm_stack) { - assert(self != NULL); - assert(self->buffer != NULL); + assert(wasm_stack != NULL); + assert(wasm_stack->buffer != NULL); - self->low = self->buffer + /* guard page */ PAGE_SIZE; + wasm_stack->low = wasm_stack->buffer + /* guard page */ PAGE_SIZE; - memset(self->low, 0, self->capacity); - ps_list_init_d(self); - self->high = self->low + self->capacity; + memset(wasm_stack->low, 0, wasm_stack->capacity); + ps_list_init_d(wasm_stack); + 
wasm_stack->high = wasm_stack->low + wasm_stack->capacity; } diff --git a/runtime/include/wasm_table.h b/runtime/include/wasm_table.h index 33b0b50..4a3b0ac 100644 --- a/runtime/include/wasm_table.h +++ b/runtime/include/wasm_table.h @@ -21,11 +21,11 @@ struct wasm_table { }; static INLINE struct wasm_table *wasm_table_alloc(void); -static INLINE int wasm_table_init(struct wasm_table *self, size_t capacity); +static INLINE int wasm_table_init(struct wasm_table *wasm_table, size_t capacity); static INLINE struct wasm_table *wasm_table_new(size_t capacity); -static INLINE void wasm_table_deinit(struct wasm_table *self); -static INLINE void wasm_table_free(struct wasm_table *self); -static INLINE void wasm_table_delete(struct wasm_table *self); +static INLINE void wasm_table_deinit(struct wasm_table *wasm_table); +static INLINE void wasm_table_free(struct wasm_table *wasm_table); +static INLINE void wasm_table_delete(struct wasm_table *wasm_table); static INLINE struct wasm_table * wasm_table_alloc(void) @@ -34,17 +34,17 @@ wasm_table_alloc(void) } static INLINE int -wasm_table_init(struct wasm_table *self, size_t capacity) +wasm_table_init(struct wasm_table *wasm_table, size_t capacity) { - assert(self != NULL); + assert(wasm_table != NULL); if (capacity > 0) { - self->buffer = calloc(capacity, sizeof(struct wasm_table_entry)); - if (self->buffer == NULL) return -1; + wasm_table->buffer = calloc(capacity, sizeof(struct wasm_table_entry)); + if (wasm_table->buffer == NULL) return -1; } - self->capacity = capacity; - self->length = 0; + wasm_table->capacity = capacity; + wasm_table->length = 0; return 0; } @@ -52,50 +52,50 @@ wasm_table_init(struct wasm_table *self, size_t capacity) static INLINE struct wasm_table * wasm_table_new(size_t capacity) { - struct wasm_table *self = wasm_table_alloc(); - if (self == NULL) return NULL; + struct wasm_table *wasm_table = wasm_table_alloc(); + if (wasm_table == NULL) return NULL; - int rc = wasm_table_init(self, capacity); + int 
rc = wasm_table_init(wasm_table, capacity); if (rc < 0) { - wasm_table_free(self); + wasm_table_free(wasm_table); return NULL; } - return self; + return wasm_table; } static INLINE void -wasm_table_deinit(struct wasm_table *self) +wasm_table_deinit(struct wasm_table *wasm_table) { - assert(self != NULL); + assert(wasm_table != NULL); - if (self->capacity > 0) { - assert(self->buffer == NULL); - assert(self->length == 0); + if (wasm_table->capacity > 0) { + assert(wasm_table->buffer == NULL); + assert(wasm_table->length == 0); return; } - assert(self->buffer != NULL); - free(self->buffer); - self->buffer = NULL; - self->length = 0; - self->capacity = 0; + assert(wasm_table->buffer != NULL); + free(wasm_table->buffer); + wasm_table->buffer = NULL; + wasm_table->length = 0; + wasm_table->capacity = 0; } static INLINE void -wasm_table_free(struct wasm_table *self) +wasm_table_free(struct wasm_table *wasm_table) { - assert(self != NULL); - free(self); + assert(wasm_table != NULL); + free(wasm_table); } static INLINE void * -wasm_table_get(struct wasm_table *self, uint32_t idx, uint32_t type_id) +wasm_table_get(struct wasm_table *wasm_table, uint32_t idx, uint32_t type_id) { - assert(self != NULL); - assert(idx < self->capacity); + assert(wasm_table != NULL); + assert(idx < wasm_table->capacity); - struct wasm_table_entry f = self->buffer[idx]; + struct wasm_table_entry f = wasm_table->buffer[idx]; // FIXME: Commented out function type check because of gocr // assert(f.type_id == type_id); @@ -105,14 +105,14 @@ wasm_table_get(struct wasm_table *self, uint32_t idx, uint32_t type_id) } static INLINE void -wasm_table_set(struct wasm_table *self, uint32_t idx, uint32_t type_id, char *pointer) +wasm_table_set(struct wasm_table *wasm_table, uint32_t idx, uint32_t type_id, char *pointer) { - assert(self != NULL); - assert(idx < self->capacity); + assert(wasm_table != NULL); + assert(idx < wasm_table->capacity); assert(pointer != NULL); /* TODO: atomic for multiple concurrent 
invocations? Issue #97 */ - if (self->buffer[idx].type_id == type_id && self->buffer[idx].func_pointer == pointer) return; + if (wasm_table->buffer[idx].type_id == type_id && wasm_table->buffer[idx].func_pointer == pointer) return; - self->buffer[idx] = (struct wasm_table_entry){ .type_id = type_id, .func_pointer = pointer }; + wasm_table->buffer[idx] = (struct wasm_table_entry){ .type_id = type_id, .func_pointer = pointer }; } diff --git a/runtime/src/admissions_info.c b/runtime/src/admissions_info.c index cee9bf3..4fa6503 100644 --- a/runtime/src/admissions_info.c +++ b/runtime/src/admissions_info.c @@ -3,29 +3,29 @@ /** * Initializes perf window - * @param self + * @param admissions_info */ void -admissions_info_initialize(struct admissions_info *self, int percentile, uint64_t expected_execution, +admissions_info_initialize(struct admissions_info *admissions_info, int percentile, uint64_t expected_execution, uint64_t relative_deadline) { #ifdef ADMISSIONS_CONTROL assert(relative_deadline > 0); assert(expected_execution > 0); - self->relative_deadline = relative_deadline; - self->estimate = admissions_control_calculate_estimate(expected_execution, relative_deadline); - debuglog("Initial Estimate: %lu\n", self->estimate); - assert(self != NULL); + admissions_info->relative_deadline = relative_deadline; + admissions_info->estimate = admissions_control_calculate_estimate(expected_execution, relative_deadline); + debuglog("Initial Estimate: %lu\n", admissions_info->estimate); + assert(admissions_info != NULL); - perf_window_initialize(&self->perf_window); + perf_window_initialize(&admissions_info->perf_window); if (unlikely(percentile < 50 || percentile > 99)) panic("Invalid admissions percentile"); - self->percentile = percentile; + admissions_info->percentile = percentile; - self->control_index = PERF_WINDOW_BUFFER_SIZE * percentile / 100; + admissions_info->control_index = PERF_WINDOW_BUFFER_SIZE * percentile / 100; #ifdef LOG_ADMISSIONS_CONTROL - 
debuglog("Percentile: %d\n", self->percentile); - debuglog("Control Index: %d\n", self->control_index); + debuglog("Percentile: %d\n", admissions_info->percentile); + debuglog("Control Index: %d\n", admissions_info->control_index); #endif #endif } @@ -33,19 +33,21 @@ admissions_info_initialize(struct admissions_info *self, int percentile, uint64_ /* * Adds an execution value to the perf window and calculates and caches and updated estimate - * @param self + * @param admissions_info * @param execution_duration */ void -admissions_info_update(struct admissions_info *self, uint64_t execution_duration) +admissions_info_update(struct admissions_info *admissions_info, uint64_t execution_duration) { #ifdef ADMISSIONS_CONTROL - struct perf_window *perf_window = &self->perf_window; + struct perf_window *perf_window = &admissions_info->perf_window; - LOCK_LOCK(&self->perf_window.lock); + LOCK_LOCK(&admissions_info->perf_window.lock); perf_window_add(perf_window, execution_duration); - uint64_t estimated_execution = perf_window_get_percentile(perf_window, self->percentile, self->control_index); - self->estimate = admissions_control_calculate_estimate(estimated_execution, self->relative_deadline); - LOCK_UNLOCK(&self->perf_window.lock); + uint64_t estimated_execution = perf_window_get_percentile(perf_window, admissions_info->percentile, + admissions_info->control_index); + admissions_info->estimate = admissions_control_calculate_estimate(estimated_execution, + admissions_info->relative_deadline); + LOCK_UNLOCK(&admissions_info->perf_window.lock); #endif } diff --git a/runtime/src/http_request.c b/runtime/src/http_request.c index 8c9a71b..50bbfdf 100644 --- a/runtime/src/http_request.c +++ b/runtime/src/http_request.c @@ -7,16 +7,20 @@ **************************************************/ void -http_request_print(struct http_request *self) +http_request_print(struct http_request *http_request) { - printf("Header Count %d\n", self->header_count); + printf("Header Count %d\n", 
http_request->header_count); printf("Header Content:\n"); - for (int i = 0; i < self->header_count; i++) { - for (int j = 0; j < self->headers[i].key_length; j++) { putchar(self->headers[i].key[j]); } + for (int i = 0; i < http_request->header_count; i++) { + for (int j = 0; j < http_request->headers[i].key_length; j++) { + putchar(http_request->headers[i].key[j]); + } putchar(':'); - for (int j = 0; j < self->headers[i].value_length; j++) { putchar(self->headers[i].value[j]); } + for (int j = 0; j < http_request->headers[i].value_length; j++) { + putchar(http_request->headers[i].value[j]); + } putchar('\n'); } - printf("Body Length %d\n", self->body_length); - printf("Body Read Length %d\n", self->body_read_length); + printf("Body Length %d\n", http_request->body_length); + printf("Body Read Length %d\n", http_request->body_read_length); } diff --git a/runtime/src/sandbox.c b/runtime/src/sandbox.c index 61e62cc..1326ec8 100644 --- a/runtime/src/sandbox.c +++ b/runtime/src/sandbox.c @@ -67,15 +67,15 @@ sandbox_free_stack(struct sandbox *sandbox) * @returns 0 on success, -1 on error */ static inline int -sandbox_allocate_http_buffers(struct sandbox *self) +sandbox_allocate_http_buffers(struct sandbox *sandbox) { int rc; - rc = vec_u8_init(&self->request, self->module->max_request_size); + rc = vec_u8_init(&sandbox->request, sandbox->module->max_request_size); if (rc < 0) return -1; - rc = vec_u8_init(&self->response, self->module->max_response_size); + rc = vec_u8_init(&sandbox->response, sandbox->module->max_response_size); if (rc < 0) { - vec_u8_deinit(&self->request); + vec_u8_deinit(&sandbox->request); return -1; } From fdaff6c6662c1e8b2f4c6f43c0195de6e867066d Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Thu, 16 Dec 2021 10:48:06 -0500 Subject: [PATCH 10/15] docs: Add more inline docs about buffers --- runtime/include/perf_window.h | 12 ++++++++---- runtime/include/vec_u8.h | 2 +- runtime/include/wasm_memory.h | 5 +++-- runtime/include/wasm_stack.h | 28 
+++++++++++++++++++++------- runtime/include/wasm_table.h | 2 +- 5 files changed, 34 insertions(+), 15 deletions(-) diff --git a/runtime/include/perf_window.h b/runtime/include/perf_window.h index 0c0ec9e..6bc218f 100644 --- a/runtime/include/perf_window.h +++ b/runtime/include/perf_window.h @@ -56,7 +56,7 @@ perf_window_swap(struct perf_window *perf_window, uint16_t first_by_duration_idx perf_window->by_termination[second_by_termination_idx] = first_by_duration_idx; /* Swap by_termination_idx */ - struct execution_node tmp_node = perf_window->by_duration[first_by_duration_idx]; + struct execution_node tmp_node = perf_window->by_duration[first_by_duration_idx]; perf_window->by_duration[first_by_duration_idx] = perf_window->by_duration[second_by_duration_idx]; perf_window->by_duration[second_by_duration_idx] = tmp_node; @@ -91,7 +91,7 @@ perf_window_add(struct perf_window *perf_window, uint64_t value) for (int i = 0; i < PERF_WINDOW_BUFFER_SIZE; i++) { perf_window->by_termination[i] = i; perf_window->by_duration[i] = (struct execution_node){ .execution_time = value, - .by_termination_idx = i }; + .by_termination_idx = i }; } perf_window->count = PERF_WINDOW_BUFFER_SIZE; goto done; @@ -112,13 +112,17 @@ perf_window_add(struct perf_window *perf_window, uint64_t value) } } else { for (int i = idx_of_oldest; - i - 1 >= 0 && perf_window->by_duration[i - 1].execution_time > perf_window->by_duration[i].execution_time; i--) { + i - 1 >= 0 + && perf_window->by_duration[i - 1].execution_time > perf_window->by_duration[i].execution_time; + i--) { perf_window_swap(perf_window, i, i - 1); } } /* The idx that we replaces should still point to the same value */ - assert(perf_window->by_duration[perf_window->by_termination[perf_window->count % PERF_WINDOW_BUFFER_SIZE]].execution_time == value); + assert(perf_window->by_duration[perf_window->by_termination[perf_window->count % PERF_WINDOW_BUFFER_SIZE]] + .execution_time + == value); /* The by_duration array should be ordered by 
execution time */ #ifndef NDEBUG diff --git a/runtime/include/vec_u8.h b/runtime/include/vec_u8.h index 681e53a..343f54b 100644 --- a/runtime/include/vec_u8.h +++ b/runtime/include/vec_u8.h @@ -6,7 +6,7 @@ struct vec_u8 { size_t length; size_t capacity; - uint8_t *buffer; + uint8_t *buffer; /* Backing heap allocation. Different lifetime because realloc might move this */ }; static inline struct vec_u8 *vec_u8_alloc(void); diff --git a/runtime/include/wasm_memory.h b/runtime/include/wasm_memory.h index 438d561..e47cff7 100644 --- a/runtime/include/wasm_memory.h +++ b/runtime/include/wasm_memory.h @@ -19,7 +19,7 @@ struct wasm_memory { size_t size; /* Initial Size in bytes */ size_t capacity; /* Size backed by actual pages */ size_t max; /* Soft cap in bytes. Defaults to 4GB */ - uint8_t * buffer; + uint8_t * buffer; /* Backing heap allocation. Different lifetime because realloc might move this */ }; static INLINE struct wasm_memory *wasm_memory_alloc(void); @@ -134,7 +134,8 @@ wasm_memory_expand(struct wasm_memory *wasm_memory, size_t size_to_expand) { size_t target_size = wasm_memory->size + size_to_expand; if (unlikely(target_size > wasm_memory->max)) { - fprintf(stderr, "wasm_memory_expand - Out of Memory!. %lu out of %lu\n", wasm_memory->size, wasm_memory->max); + fprintf(stderr, "wasm_memory_expand - Out of Memory!. %lu out of %lu\n", wasm_memory->size, + wasm_memory->max); return -1; } diff --git a/runtime/include/wasm_stack.h b/runtime/include/wasm_stack.h index 87deff8..71c627a 100644 --- a/runtime/include/wasm_stack.h +++ b/runtime/include/wasm_stack.h @@ -7,12 +7,26 @@ #include "sandbox_types.h" #include "types.h" + +/** + * @brief wasm_stack is a stack used to execute an AOT-compiled WebAssembly instance. It is allocated with a static size + * and a guard page beneath the lowest usuable address. Because the stack grows down, this protects against stack + * overflow. 
+ * + * Low Address <---------------------------------------------------------------------------> High Address + * | GUARD PAGE | USEABE FOR STACK FRAMES (SIZE of capacity) | + * /\ /\ /\ + * buffer low high + * + * | Frame 2 | Frame 1 | Frame 0 | + * <<<<<<< Direction of Stack Growth + */ struct wasm_stack { struct ps_list list; /* Linked List Node used for object pool */ size_t capacity; /* Usable capacity. Excludes size of guard page that we need to free */ uint8_t * high; /* The highest address of the stack. Grows down from here */ - uint8_t * low; /* The address of the lowest usabe address. Above guard page */ - uint8_t * buffer; /* Points to Guard Page */ + uint8_t * low; /* The address of the lowest useabe address. Above guard page */ + uint8_t * buffer; /* Points base address of backing heap allocation (Guard Page) */ }; static inline struct wasm_stack * @@ -36,14 +50,14 @@ wasm_stack_init(struct wasm_stack *wasm_stack, size_t capacity) int rc = 0; wasm_stack->buffer = (uint8_t *)mmap(NULL, /* guard page */ PAGE_SIZE + capacity, PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (unlikely(wasm_stack->buffer == MAP_FAILED)) { perror("sandbox allocate stack"); goto err_stack_allocation_failed; } - wasm_stack->low = (uint8_t *)mmap(wasm_stack->buffer + /* guard page */ PAGE_SIZE, capacity, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); + wasm_stack->low = (uint8_t *)mmap(wasm_stack->buffer + /* guard page */ PAGE_SIZE, capacity, + PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); if (unlikely(wasm_stack->low == MAP_FAILED)) { perror("sandbox set stack read/write"); goto err_stack_prot_failed; @@ -61,7 +75,7 @@ err_stack_prot_failed: if (rc == -1) perror("munmap"); err_stack_allocation_failed: wasm_stack->buffer = NULL; - rc = -1; + rc = -1; goto done; } @@ -76,7 +90,7 @@ static struct wasm_stack * wasm_stack_new(size_t capacity) { struct wasm_stack *wasm_stack = 
wasm_stack_allocate(); - int rc = wasm_stack_init(wasm_stack, capacity); + int rc = wasm_stack_init(wasm_stack, capacity); if (rc < 0) { wasm_stack_free(wasm_stack); return NULL; diff --git a/runtime/include/wasm_table.h b/runtime/include/wasm_table.h index 4a3b0ac..64344ff 100644 --- a/runtime/include/wasm_table.h +++ b/runtime/include/wasm_table.h @@ -17,7 +17,7 @@ struct wasm_table_entry { struct wasm_table { uint32_t length; uint32_t capacity; - struct wasm_table_entry *buffer; + struct wasm_table_entry *buffer; /* Backing heap allocation */ }; static INLINE struct wasm_table *wasm_table_alloc(void); From 35b83ba090845df077a52c098029929ea9db63de Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Thu, 16 Dec 2021 13:54:26 -0500 Subject: [PATCH 11/15] refactor: Eliminate _new and _delete functions --- runtime/include/module.h | 55 +++++++-- runtime/include/sandbox_functions.h | 8 +- runtime/include/sandbox_set_as_error.h | 2 +- runtime/include/sandbox_set_as_returned.h | 2 +- runtime/include/vec_u8.h | 35 +----- runtime/include/wasm_memory.h | 86 +++++++------- runtime/include/wasm_stack.h | 26 ++--- runtime/include/wasm_table.h | 14 +-- runtime/src/listener_thread.c | 6 +- runtime/src/main.c | 2 +- runtime/src/module.c | 130 +++++++++++----------- runtime/src/sandbox.c | 56 ++++------ 12 files changed, 200 insertions(+), 222 deletions(-) diff --git a/runtime/include/module.h b/runtime/include/module.h index 2d4afd5..89ae955 100644 --- a/runtime/include/module.h +++ b/runtime/include/module.h @@ -8,12 +8,14 @@ #include "admissions_control.h" #include "admissions_info.h" #include "awsm_abi.h" +#include "current_wasm_module_instance.h" #include "http.h" #include "panic.h" #include "pool.h" #include "types.h" #include "wasm_stack.h" #include "wasm_memory.h" +#include "wasm_table.h" #define MODULE_DEFAULT_REQUEST_RESPONSE_SIZE (PAGE_SIZE) @@ -22,8 +24,8 @@ extern thread_local int worker_thread_idx; -INIT_POOL(wasm_memory, wasm_memory_delete) -INIT_POOL(wasm_stack, 
wasm_stack_delete) +INIT_POOL(wasm_memory, wasm_memory_free) +INIT_POOL(wasm_stack, wasm_stack_free) /* * Defines the listen backlog, the queue length for completely established socketeds waiting to be accepted @@ -102,13 +104,50 @@ module_initialize_globals(struct module *module) } /** - * Invoke a module's initialize_tables + * @brief Invoke a module's initialize_tables * @param module + * + * Table initialization calls a function that runs within the sandbox. Rather than setting the current sandbox, + * we partially fake this out by only setting the table and then clearing after table + * initialization is complete. + * + * assumption: This approach depends on module_alloc only being invoked at program start before preemption is + * enabled. We are check that current_wasm_module_instance.table is NULL to gain confidence that + * we are not invoking this in a way that clobbers a current module. + * + * If we want to be able to do this later, we can possibly defer module_initialize_table until the first + * invocation. Alternatively, we can maintain the table per sandbox and call initialize + * on each sandbox if this "assumption" is too restrictive and we're ready to pay a per-sandbox performance hit. */ static inline void module_initialize_table(struct module *module) { + assert(current_wasm_module_instance.table == NULL); + current_wasm_module_instance.table = module->indirect_table; module->abi.initialize_tables(); + current_wasm_module_instance.table = NULL; +} + +static inline int +module_alloc_table(struct module *module) +{ + /* WebAssembly Indirect Table */ + /* TODO: Should this be part of the module or per-sandbox? */ + /* TODO: How should this table be sized? 
*/ + module->indirect_table = wasm_table_alloc(INDIRECT_TABLE_SIZE); + if (module->indirect_table == NULL) return -1; + + module_initialize_table(module); + return 0; +} + +static inline void +module_initialize_pools(struct module *module) +{ + for (int i = 0; i < MAX_WORKER_THREADS; i++) { + wasm_memory_pool_init(&module->pools[i].memory, false); + wasm_stack_pool_init(&module->pools[i].stack, false); + } } /** @@ -166,7 +205,7 @@ module_allocate_stack(struct module *module) struct wasm_stack *stack = wasm_stack_pool_remove_nolock(&module->pools[worker_thread_idx].stack); if (stack == NULL) { - stack = wasm_stack_new(module->stack_size); + stack = wasm_stack_alloc(module->stack_size); if (unlikely(stack == NULL)) return NULL; } @@ -193,7 +232,7 @@ module_allocate_linear_memory(struct module *module) struct wasm_memory *linear_memory = wasm_memory_pool_remove_nolock(&module->pools[worker_thread_idx].memory); if (linear_memory == NULL) { - linear_memory = wasm_memory_new(initial, max); + linear_memory = wasm_memory_alloc(initial, max); if (unlikely(linear_memory == NULL)) return NULL; } @@ -212,6 +251,6 @@ module_free_linear_memory(struct module *module, struct wasm_memory *memory) *******************************/ void module_free(struct module *module); -struct module *module_new(char *mod_name, char *mod_path, uint32_t stack_sz, uint32_t relative_deadline_us, int port, - int req_sz, int resp_sz, int admissions_percentile, uint32_t expected_execution_us); -int module_new_from_json(char *filename); +struct module *module_alloc(char *mod_name, char *mod_path, uint32_t stack_sz, uint32_t relative_deadline_us, int port, + int req_sz, int resp_sz, int admissions_percentile, uint32_t expected_execution_us); +int module_alloc_from_json(char *filename); diff --git a/runtime/include/sandbox_functions.h b/runtime/include/sandbox_functions.h index 05726ad..1203b9f 100644 --- a/runtime/include/sandbox_functions.h +++ b/runtime/include/sandbox_functions.h @@ -12,8 +12,8 @@ * 
Public API * **************************/ -struct sandbox *sandbox_new(struct module *module, int socket_descriptor, const struct sockaddr *socket_address, - uint64_t request_arrival_timestamp, uint64_t admissions_estimate); +struct sandbox *sandbox_alloc(struct module *module, int socket_descriptor, const struct sockaddr *socket_address, + uint64_t request_arrival_timestamp, uint64_t admissions_estimate); int sandbox_prepare_execution_environment(struct sandbox *sandbox); void sandbox_free(struct sandbox *sandbox); void sandbox_main(struct sandbox *sandbox); @@ -43,11 +43,11 @@ sandbox_free_linear_memory(struct sandbox *sandbox) } /** - * Free Linear Memory, leaving stack in place + * Deinitialize Linear Memory, cleaning up the backing buffer * @param sandbox */ static inline void -sandbox_free_http_buffers(struct sandbox *sandbox) +sandbox_deinit_http_buffers(struct sandbox *sandbox) { assert(sandbox); vec_u8_deinit(&sandbox->request); diff --git a/runtime/include/sandbox_set_as_error.h b/runtime/include/sandbox_set_as_error.h index 55bcb4c..4a684f1 100644 --- a/runtime/include/sandbox_set_as_error.h +++ b/runtime/include/sandbox_set_as_error.h @@ -38,7 +38,7 @@ sandbox_set_as_error(struct sandbox *sandbox, sandbox_state_t last_state) case SANDBOX_RUNNING_SYS: { local_runqueue_delete(sandbox); sandbox_free_linear_memory(sandbox); - sandbox_free_http_buffers(sandbox); + sandbox_deinit_http_buffers(sandbox); break; } default: { diff --git a/runtime/include/sandbox_set_as_returned.h b/runtime/include/sandbox_set_as_returned.h index d57801d..83e77dd 100644 --- a/runtime/include/sandbox_set_as_returned.h +++ b/runtime/include/sandbox_set_as_returned.h @@ -33,7 +33,7 @@ sandbox_set_as_returned(struct sandbox *sandbox, sandbox_state_t last_state) sandbox->total_time = now - sandbox->timestamp_of.request_arrival; local_runqueue_delete(sandbox); sandbox_free_linear_memory(sandbox); - sandbox_free_http_buffers(sandbox); + sandbox_deinit_http_buffers(sandbox); break; } 
default: { diff --git a/runtime/include/vec_u8.h b/runtime/include/vec_u8.h index 343f54b..0ddc46f 100644 --- a/runtime/include/vec_u8.h +++ b/runtime/include/vec_u8.h @@ -9,22 +9,10 @@ struct vec_u8 { uint8_t *buffer; /* Backing heap allocation. Different lifetime because realloc might move this */ }; -static inline struct vec_u8 *vec_u8_alloc(void); static inline int vec_u8_init(struct vec_u8 *vec_u8, size_t capacity); -static inline struct vec_u8 *vec_u8_new(size_t capacity); +static inline struct vec_u8 *vec_u8_alloc(size_t capacity); static inline void vec_u8_deinit(struct vec_u8 *vec_u8); static inline void vec_u8_free(struct vec_u8 *vec_u8); -static inline void vec_u8_delete(struct vec_u8 *vec_u8); - -/** - * Allocates an uninitialized vec on the heap' - * @returns a pointer to an uninitialized vec on the heap - */ -static inline struct vec_u8 * -vec_u8_alloc(void) -{ - return (struct vec_u8 *)calloc(1, sizeof(struct vec_u8)); -} /** * Initializes a vec, allocating a backing buffer for the provided capcity @@ -54,9 +42,9 @@ vec_u8_init(struct vec_u8 *vec_u8, size_t capacity) * @returns a pointer to an initialized vec on the heap, ready for use */ static inline struct vec_u8 * -vec_u8_new(size_t capacity) +vec_u8_alloc(size_t capacity) { - struct vec_u8 *vec_u8 = vec_u8_alloc(); + struct vec_u8 *vec_u8 = (struct vec_u8 *)malloc(sizeof(struct vec_u8)); if (vec_u8 == NULL) return vec_u8; int rc = vec_u8_init(vec_u8, capacity); @@ -88,26 +76,13 @@ vec_u8_deinit(struct vec_u8 *vec_u8) vec_u8->capacity = 0; } -/** - * Frees a vec struct allocated on the heap - * Assumes that the vec has already been deinitialized - */ -static inline void -vec_u8_free(struct vec_u8 *vec_u8) -{ - assert(vec_u8->buffer == NULL); - assert(vec_u8->length == 0); - assert(vec_u8->capacity == 0); - free(vec_u8); -} - /** * Deinitializes and frees a vec allocated to the heap * @param vec_u8 */ static inline void -vec_u8_delete(struct vec_u8 *vec_u8) +vec_u8_free(struct vec_u8 *vec_u8) { 
vec_u8_deinit(vec_u8); - vec_u8_free(vec_u8); + free(vec_u8); } diff --git a/runtime/include/wasm_memory.h b/runtime/include/wasm_memory.h index e47cff7..054edad 100644 --- a/runtime/include/wasm_memory.h +++ b/runtime/include/wasm_memory.h @@ -22,18 +22,51 @@ struct wasm_memory { uint8_t * buffer; /* Backing heap allocation. Different lifetime because realloc might move this */ }; -static INLINE struct wasm_memory *wasm_memory_alloc(void); +/* Object Lifecycle Functions */ +static INLINE struct wasm_memory *wasm_memory_alloc(size_t initial, size_t max); static INLINE int wasm_memory_init(struct wasm_memory *wasm_memory, size_t initial, size_t max); -static INLINE struct wasm_memory *wasm_memory_new(size_t initial, size_t max); static INLINE void wasm_memory_deinit(struct wasm_memory *wasm_memory); static INLINE void wasm_memory_free(struct wasm_memory *wasm_memory); -static INLINE void wasm_memory_delete(struct wasm_memory *wasm_memory); +static INLINE void wasm_memory_reinit(struct wasm_memory *wasm_memory, size_t initial); +/* Memory Size */ +static INLINE int wasm_memory_expand(struct wasm_memory *wasm_memory, size_t size_to_expand); +static INLINE size_t wasm_memory_get_size(struct wasm_memory *wasm_memory); +static INLINE uint32_t wasm_memory_get_page_count(struct wasm_memory *wasm_memory); + +/* Reading and writing to wasm_memory */ +static INLINE void +wasm_memory_initialize_region(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t region_size, uint8_t region[]); +static INLINE void * wasm_memory_get_ptr_void(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t size); +static INLINE int8_t wasm_memory_get_i8(struct wasm_memory *wasm_memory, uint32_t offset); +static INLINE int16_t wasm_memory_get_i16(struct wasm_memory *wasm_memory, uint32_t offset); +static INLINE int32_t wasm_memory_get_i32(struct wasm_memory *wasm_memory, uint32_t offset); +static INLINE int64_t wasm_memory_get_i64(struct wasm_memory *wasm_memory, uint32_t offset); +static 
INLINE float wasm_memory_get_f32(struct wasm_memory *wasm_memory, uint32_t offset); +static INLINE double wasm_memory_get_f64(struct wasm_memory *wasm_memory, uint32_t offset); +static INLINE char wasm_memory_get_char(struct wasm_memory *wasm_memory, uint32_t offset); +static INLINE char * wasm_memory_get_string(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t size); +static INLINE void wasm_memory_set_i8(struct wasm_memory *wasm_memory, uint32_t offset, int8_t value); +static INLINE void wasm_memory_set_i16(struct wasm_memory *wasm_memory, uint32_t offset, int16_t value); +static INLINE void wasm_memory_set_i32(struct wasm_memory *wasm_memory, uint32_t offset, int32_t value); +static INLINE void wasm_memory_set_i64(struct wasm_memory *wasm_memory, uint64_t offset, int64_t value); +static INLINE void wasm_memory_set_f32(struct wasm_memory *wasm_memory, uint32_t offset, float value); +static INLINE void wasm_memory_set_f64(struct wasm_memory *wasm_memory, uint32_t offset, double value); static INLINE struct wasm_memory * -wasm_memory_alloc(void) +wasm_memory_alloc(size_t initial, size_t max) { - return malloc(sizeof(struct wasm_memory)); + struct wasm_memory *wasm_memory = malloc(sizeof(struct wasm_memory)); + if (wasm_memory == NULL) return wasm_memory; + + int rc = wasm_memory_init(wasm_memory, initial, max); + if (rc < 0) { + assert(0); + wasm_memory_free(wasm_memory); + return NULL; + } + + return wasm_memory; } static INLINE int @@ -68,22 +101,6 @@ wasm_memory_init(struct wasm_memory *wasm_memory, size_t initial, size_t max) return 0; } -static INLINE struct wasm_memory * -wasm_memory_new(size_t initial, size_t max) -{ - struct wasm_memory *wasm_memory = wasm_memory_alloc(); - if (wasm_memory == NULL) return wasm_memory; - - int rc = wasm_memory_init(wasm_memory, initial, max); - if (rc < 0) { - assert(0); - wasm_memory_free(wasm_memory); - return NULL; - } - - return wasm_memory; -} - static INLINE void wasm_memory_deinit(struct wasm_memory 
*wasm_memory) { @@ -101,31 +118,14 @@ static INLINE void wasm_memory_free(struct wasm_memory *wasm_memory) { assert(wasm_memory != NULL); - /* Assume prior deinitialization so we don't leak buffers */ - assert(wasm_memory->buffer == NULL); - - free(wasm_memory); -} - -static INLINE void -wasm_memory_delete(struct wasm_memory *wasm_memory) -{ - assert(wasm_memory != NULL); - wasm_memory_deinit(wasm_memory); - wasm_memory_free(wasm_memory); -} - -static INLINE void -wasm_memory_wipe(struct wasm_memory *wasm_memory) -{ - memset(wasm_memory->buffer, 0, wasm_memory->size); + free(wasm_memory); } static INLINE void wasm_memory_reinit(struct wasm_memory *wasm_memory, size_t initial) { - wasm_memory_wipe(wasm_memory); + memset(wasm_memory->buffer, 0, wasm_memory->size); wasm_memory->size = initial; } @@ -158,12 +158,6 @@ wasm_memory_expand(struct wasm_memory *wasm_memory, size_t size_to_expand) return 0; } -static INLINE void -wasm_memory_set_size(struct wasm_memory *wasm_memory, size_t size) -{ - wasm_memory->size = size; -} - static INLINE size_t wasm_memory_get_size(struct wasm_memory *wasm_memory) { diff --git a/runtime/include/wasm_stack.h b/runtime/include/wasm_stack.h index 71c627a..7e79ec5 100644 --- a/runtime/include/wasm_stack.h +++ b/runtime/include/wasm_stack.h @@ -7,7 +7,6 @@ #include "sandbox_types.h" #include "types.h" - /** * @brief wasm_stack is a stack used to execute an AOT-compiled WebAssembly instance. It is allocated with a static size * and a guard page beneath the lowest usuable address. 
Because the stack grows down, this protects against stack @@ -29,11 +28,11 @@ struct wasm_stack { uint8_t * buffer; /* Points base address of backing heap allocation (Guard Page) */ }; -static inline struct wasm_stack * -wasm_stack_allocate(void) -{ - return calloc(1, sizeof(struct wasm_stack)); -} +static struct wasm_stack *wasm_stack_alloc(size_t capacity); +static inline int wasm_stack_init(struct wasm_stack *wasm_stack, size_t capacity); +static inline void wasm_stack_reinit(struct wasm_stack *wasm_stack); +static inline void wasm_stack_deinit(struct wasm_stack *wasm_stack); +static inline void wasm_stack_free(struct wasm_stack *wasm_stack); /** * Allocates a static sized stack for a sandbox with a guard page underneath @@ -79,17 +78,10 @@ err_stack_allocation_failed: goto done; } -static INLINE void -wasm_stack_free(struct wasm_stack *wasm_stack) -{ - free(wasm_stack); -} - - static struct wasm_stack * -wasm_stack_new(size_t capacity) +wasm_stack_alloc(size_t capacity) { - struct wasm_stack *wasm_stack = wasm_stack_allocate(); + struct wasm_stack *wasm_stack = calloc(1, sizeof(struct wasm_stack)); int rc = wasm_stack_init(wasm_stack, capacity); if (rc < 0) { wasm_stack_free(wasm_stack); @@ -113,12 +105,12 @@ wasm_stack_deinit(struct wasm_stack *wasm_stack) } static inline void -wasm_stack_delete(struct wasm_stack *wasm_stack) +wasm_stack_free(struct wasm_stack *wasm_stack) { assert(wasm_stack != NULL); assert(wasm_stack->buffer != NULL); wasm_stack_deinit(wasm_stack); - wasm_stack_free(wasm_stack); + free(wasm_stack); } static inline void diff --git a/runtime/include/wasm_table.h b/runtime/include/wasm_table.h index 64344ff..ce463aa 100644 --- a/runtime/include/wasm_table.h +++ b/runtime/include/wasm_table.h @@ -20,18 +20,10 @@ struct wasm_table { struct wasm_table_entry *buffer; /* Backing heap allocation */ }; -static INLINE struct wasm_table *wasm_table_alloc(void); static INLINE int wasm_table_init(struct wasm_table *wasm_table, size_t capacity); -static 
INLINE struct wasm_table *wasm_table_new(size_t capacity); +static INLINE struct wasm_table *wasm_table_alloc(size_t capacity); static INLINE void wasm_table_deinit(struct wasm_table *wasm_table); static INLINE void wasm_table_free(struct wasm_table *wasm_table); -static INLINE void wasm_table_delete(struct wasm_table *wasm_table); - -static INLINE struct wasm_table * -wasm_table_alloc(void) -{ - return (struct wasm_table *)malloc(sizeof(struct wasm_table)); -} static INLINE int wasm_table_init(struct wasm_table *wasm_table, size_t capacity) @@ -50,9 +42,9 @@ wasm_table_init(struct wasm_table *wasm_table, size_t capacity) } static INLINE struct wasm_table * -wasm_table_new(size_t capacity) +wasm_table_alloc(size_t capacity) { - struct wasm_table *wasm_table = wasm_table_alloc(); + struct wasm_table *wasm_table = (struct wasm_table *)malloc(sizeof(struct wasm_table)); if (wasm_table == NULL) return NULL; int rc = wasm_table_init(wasm_table, capacity); diff --git a/runtime/src/listener_thread.c b/runtime/src/listener_thread.c index e47470c..e4b2da6 100644 --- a/runtime/src/listener_thread.c +++ b/runtime/src/listener_thread.c @@ -179,9 +179,9 @@ listener_thread_main(void *dummy) } /* Allocate a Sandbox */ - struct sandbox *sandbox = sandbox_new(module, client_socket, - (const struct sockaddr *)&client_address, - request_arrival_timestamp, work_admitted); + struct sandbox *sandbox = sandbox_alloc(module, client_socket, + (const struct sockaddr *)&client_address, + request_arrival_timestamp, work_admitted); if (unlikely(sandbox == NULL)) { client_socket_send_oneshot(sandbox->client_socket_descriptor, http_header_build(503), http_header_len(503)); diff --git a/runtime/src/main.c b/runtime/src/main.c index 7819b2c..15c5fdf 100644 --- a/runtime/src/main.c +++ b/runtime/src/main.c @@ -369,7 +369,7 @@ main(int argc, char **argv) #ifdef LOG_MODULE_LOADING debuglog("Parsing modules file [%s]\n", argv[1]); #endif - if (module_new_from_json(argv[1])) panic("failed to initialize 
module(s) defined in %s\n", argv[1]); + if (module_alloc_from_json(argv[1])) panic("failed to initialize module(s) defined in %s\n", argv[1]); for (int i = 0; i < runtime_worker_threads_count; i++) { diff --git a/runtime/src/module.c b/runtime/src/module.c index 45a96f2..0978075 100644 --- a/runtime/src/module.c +++ b/runtime/src/module.c @@ -126,53 +126,42 @@ module_free(struct module *module) free(module); } - -/** - * Module Contructor - * Creates a new module, invokes initialize_tables to initialize the indirect table, adds it to the module DB, and - *starts listening for HTTP Requests - * - * @param name - * @param path - * @param stack_size - * @param relative_deadline_us - * @param port - * @param request_size - * @returns A new module or NULL in case of failure - */ - -struct module * -module_new(char *name, char *path, uint32_t stack_size, uint32_t relative_deadline_us, int port, int request_size, - int response_size, int admissions_percentile, uint32_t expected_execution_us) +static inline int +module_init(struct module *module, char *name, char *path, uint32_t stack_size, uint32_t relative_deadline_us, int port, + int request_size, int response_size, int admissions_percentile, uint32_t expected_execution_us) { - int rc = 0; + assert(module != NULL); + assert(name != NULL); + assert(strlen(name) > 0); + assert(strlen(name) < MODULE_MAX_NAME_LENGTH); + assert(path != NULL); + assert(strlen(path) > 0); + assert(strlen(path) < MODULE_MAX_PATH_LENGTH); + assert(stack_size > 0); + assert(relative_deadline_us > 0); + assert(relative_deadline_us < RUNTIME_RELATIVE_DEADLINE_US_MAX); + assert(port > 0); + assert(admissions_percentile > 0); + assert(expected_execution_us > 0); - struct module *module = (struct module *)calloc(1, sizeof(struct module)); - if (!module) { - fprintf(stderr, "Failed to allocate module: %s\n", strerror(errno)); - goto err; - }; + int rc = 0; atomic_init(&module->reference_count, 0); rc = awsm_abi_init(&module->abi, path); - if (rc != 0) 
goto awsm_abi_init_err; + if (rc != 0) goto err; /* Set fields in the module struct */ strncpy(module->name, name, MODULE_MAX_NAME_LENGTH); strncpy(module->path, path, MODULE_MAX_PATH_LENGTH); - module->stack_size = ((uint32_t)(round_up_to_page(stack_size == 0 ? WASM_STACK_SIZE : stack_size))); - debuglog("Stack Size: %u", module->stack_size); + module->stack_size = ((uint32_t)(round_up_to_page(stack_size == 0 ? WASM_STACK_SIZE : stack_size))); module->socket_descriptor = -1; module->port = port; /* Deadlines */ module->relative_deadline_us = relative_deadline_us; - /* This should have been handled when a module was loaded */ - assert(relative_deadline_us < RUNTIME_RELATIVE_DEADLINE_US_MAX); - /* This can overflow a uint32_t, so be sure to cast appropriately */ module->relative_deadline = (uint64_t)relative_deadline_us * runtime_processor_speed_MHz; @@ -181,49 +170,58 @@ module_new(char *name, char *path, uint32_t stack_size, uint32_t relative_deadli admissions_info_initialize(&module->admissions_info, admissions_percentile, expected_execution, module->relative_deadline); - /* WebAssembly Indirect Table */ - /* TODO: Should this be part of the module or per-sandbox? */ - /* TODO: How should this table be sized? */ - module->indirect_table = wasm_table_new(INDIRECT_TABLE_SIZE); - /* Request Response Buffer */ if (request_size == 0) request_size = MODULE_DEFAULT_REQUEST_RESPONSE_SIZE; if (response_size == 0) response_size = MODULE_DEFAULT_REQUEST_RESPONSE_SIZE; module->max_request_size = round_up_to_page(request_size); module->max_response_size = round_up_to_page(response_size); - /* Table initialization calls a function that runs within the sandbox. Rather than setting the current sandbox, - * we partially fake this out by only setting the table and then clearing after table - * initialization is complete. - * - * assumption: This approach depends on module_new only being invoked at program start before preemption is - * enabled. 
We are check that current_wasm_module_instance.table is NULL to gain confidence that - * we are not invoking this in a way that clobbers a current module. - * - * If we want to be able to do this later, we can possibly defer module_initialize_table until the first - * invocation. Alternatively, we can maintain the table per sandbox and call initialize - * on each sandbox if this "assumption" is too restrictive and we're ready to pay a per-sandbox performance hit. - */ - - assert(current_wasm_module_instance.table == NULL); - current_wasm_module_instance.table = module->indirect_table; - module_initialize_table(module); - current_wasm_module_instance.table = NULL; - - for (int i = 0; i < MAX_WORKER_THREADS; i++) { - wasm_memory_pool_init(&module->pools[i].memory, false); - wasm_stack_pool_init(&module->pools[i].stack, false); - } + module_alloc_table(module); + module_initialize_pools(module); /* Start listening for requests */ rc = module_listen(module); - if (rc < 0) goto err_listen; + if (rc < 0) goto err; + +done: + return rc; +err: + rc = -1; + goto done; +} + +/** + * Module Contructor + * Creates a new module, invokes initialize_tables to initialize the indirect table, adds it to the module DB, and + *starts listening for HTTP Requests + * + * @param name + * @param path + * @param stack_size + * @param relative_deadline_us + * @param port + * @param request_size + * @returns A new module or NULL in case of failure + */ + +struct module * +module_alloc(char *name, char *path, uint32_t stack_size, uint32_t relative_deadline_us, int port, int request_size, + int response_size, int admissions_percentile, uint32_t expected_execution_us) +{ + struct module *module = (struct module *)calloc(1, sizeof(struct module)); + if (!module) { + fprintf(stderr, "Failed to allocate module: %s\n", strerror(errno)); + goto err; + }; + + int rc = module_init(module, name, path, stack_size, relative_deadline_us, port, request_size, response_size, + admissions_percentile, 
expected_execution_us); + if (rc < 0) goto init_err; done: return module; -err_listen: -awsm_abi_init_err: +init_err: free(module); err: module = NULL; @@ -236,7 +234,7 @@ err: * @return RC 0 on Success. -1 on Error */ int -module_new_from_json(char *file_name) +module_alloc_from_json(char *file_name) { assert(file_name != NULL); int return_code = -1; @@ -424,10 +422,10 @@ module_new_from_json(char *file_name) #endif /* Allocate a module based on the values from the JSON */ - struct module *module = module_new(module_name, module_path, 0, relative_deadline_us, port, - request_size, response_size, admissions_percentile, - expected_execution_us); - if (module == NULL) goto module_new_err; + struct module *module = module_alloc(module_name, module_path, 0, relative_deadline_us, port, + request_size, response_size, admissions_percentile, + expected_execution_us); + if (module == NULL) goto module_alloc_err; assert(module); module_set_http_info(module, response_content_type); @@ -444,7 +442,7 @@ module_new_from_json(char *file_name) done: return return_code; -module_new_err: +module_alloc_err: json_parse_err: fclose_err: /* We will retry fclose when we fall through into stat_buffer_alloc_err */ diff --git a/runtime/src/sandbox.c b/runtime/src/sandbox.c index 1326ec8..bce1052 100644 --- a/runtime/src/sandbox.c +++ b/runtime/src/sandbox.c @@ -82,15 +82,6 @@ sandbox_allocate_http_buffers(struct sandbox *sandbox) return 0; } -static inline struct sandbox * -sandbox_allocate(void) -{ - struct sandbox *sandbox = NULL; - size_t page_aligned_sandbox_size = round_up_to_page(sizeof(struct sandbox)); - sandbox = calloc(1, page_aligned_sandbox_size); - sandbox_set_as_allocated(sandbox); - return sandbox; -} /** * Allocates HTTP buffers and performs our approximation of "WebAssembly instantiation" @@ -180,12 +171,15 @@ sandbox_init(struct sandbox *sandbox, struct module *module, int socket_descript * @return the new sandbox request */ struct sandbox * -sandbox_new(struct module 
*module, int socket_descriptor, const struct sockaddr *socket_address, - uint64_t request_arrival_timestamp, uint64_t admissions_estimate) +sandbox_alloc(struct module *module, int socket_descriptor, const struct sockaddr *socket_address, + uint64_t request_arrival_timestamp, uint64_t admissions_estimate) { - struct sandbox *sandbox = sandbox_allocate(); - assert(sandbox); + struct sandbox *sandbox = NULL; + size_t page_aligned_sandbox_size = round_up_to_page(sizeof(struct sandbox)); + sandbox = calloc(1, page_aligned_sandbox_size); + if (unlikely(sandbox == NULL)) return NULL; + sandbox_set_as_allocated(sandbox); sandbox_init(sandbox, module, socket_descriptor, socket_address, request_arrival_timestamp, admissions_estimate); @@ -193,39 +187,33 @@ sandbox_new(struct module *module, int socket_descriptor, const struct sockaddr return sandbox; } - -/** - * Free stack and heap resources.. also any I/O handles. - * @param sandbox - */ void -sandbox_free(struct sandbox *sandbox) +sandbox_deinit(struct sandbox *sandbox) { assert(sandbox != NULL); assert(sandbox != current_sandbox_get()); assert(sandbox->state == SANDBOX_ERROR || sandbox->state == SANDBOX_COMPLETE); - int rc; - module_release(sandbox->module); /* Linear Memory and Guard Page should already have been munmaped and set to NULL */ assert(sandbox->memory == NULL); - /* Free Sandbox Struct and HTTP Request and Response Buffers */ - + /* Free Sandbox Struct*/ if (likely(sandbox->stack != NULL)) sandbox_free_stack(sandbox); - free(sandbox); +} - if (rc == -1) { - debuglog("Failed to unmap Sandbox %lu\n", sandbox->id); - goto err_free_sandbox_failed; - }; +/** + * Free stack and heap resources.. also any I/O handles. 
+ * @param sandbox + */ +void +sandbox_free(struct sandbox *sandbox) +{ + assert(sandbox != NULL); + assert(sandbox != current_sandbox_get()); + assert(sandbox->state == SANDBOX_ERROR || sandbox->state == SANDBOX_COMPLETE); -done: - return; -err_free_sandbox_failed: -err_free_stack_failed: - /* Errors freeing memory is a fatal error */ - panic("Failed to free Sandbox %lu\n", sandbox->id); + sandbox_deinit(sandbox); + free(sandbox); } From 639fa953b621d919a29d1cc1249dc9ae39716299 Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Thu, 16 Dec 2021 14:17:24 -0500 Subject: [PATCH 12/15] docs: Better explain bounds check assertions --- runtime/include/module.h | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/runtime/include/module.h b/runtime/include/module.h index 89ae955..31e1dac 100644 --- a/runtime/include/module.h +++ b/runtime/include/module.h @@ -224,15 +224,18 @@ module_allocate_linear_memory(struct module *module) { assert(module != NULL); - size_t initial = (size_t)module->abi.starting_pages * WASM_PAGE_SIZE; - size_t max = (size_t)module->abi.max_pages * WASM_PAGE_SIZE; + uint64_t starting_bytes = (uint64_t)module->abi.starting_pages * WASM_PAGE_SIZE; + uint64_t max_bytes = (uint64_t)module->abi.max_pages * WASM_PAGE_SIZE; - assert(initial <= (size_t)UINT32_MAX + 1); - assert(max <= (size_t)UINT32_MAX + 1); + /* UINT32_MAX is the largest representable integral value that can fit into type uint32_t. Because C counts from + zero, the number of states in the range 0..UINT32_MAX is thus UINT32_MAX + 1. 
This means that the maximum + possible buffer that can be byte-addressed by a full 32-bit address space is UINT32_MAX + 1 */ + assert(starting_bytes <= (uint64_t)UINT32_MAX + 1); + assert(max_bytes <= (uint64_t)UINT32_MAX + 1); struct wasm_memory *linear_memory = wasm_memory_pool_remove_nolock(&module->pools[worker_thread_idx].memory); if (linear_memory == NULL) { - linear_memory = wasm_memory_alloc(initial, max); + linear_memory = wasm_memory_alloc(starting_bytes, max_bytes); if (unlikely(linear_memory == NULL)) return NULL; } From d562a9315d823b54d8ce076a7db75d94e2f7a0b7 Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Thu, 16 Dec 2021 14:26:46 -0500 Subject: [PATCH 13/15] refactor: Shift module validation to JSON parsing --- runtime/src/module.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/runtime/src/module.c b/runtime/src/module.c index 0978075..50f915d 100644 --- a/runtime/src/module.c +++ b/runtime/src/module.c @@ -131,18 +131,6 @@ module_init(struct module *module, char *name, char *path, uint32_t stack_size, int request_size, int response_size, int admissions_percentile, uint32_t expected_execution_us) { assert(module != NULL); - assert(name != NULL); - assert(strlen(name) > 0); - assert(strlen(name) < MODULE_MAX_NAME_LENGTH); - assert(path != NULL); - assert(strlen(path) > 0); - assert(strlen(path) < MODULE_MAX_PATH_LENGTH); - assert(stack_size > 0); - assert(relative_deadline_us > 0); - assert(relative_deadline_us < RUNTIME_RELATIVE_DEADLINE_US_MAX); - assert(port > 0); - assert(admissions_percentile > 0); - assert(expected_execution_us > 0); int rc = 0; From 9eec575e9e31ac83f6fa050a786e9518436d57cf Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Thu, 16 Dec 2021 15:39:48 -0500 Subject: [PATCH 14/15] refactor: remove size_t from wasm_stack --- runtime/include/wasm_stack.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/runtime/include/wasm_stack.h b/runtime/include/wasm_stack.h index 7e79ec5..5dd5f50 
100644 --- a/runtime/include/wasm_stack.h +++ b/runtime/include/wasm_stack.h @@ -22,14 +22,14 @@ */ struct wasm_stack { struct ps_list list; /* Linked List Node used for object pool */ - size_t capacity; /* Usable capacity. Excludes size of guard page that we need to free */ + uint64_t capacity; /* Usable capacity. Excludes size of guard page that we need to free */ uint8_t * high; /* The highest address of the stack. Grows down from here */ uint8_t * low; /* The address of the lowest useabe address. Above guard page */ uint8_t * buffer; /* Points base address of backing heap allocation (Guard Page) */ }; -static struct wasm_stack *wasm_stack_alloc(size_t capacity); -static inline int wasm_stack_init(struct wasm_stack *wasm_stack, size_t capacity); +static struct wasm_stack *wasm_stack_alloc(uint64_t capacity); +static inline int wasm_stack_init(struct wasm_stack *wasm_stack, uint64_t capacity); static inline void wasm_stack_reinit(struct wasm_stack *wasm_stack); static inline void wasm_stack_deinit(struct wasm_stack *wasm_stack); static inline void wasm_stack_free(struct wasm_stack *wasm_stack); @@ -42,7 +42,7 @@ static inline void wasm_stack_free(struct wasm_stack *wasm_stack); * @returns 0 on success, -1 on error */ static inline int -wasm_stack_init(struct wasm_stack *wasm_stack, size_t capacity) +wasm_stack_init(struct wasm_stack *wasm_stack, uint64_t capacity) { assert(wasm_stack); @@ -79,7 +79,7 @@ err_stack_allocation_failed: } static struct wasm_stack * -wasm_stack_alloc(size_t capacity) +wasm_stack_alloc(uint64_t capacity) { struct wasm_stack *wasm_stack = calloc(1, sizeof(struct wasm_stack)); int rc = wasm_stack_init(wasm_stack, capacity); From e89696d911732573d17b8abce8c1f9d4ab92e5fa Mon Sep 17 00:00:00 2001 From: Sean McBride Date: Thu, 16 Dec 2021 16:26:01 -0500 Subject: [PATCH 15/15] refactor: DRY up pool macro --- runtime/include/pool.h | 173 +++++++++++++++++++---------------------- 1 file changed, 82 insertions(+), 91 deletions(-) diff --git 
a/runtime/include/pool.h b/runtime/include/pool.h index 26d7527..fb83743 100644 --- a/runtime/include/pool.h +++ b/runtime/include/pool.h @@ -8,95 +8,86 @@ #include "lock.h" #include "ps_list.h" -#define INIT_POOL(STRUCT_NAME, DTOR_FN) \ - struct STRUCT_NAME##_pool { \ - bool use_lock; \ - lock_t lock; \ - struct ps_list_head list; \ - }; \ - \ - static inline bool STRUCT_NAME##_pool_is_empty(struct STRUCT_NAME##_pool *self) \ - { \ - assert(self != NULL); \ - \ - return ps_list_head_empty(&self->list); \ - } \ - \ - static inline void STRUCT_NAME##_pool_init(struct STRUCT_NAME##_pool *self, bool use_lock) \ - { \ - ps_list_head_init(&self->list); \ - self->use_lock = use_lock; \ - if (use_lock) LOCK_INIT(&self->lock); \ - } \ - \ - static inline void STRUCT_NAME##_pool_deinit(struct STRUCT_NAME##_pool *self) \ - { \ - if (STRUCT_NAME##_pool_is_empty(self)) return; \ - struct STRUCT_NAME *iterator = NULL; \ - struct STRUCT_NAME *buffer = NULL; \ - ps_list_foreach_del_d(&self->list, iterator, buffer) \ - { \ - ps_list_rem_d(iterator); \ - DTOR_FN(iterator); \ - } \ - } \ - \ - static inline struct STRUCT_NAME *STRUCT_NAME##_pool_remove_nolock(struct STRUCT_NAME##_pool *self) \ - { \ - assert(self != NULL); \ - assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); \ - \ - struct STRUCT_NAME *obj = NULL; \ - \ - if (STRUCT_NAME##_pool_is_empty(self)) return obj; \ - \ - obj = ps_list_head_first_d(&self->list, struct STRUCT_NAME); \ - assert(obj); \ - ps_list_rem_d(obj); \ - \ - return obj; \ - } \ - \ - static inline struct STRUCT_NAME *STRUCT_NAME##_pool_remove(struct STRUCT_NAME##_pool *self) \ - { \ - assert(self != NULL); \ - assert(self->use_lock); \ - \ - struct STRUCT_NAME *obj = NULL; \ - \ - if (STRUCT_NAME##_pool_is_empty(self)) return obj; \ - \ - LOCK_LOCK(&self->lock); \ - if (STRUCT_NAME##_pool_is_empty(self)) { \ - LOCK_UNLOCK(&self->lock); \ - return obj; \ - } \ - \ - obj = ps_list_head_first_d(&self->list, struct STRUCT_NAME); \ - assert(obj); \ - 
ps_list_rem_d(obj); \ - LOCK_UNLOCK(&self->lock); \ - return obj; \ - } \ - \ - static inline int STRUCT_NAME##_pool_add_nolock(struct STRUCT_NAME##_pool *self, struct STRUCT_NAME *obj) \ - { \ - assert(self != NULL); \ - assert(obj != NULL); \ - assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); \ - \ - ps_list_head_add_d(&self->list, obj); \ - return 0; \ - } \ - \ - static inline int STRUCT_NAME##_pool_add(struct STRUCT_NAME##_pool *self, struct STRUCT_NAME *obj) \ - { \ - assert(self != NULL); \ - assert(obj != NULL); \ - assert(self->use_lock); \ - \ - LOCK_LOCK(&self->lock); \ - ps_list_head_add_d(&self->list, obj); \ - LOCK_UNLOCK(&self->lock); \ - return 0; \ +#define INIT_POOL(STRUCT_NAME, DTOR_FN) \ + struct STRUCT_NAME##_pool { \ + bool use_lock; \ + lock_t lock; \ + struct ps_list_head list; \ + }; \ + \ + static inline bool STRUCT_NAME##_pool_is_empty(struct STRUCT_NAME##_pool *self) \ + { \ + assert(self != NULL); \ + \ + return ps_list_head_empty(&self->list); \ + } \ + \ + static inline void STRUCT_NAME##_pool_init(struct STRUCT_NAME##_pool *self, bool use_lock) \ + { \ + ps_list_head_init(&self->list); \ + self->use_lock = use_lock; \ + if (use_lock) LOCK_INIT(&self->lock); \ + } \ + \ + static inline void STRUCT_NAME##_pool_deinit(struct STRUCT_NAME##_pool *self) \ + { \ + if (STRUCT_NAME##_pool_is_empty(self)) return; \ + struct STRUCT_NAME *iterator = NULL; \ + struct STRUCT_NAME *buffer = NULL; \ + ps_list_foreach_del_d(&self->list, iterator, buffer) \ + { \ + ps_list_rem_d(iterator); \ + DTOR_FN(iterator); \ + } \ + } \ + \ + static inline struct STRUCT_NAME *STRUCT_NAME##_pool_remove_nolock(struct STRUCT_NAME##_pool *self) \ + { \ + assert(self != NULL); \ + assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); \ + \ + struct STRUCT_NAME *obj = NULL; \ + \ + if (STRUCT_NAME##_pool_is_empty(self)) return obj; \ + \ + obj = ps_list_head_first_d(&self->list, struct STRUCT_NAME); \ + assert(obj); \ + ps_list_rem_d(obj); \ + \ + return obj; 
\ + } \ + \ + static inline struct STRUCT_NAME *STRUCT_NAME##_pool_remove(struct STRUCT_NAME##_pool *self) \ + { \ + assert(self != NULL); \ + assert(self->use_lock); \ + \ + struct STRUCT_NAME *obj = NULL; \ + bool is_empty = STRUCT_NAME##_pool_is_empty(self); \ + if (is_empty) return obj; \ + \ + LOCK_LOCK(&self->lock); \ + obj = STRUCT_NAME##_pool_remove_nolock(self); \ + LOCK_UNLOCK(&self->lock); \ + return obj; \ + } \ + \ + static inline void STRUCT_NAME##_pool_add_nolock(struct STRUCT_NAME##_pool *self, struct STRUCT_NAME *obj) \ + { \ + assert(self != NULL); \ + assert(obj != NULL); \ + assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); \ + \ + ps_list_head_add_d(&self->list, obj); \ + } \ + \ + static inline void STRUCT_NAME##_pool_add(struct STRUCT_NAME##_pool *self, struct STRUCT_NAME *obj) \ + { \ + assert(self != NULL); \ + assert(obj != NULL); \ + assert(self->use_lock); \ + \ + LOCK_LOCK(&self->lock); \ + STRUCT_NAME##_pool_add_nolock(self, obj); \ + LOCK_UNLOCK(&self->lock); \ }