Merge pull request #288 from gwsystems/object-pool

Reduce Allocations via Object Pools
master
Sean McBride 3 years ago committed by GitHub
commit 24fc351b02

@ -102,6 +102,7 @@
"software_interrupt_counts.h": "c",
"sandbox_set_as_running_sys.h": "c",
"wasm_module_instance.h": "c",
"wasm_stack.h": "c",
"wasm_table.h": "c"
},
"files.exclude": {

@ -10,6 +10,6 @@ struct admissions_info {
uint64_t relative_deadline; /* Relative deadline in cycles. This is duplicated state */
};
void admissions_info_initialize(struct admissions_info *self, int percentile, uint64_t expected_execution,
void admissions_info_initialize(struct admissions_info *admissions_info, int percentile, uint64_t expected_execution,
uint64_t relative_deadline);
void admissions_info_update(struct admissions_info *self, uint64_t execution_duration);
void admissions_info_update(struct admissions_info *admissions_info, uint64_t execution_duration);

@ -27,4 +27,4 @@ struct http_request {
bool message_end; /* boolean flag set when body processing is complete */
};
void http_request_print(struct http_request *self);
void http_request_print(struct http_request *http_request);

@ -8,15 +8,25 @@
#include "admissions_control.h"
#include "admissions_info.h"
#include "awsm_abi.h"
#include "current_wasm_module_instance.h"
#include "http.h"
#include "panic.h"
#include "pool.h"
#include "types.h"
#include "wasm_stack.h"
#include "wasm_memory.h"
#include "wasm_table.h"
#define MODULE_DEFAULT_REQUEST_RESPONSE_SIZE (PAGE_SIZE)
#define MODULE_MAX_NAME_LENGTH 32
#define MODULE_MAX_PATH_LENGTH 256
extern thread_local int worker_thread_idx;
INIT_POOL(wasm_memory, wasm_memory_free)
INIT_POOL(wasm_stack, wasm_stack_free)
/*
* Defines the listen backlog, the queue length for completely established sockets waiting to be accepted
* If this value is greater than the value in /proc/sys/net/core/somaxconn (typically 128), then it is silently
@ -32,12 +42,19 @@
"MODULE_MAX_PENDING_CLIENT_REQUESTS likely exceeds the value in /proc/sys/net/core/somaxconn and thus may be silently truncated";
#endif
/* TODO: Dynamically size based on number of threads */
#define MAX_WORKER_THREADS 64
struct module_pools {
struct wasm_memory_pool memory;
struct wasm_stack_pool stack;
} __attribute__((aligned(CACHE_PAD)));
struct module {
/* Metadata from JSON Config */
char name[MODULE_MAX_NAME_LENGTH];
char path[MODULE_MAX_PATH_LENGTH];
uint32_t stack_size; /* a specification? */
uint64_t max_memory; /* perhaps a specification of the module. (max 4GB) */
uint32_t relative_deadline_us;
int port;
struct admissions_info admissions_info;
@ -55,6 +72,8 @@ struct module {
_Atomic uint32_t reference_count; /* ref count how many instances exist here. */
struct wasm_table *indirect_table;
struct module_pools pools[MAX_WORKER_THREADS];
};
/*************************
@ -85,13 +104,50 @@ module_initialize_globals(struct module *module)
}
/**
* Invoke a module's initialize_tables
* @brief Invoke a module's initialize_tables
* @param module
*
* Table initialization calls a function that runs within the sandbox. Rather than setting the current sandbox,
* we partially fake this out by only setting the table and then clearing it after table
* initialization is complete.
*
* Assumption: This approach depends on module_alloc only being invoked at program start before preemption is
* enabled. We check that current_wasm_module_instance.table is NULL to gain confidence that
* we are not invoking this in a way that clobbers a current module.
*
* If we want to be able to do this later, we can possibly defer module_initialize_table until the first
* invocation. Alternatively, we can maintain the table per sandbox and call initialize
* on each sandbox if this "assumption" is too restrictive and we're ready to pay a per-sandbox performance hit.
*/
static inline void
module_initialize_table(struct module *module)
{
assert(current_wasm_module_instance.table == NULL);
current_wasm_module_instance.table = module->indirect_table;
module->abi.initialize_tables();
current_wasm_module_instance.table = NULL;
}
static inline int
module_alloc_table(struct module *module)
{
/* WebAssembly Indirect Table */
/* TODO: Should this be part of the module or per-sandbox? */
/* TODO: How should this table be sized? */
module->indirect_table = wasm_table_alloc(INDIRECT_TABLE_SIZE);
if (module->indirect_table == NULL) return -1;
module_initialize_table(module);
return 0;
}
static inline void
module_initialize_pools(struct module *module)
{
for (int i = 0; i < MAX_WORKER_THREADS; i++) {
wasm_memory_pool_init(&module->pools[i].memory, false);
wasm_stack_pool_init(&module->pools[i].stack, false);
}
}
/**
@ -141,12 +197,63 @@ module_release(struct module *module)
return;
}
static inline struct wasm_stack *
module_allocate_stack(struct module *module)
{
assert(module != NULL);
struct wasm_stack *stack = wasm_stack_pool_remove_nolock(&module->pools[worker_thread_idx].stack);
if (stack == NULL) {
stack = wasm_stack_alloc(module->stack_size);
if (unlikely(stack == NULL)) return NULL;
}
return stack;
}
static inline void
module_free_stack(struct module *module, struct wasm_stack *stack)
{
wasm_stack_reinit(stack);
wasm_stack_pool_add_nolock(&module->pools[worker_thread_idx].stack, stack);
}
static inline struct wasm_memory *
module_allocate_linear_memory(struct module *module)
{
assert(module != NULL);
uint64_t starting_bytes = (uint64_t)module->abi.starting_pages * WASM_PAGE_SIZE;
uint64_t max_bytes = (uint64_t)module->abi.max_pages * WASM_PAGE_SIZE;
/* UINT32_MAX is the largest representable integral value that can fit into type uint32_t. Because C counts from
zero, the number of states in the range 0..UINT32_MAX is thus UINT32_MAX + 1. This means that the maximum
possible buffer that can be byte-addressed by a full 32-bit address space is UINT32_MAX + 1 */
assert(starting_bytes <= (uint64_t)UINT32_MAX + 1);
assert(max_bytes <= (uint64_t)UINT32_MAX + 1);
struct wasm_memory *linear_memory = wasm_memory_pool_remove_nolock(&module->pools[worker_thread_idx].memory);
if (linear_memory == NULL) {
linear_memory = wasm_memory_alloc(starting_bytes, max_bytes);
if (unlikely(linear_memory == NULL)) return NULL;
}
return linear_memory;
}
static inline void
module_free_linear_memory(struct module *module, struct wasm_memory *memory)
{
wasm_memory_reinit(memory, module->abi.starting_pages * WASM_PAGE_SIZE);
wasm_memory_pool_add_nolock(&module->pools[worker_thread_idx].memory, memory);
}
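A rough sketch of the intended per-worker lifecycle (hypothetical caller; the real consumer is the sandbox code): each worker thread checks objects out of its own pool slot and returns them for reuse, so the hot path avoids both locks and repeated mmap/malloc.

/* Hypothetical caller running on a worker thread (worker_thread_idx already set) */
static inline void
worker_execute_sketch(struct module *module)
{
	struct wasm_memory *memory = module_allocate_linear_memory(module); /* pool hit or fresh allocation */
	if (memory == NULL) return;

	struct wasm_stack *stack = module_allocate_stack(module); /* pool hit or fresh allocation */
	if (stack == NULL) {
		module_free_linear_memory(module, memory);
		return;
	}

	/* ... run the sandbox against memory and stack ... */

	module_free_linear_memory(module, memory); /* reinit, then return to this worker's pool */
	module_free_stack(module, stack);
}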
/********************************
* Public Methods from module.c *
*******************************/
void module_free(struct module *module);
struct module *
module_new(char *mod_name, char *mod_path, uint32_t stack_sz, uint32_t max_heap, uint32_t relative_deadline_us,
int port, int req_sz, int resp_sz, int admissions_percentile, uint32_t expected_execution_us);
int module_new_from_json(char *filename);
void module_free(struct module *module);
struct module *module_alloc(char *mod_name, char *mod_path, uint32_t stack_sz, uint32_t relative_deadline_us, int port,
int req_sz, int resp_sz, int admissions_percentile, uint32_t expected_execution_us);
int module_alloc_from_json(char *filename);

@ -12,122 +12,126 @@
/**
* Initializes perf window
* @param self
* @param perf_window
*/
static inline void
perf_window_initialize(struct perf_window *self)
perf_window_initialize(struct perf_window *perf_window)
{
assert(self != NULL);
assert(perf_window != NULL);
LOCK_INIT(&self->lock);
self->count = 0;
memset(&self->by_duration, 0, sizeof(struct execution_node) * PERF_WINDOW_BUFFER_SIZE);
memset(&self->by_termination, 0, sizeof(uint16_t) * PERF_WINDOW_BUFFER_SIZE);
LOCK_INIT(&perf_window->lock);
perf_window->count = 0;
memset(&perf_window->by_duration, 0, sizeof(struct execution_node) * PERF_WINDOW_BUFFER_SIZE);
memset(&perf_window->by_termination, 0, sizeof(uint16_t) * PERF_WINDOW_BUFFER_SIZE);
}
/**
* Swaps two execution nodes in the by_duration array, including updating the indices in the by_termination circular
* buffer
* @param self
* @param perf_window
* @param first_by_duration_idx
* @param second_by_duration_idx
*/
static inline void
perf_window_swap(struct perf_window *self, uint16_t first_by_duration_idx, uint16_t second_by_duration_idx)
perf_window_swap(struct perf_window *perf_window, uint16_t first_by_duration_idx, uint16_t second_by_duration_idx)
{
assert(LOCK_IS_LOCKED(&self->lock));
assert(self != NULL);
assert(LOCK_IS_LOCKED(&perf_window->lock));
assert(perf_window != NULL);
assert(first_by_duration_idx >= 0 && first_by_duration_idx < PERF_WINDOW_BUFFER_SIZE);
assert(second_by_duration_idx >= 0 && second_by_duration_idx < PERF_WINDOW_BUFFER_SIZE);
uint16_t first_by_termination_idx = self->by_duration[first_by_duration_idx].by_termination_idx;
uint16_t second_by_termination_idx = self->by_duration[second_by_duration_idx].by_termination_idx;
uint16_t first_by_termination_idx = perf_window->by_duration[first_by_duration_idx].by_termination_idx;
uint16_t second_by_termination_idx = perf_window->by_duration[second_by_duration_idx].by_termination_idx;
/* The execution node's by_termination_idx points to a by_termination cell equal to its own by_duration index */
assert(self->by_termination[first_by_termination_idx] == first_by_duration_idx);
assert(self->by_termination[second_by_termination_idx] == second_by_duration_idx);
assert(perf_window->by_termination[first_by_termination_idx] == first_by_duration_idx);
assert(perf_window->by_termination[second_by_termination_idx] == second_by_duration_idx);
uint64_t first_execution_time = self->by_duration[first_by_duration_idx].execution_time;
uint64_t second_execution_time = self->by_duration[second_by_duration_idx].execution_time;
uint64_t first_execution_time = perf_window->by_duration[first_by_duration_idx].execution_time;
uint64_t second_execution_time = perf_window->by_duration[second_by_duration_idx].execution_time;
/* Swap Indices in Buffer*/
self->by_termination[first_by_termination_idx] = second_by_duration_idx;
self->by_termination[second_by_termination_idx] = first_by_duration_idx;
perf_window->by_termination[first_by_termination_idx] = second_by_duration_idx;
perf_window->by_termination[second_by_termination_idx] = first_by_duration_idx;
/* Swap by_termination_idx */
struct execution_node tmp_node = self->by_duration[first_by_duration_idx];
self->by_duration[first_by_duration_idx] = self->by_duration[second_by_duration_idx];
self->by_duration[second_by_duration_idx] = tmp_node;
struct execution_node tmp_node = perf_window->by_duration[first_by_duration_idx];
perf_window->by_duration[first_by_duration_idx] = perf_window->by_duration[second_by_duration_idx];
perf_window->by_duration[second_by_duration_idx] = tmp_node;
/* The circular by_termination indices should always point to the same execution times across all swaps */
assert(self->by_duration[self->by_termination[first_by_termination_idx]].execution_time
assert(perf_window->by_duration[perf_window->by_termination[first_by_termination_idx]].execution_time
== first_execution_time);
assert(self->by_duration[self->by_termination[second_by_termination_idx]].execution_time
assert(perf_window->by_duration[perf_window->by_termination[second_by_termination_idx]].execution_time
== second_execution_time);
}
/**
* Adds a new value to the perf window
* Not intended to be called directly!
* @param self
* @param perf_window
* @param value
*/
static inline void
perf_window_add(struct perf_window *self, uint64_t value)
perf_window_add(struct perf_window *perf_window, uint64_t value)
{
assert(self != NULL);
assert(perf_window != NULL);
uint16_t idx_of_oldest;
bool check_up;
if (unlikely(!LOCK_IS_LOCKED(&self->lock))) panic("lock not held when calling perf_window_add\n");
if (unlikely(!LOCK_IS_LOCKED(&perf_window->lock))) panic("lock not held when calling perf_window_add\n");
/* A successful invocation should run for a non-zero amount of time */
assert(value > 0);
/* If count is 0, then fill entire array with initial execution times */
if (self->count == 0) {
if (perf_window->count == 0) {
for (int i = 0; i < PERF_WINDOW_BUFFER_SIZE; i++) {
self->by_termination[i] = i;
self->by_duration[i] = (struct execution_node){ .execution_time = value,
.by_termination_idx = i };
perf_window->by_termination[i] = i;
perf_window->by_duration[i] = (struct execution_node){ .execution_time = value,
.by_termination_idx = i };
}
self->count = PERF_WINDOW_BUFFER_SIZE;
perf_window->count = PERF_WINDOW_BUFFER_SIZE;
goto done;
}
/* Otherwise, replace the oldest value, and then sort */
idx_of_oldest = self->by_termination[self->count % PERF_WINDOW_BUFFER_SIZE];
check_up = value > self->by_duration[idx_of_oldest].execution_time;
idx_of_oldest = perf_window->by_termination[perf_window->count % PERF_WINDOW_BUFFER_SIZE];
check_up = value > perf_window->by_duration[idx_of_oldest].execution_time;
self->by_duration[idx_of_oldest].execution_time = value;
perf_window->by_duration[idx_of_oldest].execution_time = value;
if (check_up) {
for (uint16_t i = idx_of_oldest;
i + 1 < PERF_WINDOW_BUFFER_SIZE
&& self->by_duration[i + 1].execution_time < self->by_duration[i].execution_time;
&& perf_window->by_duration[i + 1].execution_time < perf_window->by_duration[i].execution_time;
i++) {
perf_window_swap(self, i, i + 1);
perf_window_swap(perf_window, i, i + 1);
}
} else {
for (int i = idx_of_oldest;
i - 1 >= 0 && self->by_duration[i - 1].execution_time > self->by_duration[i].execution_time; i--) {
perf_window_swap(self, i, i - 1);
i - 1 >= 0
&& perf_window->by_duration[i - 1].execution_time > perf_window->by_duration[i].execution_time;
i--) {
perf_window_swap(perf_window, i, i - 1);
}
}
/* The idx that we replaced should still point to the same value */
assert(self->by_duration[self->by_termination[self->count % PERF_WINDOW_BUFFER_SIZE]].execution_time == value);
assert(perf_window->by_duration[perf_window->by_termination[perf_window->count % PERF_WINDOW_BUFFER_SIZE]]
.execution_time
== value);
/* The by_duration array should be ordered by execution time */
#ifndef NDEBUG
for (int i = 1; i < PERF_WINDOW_BUFFER_SIZE; i++) {
assert(self->by_duration[i - 1].execution_time <= self->by_duration[i].execution_time);
assert(perf_window->by_duration[i - 1].execution_time <= perf_window->by_duration[i].execution_time);
}
#endif
self->count++;
perf_window->count++;
done:
return;
@ -135,22 +139,22 @@ done:
/**
* Returns pXX execution time
* @param self
* @param perf_window
* @param percentile represented by int between 50 and 99
* @param precomputed_index memoized index for quick lookup when by_duration is full
* @returns execution time
*/
static inline uint64_t
perf_window_get_percentile(struct perf_window *self, int percentile, int precomputed_index)
perf_window_get_percentile(struct perf_window *perf_window, int percentile, int precomputed_index)
{
assert(self != NULL);
assert(perf_window != NULL);
assert(percentile >= 50 && percentile <= 99);
int size = self->count;
int size = perf_window->count;
assert(size > 0);
if (likely(size >= PERF_WINDOW_BUFFER_SIZE)) return self->by_duration[precomputed_index].execution_time;
if (likely(size >= PERF_WINDOW_BUFFER_SIZE)) return perf_window->by_duration[precomputed_index].execution_time;
return self->by_duration[size * percentile / 100].execution_time;
return perf_window->by_duration[size * percentile / 100].execution_time;
}
/**
@ -158,9 +162,9 @@ perf_window_get_percentile(struct perf_window *self, int percentile, int precomp
* @returns total count
*/
static inline uint64_t
perf_window_get_count(struct perf_window *self)
perf_window_get_count(struct perf_window *perf_window)
{
assert(self != NULL);
assert(perf_window != NULL);
return self->count;
return perf_window->count;
}
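A rough usage sketch (hypothetical caller and duration; the lock discipline mirrors the asserts above, and the index passed to the percentile lookup is a precomputation of size * percentile / 100 for a full window):

/* Hypothetical caller, e.g. after a sandbox completes */
struct perf_window pw;
perf_window_initialize(&pw);

uint64_t execution_duration = 1000; /* hypothetical measurement in cycles */
LOCK_LOCK(&pw.lock);
perf_window_add(&pw, execution_duration); /* requires the lock to be held */
LOCK_UNLOCK(&pw.lock);

/* After the first add, the window is full, so the memoized index is used */
uint64_t p90 = perf_window_get_percentile(&pw, 90, PERF_WINDOW_BUFFER_SIZE * 90 / 100);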

@ -0,0 +1,93 @@
#pragma once
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include "generic_thread.h"
#include "lock.h"
#include "ps_list.h"
#define INIT_POOL(STRUCT_NAME, DTOR_FN) \
struct STRUCT_NAME##_pool { \
bool use_lock; \
lock_t lock; \
struct ps_list_head list; \
}; \
\
static inline bool STRUCT_NAME##_pool_is_empty(struct STRUCT_NAME##_pool *self) \
{ \
assert(self != NULL); \
\
return ps_list_head_empty(&self->list); \
} \
\
static inline void STRUCT_NAME##_pool_init(struct STRUCT_NAME##_pool *self, bool use_lock) \
{ \
ps_list_head_init(&self->list); \
self->use_lock = use_lock; \
if (use_lock) LOCK_INIT(&self->lock); \
} \
\
static inline void STRUCT_NAME##_pool_deinit(struct STRUCT_NAME##_pool *self) \
{ \
if (STRUCT_NAME##_pool_is_empty(self)) return; \
struct STRUCT_NAME *iterator = NULL; \
struct STRUCT_NAME *buffer = NULL; \
ps_list_foreach_del_d(&self->list, iterator, buffer) \
{ \
ps_list_rem_d(iterator); \
DTOR_FN(iterator); \
} \
} \
\
static inline struct STRUCT_NAME *STRUCT_NAME##_pool_remove_nolock(struct STRUCT_NAME##_pool *self) \
{ \
assert(self != NULL); \
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); \
\
struct STRUCT_NAME *obj = NULL; \
\
if (STRUCT_NAME##_pool_is_empty(self)) return obj; \
\
obj = ps_list_head_first_d(&self->list, struct STRUCT_NAME); \
assert(obj); \
ps_list_rem_d(obj); \
\
return obj; \
} \
\
static inline struct STRUCT_NAME *STRUCT_NAME##_pool_remove(struct STRUCT_NAME##_pool *self) \
{ \
assert(self != NULL); \
assert(self->use_lock); \
\
struct STRUCT_NAME *obj = NULL; \
bool is_empty = STRUCT_NAME##_pool_is_empty(self); \
if (is_empty) return obj; \
\
LOCK_LOCK(&self->lock); \
obj = STRUCT_NAME##_pool_remove_nolock(self); \
LOCK_UNLOCK(&self->lock); \
return obj; \
} \
\
static inline void STRUCT_NAME##_pool_add_nolock(struct STRUCT_NAME##_pool *self, struct STRUCT_NAME *obj) \
{ \
assert(self != NULL); \
assert(obj != NULL); \
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); \
\
ps_list_head_add_d(&self->list, obj); \
} \
\
static inline void STRUCT_NAME##_pool_add(struct STRUCT_NAME##_pool *self, struct STRUCT_NAME *obj) \
{ \
assert(self != NULL); \
assert(obj != NULL); \
assert(self->use_lock); \
\
LOCK_LOCK(&self->lock); \
STRUCT_NAME##_pool_add_nolock(self, obj); \
LOCK_UNLOCK(&self->lock); \
}
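As a rough usage sketch (hypothetical scenario, not part of this diff): a pool generated by INIT_POOL caches objects for reuse and falls back to fresh allocation on a miss, as the module does per worker thread.

/* Hypothetical sketch: single-threaded use of a generated wasm_stack pool */
struct wasm_stack_pool pool;
wasm_stack_pool_init(&pool, false); /* use_lock == false: caller guarantees exclusive access */

struct wasm_stack *stack = wasm_stack_pool_remove_nolock(&pool); /* NULL when the pool is empty */
if (stack == NULL) stack = wasm_stack_alloc(PAGE_SIZE); /* miss: allocate fresh */

/* ... use the stack ... */

wasm_stack_pool_add_nolock(&pool, stack); /* return for reuse instead of freeing */
wasm_stack_pool_deinit(&pool);            /* drains remaining objects via wasm_stack_free */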

@ -37,36 +37,36 @@ struct priority_queue {
* @returns the highest priority value in the queue or ULONG_MAX if empty
*/
static inline uint64_t
priority_queue_peek(struct priority_queue *self)
priority_queue_peek(struct priority_queue *priority_queue)
{
return self->highest_priority;
return priority_queue->highest_priority;
}
static inline void
priority_queue_update_highest_priority(struct priority_queue *self, const uint64_t priority)
priority_queue_update_highest_priority(struct priority_queue *priority_queue, const uint64_t priority)
{
self->highest_priority = priority;
priority_queue->highest_priority = priority;
}
/**
* Adds a value to the end of the binary heap
* @param self the priority queue
* @param priority_queue the priority queue
* @param new_item the value we are adding
* @return 0 on success. -ENOSPC when priority queue is full
*/
static inline int
priority_queue_append(struct priority_queue *self, void *new_item)
priority_queue_append(struct priority_queue *priority_queue, void *new_item)
{
assert(self != NULL);
assert(priority_queue != NULL);
assert(new_item != NULL);
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
int rc;
if (unlikely(self->size + 1 > self->capacity)) panic("PQ overflow");
if (unlikely(self->size + 1 == self->capacity)) goto err_enospc;
self->items[++self->size] = new_item;
if (unlikely(priority_queue->size + 1 > priority_queue->capacity)) panic("PQ overflow");
if (unlikely(priority_queue->size + 1 == priority_queue->capacity)) goto err_enospc;
priority_queue->items[++priority_queue->size] = new_item;
rc = 0;
done:
@ -78,71 +78,76 @@ err_enospc:
/**
* Checks if a priority queue is empty
* @param self the priority queue to check
* @param priority_queue the priority queue to check
* @returns true if empty, false otherwise
*/
static inline bool
priority_queue_is_empty(struct priority_queue *self)
priority_queue_is_empty(struct priority_queue *priority_queue)
{
assert(self != NULL);
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(priority_queue != NULL);
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
return self->size == 0;
return priority_queue->size == 0;
}
/**
* Shifts an appended value upwards to restore heap structure property
* @param self the priority queue
* @param priority_queue the priority queue
*/
static inline void
priority_queue_percolate_up(struct priority_queue *self)
priority_queue_percolate_up(struct priority_queue *priority_queue)
{
assert(self != NULL);
assert(self->get_priority_fn != NULL);
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(priority_queue != NULL);
assert(priority_queue->get_priority_fn != NULL);
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
/* If there's only one element, set memoized lookup and early out */
if (self->size == 1) {
priority_queue_update_highest_priority(self, self->get_priority_fn(self->items[1]));
if (priority_queue->size == 1) {
priority_queue_update_highest_priority(priority_queue,
priority_queue->get_priority_fn(priority_queue->items[1]));
return;
}
for (int i = self->size;
i / 2 != 0 && self->get_priority_fn(self->items[i]) < self->get_priority_fn(self->items[i / 2]); i /= 2) {
assert(self->get_priority_fn(self->items[i]) != ULONG_MAX);
void *temp = self->items[i / 2];
self->items[i / 2] = self->items[i];
self->items[i] = temp;
for (int i = priority_queue->size; i / 2 != 0
&& priority_queue->get_priority_fn(priority_queue->items[i])
< priority_queue->get_priority_fn(priority_queue->items[i / 2]);
i /= 2) {
assert(priority_queue->get_priority_fn(priority_queue->items[i]) != ULONG_MAX);
void *temp = priority_queue->items[i / 2];
priority_queue->items[i / 2] = priority_queue->items[i];
priority_queue->items[i] = temp;
/* If percolated to highest priority, update highest priority */
if (i / 2 == 1) priority_queue_update_highest_priority(self, self->get_priority_fn(self->items[1]));
if (i / 2 == 1)
priority_queue_update_highest_priority(priority_queue, priority_queue->get_priority_fn(
priority_queue->items[1]));
}
}
/**
* Returns the index of a node's smallest child
* @param self the priority queue
* @param priority_queue the priority queue
* @param parent_index
* @returns the index of the smallest child
*/
static inline int
priority_queue_find_smallest_child(struct priority_queue *self, const int parent_index)
priority_queue_find_smallest_child(struct priority_queue *priority_queue, const int parent_index)
{
assert(self != NULL);
assert(parent_index >= 1 && parent_index <= self->size);
assert(self->get_priority_fn != NULL);
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(priority_queue != NULL);
assert(parent_index >= 1 && parent_index <= priority_queue->size);
assert(priority_queue->get_priority_fn != NULL);
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
int left_child_index = 2 * parent_index;
int right_child_index = 2 * parent_index + 1;
assert(self->items[left_child_index] != NULL);
assert(priority_queue->items[left_child_index] != NULL);
int smallest_child_idx;
/* If we don't have a right child or the left child is smaller, return it */
if (right_child_index > self->size) {
if (right_child_index > priority_queue->size) {
smallest_child_idx = left_child_index;
} else if (self->get_priority_fn(self->items[left_child_index])
< self->get_priority_fn(self->items[right_child_index])) {
} else if (priority_queue->get_priority_fn(priority_queue->items[left_child_index])
< priority_queue->get_priority_fn(priority_queue->items[right_child_index])) {
smallest_child_idx = left_child_index;
} else {
/* Otherwise, return the right child */
@ -155,29 +160,29 @@ priority_queue_find_smallest_child(struct priority_queue *self, const int parent
/**
* Shifts the top of the heap downwards. Used after placing the last value at
* the top
* @param self the priority queue
* @param priority_queue the priority queue
*/
static inline void
priority_queue_percolate_down(struct priority_queue *self, int parent_index)
priority_queue_percolate_down(struct priority_queue *priority_queue, int parent_index)
{
assert(self != NULL);
assert(self->get_priority_fn != NULL);
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(priority_queue != NULL);
assert(priority_queue->get_priority_fn != NULL);
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
assert(!listener_thread_is_running());
bool update_highest_value = parent_index == 1;
int left_child_index = 2 * parent_index;
while (left_child_index >= 2 && left_child_index <= self->size) {
int smallest_child_index = priority_queue_find_smallest_child(self, parent_index);
while (left_child_index >= 2 && left_child_index <= priority_queue->size) {
int smallest_child_index = priority_queue_find_smallest_child(priority_queue, parent_index);
/* Once the parent is equal to or less than its smallest child, break; */
if (self->get_priority_fn(self->items[parent_index])
<= self->get_priority_fn(self->items[smallest_child_index]))
if (priority_queue->get_priority_fn(priority_queue->items[parent_index])
<= priority_queue->get_priority_fn(priority_queue->items[smallest_child_index]))
break;
/* Otherwise, swap and continue down the tree */
void *temp = self->items[smallest_child_index];
self->items[smallest_child_index] = self->items[parent_index];
self->items[parent_index] = temp;
void *temp = priority_queue->items[smallest_child_index];
priority_queue->items[smallest_child_index] = priority_queue->items[parent_index];
priority_queue->items[parent_index] = temp;
parent_index = smallest_child_index;
left_child_index = 2 * parent_index;
@ -185,10 +190,11 @@ priority_queue_percolate_down(struct priority_queue *self, int parent_index)
/* Update memoized value if we touched the head */
if (update_highest_value) {
if (!priority_queue_is_empty(self)) {
priority_queue_update_highest_priority(self, self->get_priority_fn(self->items[1]));
if (!priority_queue_is_empty(priority_queue)) {
priority_queue_update_highest_priority(priority_queue, priority_queue->get_priority_fn(
priority_queue->items[1]));
} else {
priority_queue_update_highest_priority(self, ULONG_MAX);
priority_queue_update_highest_priority(priority_queue, ULONG_MAX);
}
}
}
@ -198,30 +204,32 @@ priority_queue_percolate_down(struct priority_queue *self, int parent_index)
********************/
/**
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param dequeued_element a pointer to set to the dequeued element
* @param target_deadline the deadline that the request must be earlier than in order to dequeue
* @returns RC 0 if successfully set dequeued_element, -ENOENT if empty or if none meet target_deadline
*/
static inline int
priority_queue_dequeue_if_earlier_nolock(struct priority_queue *self, void **dequeued_element, uint64_t target_deadline)
priority_queue_dequeue_if_earlier_nolock(struct priority_queue *priority_queue, void **dequeued_element,
uint64_t target_deadline)
{
assert(self != NULL);
assert(priority_queue != NULL);
assert(dequeued_element != NULL);
assert(self->get_priority_fn != NULL);
assert(priority_queue->get_priority_fn != NULL);
assert(!listener_thread_is_running());
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
int return_code;
/* If the head is not higher priority (earlier timestamp) than target_deadline, return immediately */
if (priority_queue_is_empty(self) || self->highest_priority >= target_deadline) goto err_enoent;
if (priority_queue_is_empty(priority_queue) || priority_queue->highest_priority >= target_deadline)
goto err_enoent;
*dequeued_element = self->items[1];
self->items[1] = self->items[self->size];
self->items[self->size--] = NULL;
*dequeued_element = priority_queue->items[1];
priority_queue->items[1] = priority_queue->items[priority_queue->size];
priority_queue->items[priority_queue->size--] = NULL;
priority_queue_percolate_down(self, 1);
priority_queue_percolate_down(priority_queue, 1);
return_code = 0;
done:
@ -232,19 +240,20 @@ err_enoent:
}
/**
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param dequeued_element a pointer to set to the dequeued element
* @param target_deadline the deadline that the request must be earlier than in order to dequeue
* @returns RC 0 if successfully set dequeued_element, -ENOENT if empty or if none meet target_deadline
*/
static inline int
priority_queue_dequeue_if_earlier(struct priority_queue *self, void **dequeued_element, uint64_t target_deadline)
priority_queue_dequeue_if_earlier(struct priority_queue *priority_queue, void **dequeued_element,
uint64_t target_deadline)
{
int return_code;
LOCK_LOCK(&self->lock);
return_code = priority_queue_dequeue_if_earlier_nolock(self, dequeued_element, target_deadline);
LOCK_UNLOCK(&self->lock);
LOCK_LOCK(&priority_queue->lock);
return_code = priority_queue_dequeue_if_earlier_nolock(priority_queue, dequeued_element, target_deadline);
LOCK_UNLOCK(&priority_queue->lock);
return return_code;
}
@ -264,79 +273,79 @@ priority_queue_initialize(size_t capacity, bool use_lock, priority_queue_get_pri
/* Add one to capacity because this data structure ignores the element at 0 */
size_t one_based_capacity = capacity + 1;
struct priority_queue *self = (struct priority_queue *)calloc(sizeof(struct priority_queue)
+ sizeof(void *) * one_based_capacity,
1);
struct priority_queue *priority_queue = (struct priority_queue *)calloc(sizeof(struct priority_queue)
+ sizeof(void *) * one_based_capacity,
1);
/* We're assuming a min-heap implementation, so set to the largest possible value */
priority_queue_update_highest_priority(self, ULONG_MAX);
self->size = 0;
self->capacity = one_based_capacity; // Add one because we skip element 0
self->get_priority_fn = get_priority_fn;
self->use_lock = use_lock;
priority_queue_update_highest_priority(priority_queue, ULONG_MAX);
priority_queue->size = 0;
priority_queue->capacity = one_based_capacity; // Add one because we skip element 0
priority_queue->get_priority_fn = get_priority_fn;
priority_queue->use_lock = use_lock;
if (use_lock) LOCK_INIT(&self->lock);
if (use_lock) LOCK_INIT(&priority_queue->lock);
return self;
return priority_queue;
}
/**
* Free the Priority Queue Data structure
* @param self the priority_queue to initialize
* @param priority_queue the priority_queue to free
*/
static inline void
priority_queue_free(struct priority_queue *self)
priority_queue_free(struct priority_queue *priority_queue)
{
assert(self != NULL);
assert(priority_queue != NULL);
free(self);
free(priority_queue);
}
/**
* @param self the priority_queue
* @param priority_queue the priority_queue
* @returns the number of elements in the priority queue
*/
static inline int
priority_queue_length_nolock(struct priority_queue *self)
priority_queue_length_nolock(struct priority_queue *priority_queue)
{
assert(self != NULL);
assert(priority_queue != NULL);
assert(!listener_thread_is_running());
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
return self->size;
return priority_queue->size;
}
/**
* @param self the priority_queue
* @param priority_queue the priority_queue
* @returns the number of elements in the priority queue
*/
static inline int
priority_queue_length(struct priority_queue *self)
priority_queue_length(struct priority_queue *priority_queue)
{
LOCK_LOCK(&self->lock);
int size = priority_queue_length_nolock(self);
LOCK_UNLOCK(&self->lock);
LOCK_LOCK(&priority_queue->lock);
int size = priority_queue_length_nolock(priority_queue);
LOCK_UNLOCK(&priority_queue->lock);
return size;
}
/**
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param value - the value we want to add
* @returns 0 on success. -ENOSPC on full.
*/
static inline int
priority_queue_enqueue_nolock(struct priority_queue *self, void *value)
priority_queue_enqueue_nolock(struct priority_queue *priority_queue, void *value)
{
assert(self != NULL);
assert(priority_queue != NULL);
assert(value != NULL);
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
int rc;
if (unlikely(priority_queue_append(self, value) == -ENOSPC)) goto err_enospc;
if (unlikely(priority_queue_append(priority_queue, value) == -ENOSPC)) goto err_enospc;
priority_queue_percolate_up(self);
priority_queue_percolate_up(priority_queue);
rc = 0;
done:
@ -347,40 +356,40 @@ err_enospc:
}
/**
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param value - the value we want to add
* @returns 0 on success. -ENOSPC on full.
*/
static inline int
priority_queue_enqueue(struct priority_queue *self, void *value)
priority_queue_enqueue(struct priority_queue *priority_queue, void *value)
{
int rc;
LOCK_LOCK(&self->lock);
rc = priority_queue_enqueue_nolock(self, value);
LOCK_UNLOCK(&self->lock);
LOCK_LOCK(&priority_queue->lock);
rc = priority_queue_enqueue_nolock(priority_queue, value);
LOCK_UNLOCK(&priority_queue->lock);
return rc;
}
/**
* @param self - the priority queue we want to delete from
* @param priority_queue - the priority queue we want to delete from
* @param value - the value we want to delete
* @returns 0 on success. -1 on not found
*/
static inline int
priority_queue_delete_nolock(struct priority_queue *self, void *value)
priority_queue_delete_nolock(struct priority_queue *priority_queue, void *value)
{
assert(self != NULL);
assert(priority_queue != NULL);
assert(value != NULL);
assert(!listener_thread_is_running());
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
for (int i = 1; i <= self->size; i++) {
if (self->items[i] == value) {
self->items[i] = self->items[self->size];
self->items[self->size--] = NULL;
priority_queue_percolate_down(self, i);
for (int i = 1; i <= priority_queue->size; i++) {
if (priority_queue->items[i] == value) {
priority_queue->items[i] = priority_queue->items[priority_queue->size];
priority_queue->items[priority_queue->size--] = NULL;
priority_queue_percolate_down(priority_queue, i);
return 0;
}
}
@ -389,64 +398,64 @@ priority_queue_delete_nolock(struct priority_queue *self, void *value)
}
/**
* @param self - the priority queue we want to delete from
* @param priority_queue - the priority queue we want to delete from
* @param value - the value we want to delete
* @returns 0 on success. -1 on not found
*/
static inline int
priority_queue_delete(struct priority_queue *self, void *value)
priority_queue_delete(struct priority_queue *priority_queue, void *value)
{
int rc;
LOCK_LOCK(&self->lock);
rc = priority_queue_delete_nolock(self, value);
LOCK_UNLOCK(&self->lock);
LOCK_LOCK(&priority_queue->lock);
rc = priority_queue_delete_nolock(priority_queue, value);
LOCK_UNLOCK(&priority_queue->lock);
return rc;
}
/**
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param dequeued_element a pointer to set to the dequeued element
* @returns RC 0 if successfully set dequeued_element, -ENOENT if empty
*/
static inline int
priority_queue_dequeue(struct priority_queue *self, void **dequeued_element)
priority_queue_dequeue(struct priority_queue *priority_queue, void **dequeued_element)
{
return priority_queue_dequeue_if_earlier(self, dequeued_element, UINT64_MAX);
return priority_queue_dequeue_if_earlier(priority_queue, dequeued_element, UINT64_MAX);
}
/**
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param dequeued_element a pointer to set to the dequeued element
* @returns RC 0 if successfully set dequeued_element, -ENOENT if empty
*/
static inline int
priority_queue_dequeue_nolock(struct priority_queue *self, void **dequeued_element)
priority_queue_dequeue_nolock(struct priority_queue *priority_queue, void **dequeued_element)
{
return priority_queue_dequeue_if_earlier_nolock(self, dequeued_element, UINT64_MAX);
return priority_queue_dequeue_if_earlier_nolock(priority_queue, dequeued_element, UINT64_MAX);
}
/**
* Returns the top of the priority queue without removing it
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param dequeued_element a pointer to set to the top element
* @returns RC 0 if successfully set dequeued_element, -ENOENT if empty
*/
static inline int
priority_queue_top_nolock(struct priority_queue *self, void **dequeued_element)
priority_queue_top_nolock(struct priority_queue *priority_queue, void **dequeued_element)
{
assert(self != NULL);
assert(priority_queue != NULL);
assert(dequeued_element != NULL);
assert(self->get_priority_fn != NULL);
assert(priority_queue->get_priority_fn != NULL);
assert(!listener_thread_is_running());
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
int return_code;
if (priority_queue_is_empty(self)) goto err_enoent;
if (priority_queue_is_empty(priority_queue)) goto err_enoent;
*dequeued_element = self->items[1];
*dequeued_element = priority_queue->items[1];
return_code = 0;
done:
@ -458,18 +467,18 @@ err_enoent:
/**
* Returns the top of the priority queue without removing it
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param dequeued_element a pointer to set to the top element
* @returns RC 0 if successfully set dequeued_element, -ENOENT if empty
*/
static inline int
priority_queue_top(struct priority_queue *self, void **dequeued_element)
priority_queue_top(struct priority_queue *priority_queue, void **dequeued_element)
{
int return_code;
LOCK_LOCK(&self->lock);
return_code = priority_queue_top_nolock(self, dequeued_element);
LOCK_UNLOCK(&self->lock);
LOCK_LOCK(&priority_queue->lock);
return_code = priority_queue_top_nolock(priority_queue, dequeued_element);
LOCK_UNLOCK(&priority_queue->lock);
return return_code;
}
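A rough end-to-end sketch of the locked API (the element type and priority accessor are hypothetical; the runtime's real elements are sandboxes keyed by deadline):

/* Hypothetical min-heap usage keyed on a deadline field */
struct item {
	uint64_t deadline;
};

static uint64_t
item_get_priority(void *element)
{
	return ((struct item *)element)->deadline;
}

struct priority_queue *pq = priority_queue_initialize(1024, true, item_get_priority);
struct item work = { .deadline = 100 };

priority_queue_enqueue(pq, &work); /* percolates up; 0 on success, -ENOSPC when full */

void *next = NULL;
if (priority_queue_dequeue(pq, &next) == 0) {
	/* next is the earliest-deadline item */
}

priority_queue_free(pq);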

@ -12,8 +12,8 @@
* Public API *
**************************/
struct sandbox *sandbox_new(struct module *module, int socket_descriptor, const struct sockaddr *socket_address,
uint64_t request_arrival_timestamp, uint64_t admissions_estimate);
struct sandbox *sandbox_alloc(struct module *module, int socket_descriptor, const struct sockaddr *socket_address,
uint64_t request_arrival_timestamp, uint64_t admissions_estimate);
int sandbox_prepare_execution_environment(struct sandbox *sandbox);
void sandbox_free(struct sandbox *sandbox);
void sandbox_main(struct sandbox *sandbox);
@ -36,16 +36,18 @@ sandbox_close_http(struct sandbox *sandbox)
static inline void
sandbox_free_linear_memory(struct sandbox *sandbox)
{
wasm_memory_delete(sandbox->memory);
assert(sandbox != NULL);
assert(sandbox->memory != NULL);
module_free_linear_memory(sandbox->module, sandbox->memory);
sandbox->memory = NULL;
}
/**
* Free Linear Memory, leaving stack in place
* Deinitialize HTTP buffers, cleaning up the backing buffers
* @param sandbox
*/
static inline void
sandbox_free_http_buffers(struct sandbox *sandbox)
sandbox_deinit_http_buffers(struct sandbox *sandbox)
{
assert(sandbox);
vec_u8_deinit(&sandbox->request);

@ -38,7 +38,7 @@ sandbox_set_as_error(struct sandbox *sandbox, sandbox_state_t last_state)
case SANDBOX_RUNNING_SYS: {
local_runqueue_delete(sandbox);
sandbox_free_linear_memory(sandbox);
sandbox_free_http_buffers(sandbox);
sandbox_deinit_http_buffers(sandbox);
break;
}
default: {

@ -33,7 +33,7 @@ sandbox_set_as_returned(struct sandbox *sandbox, sandbox_state_t last_state)
sandbox->total_time = now - sandbox->timestamp_of.request_arrival;
local_runqueue_delete(sandbox);
sandbox_free_linear_memory(sandbox);
sandbox_free_http_buffers(sandbox);
sandbox_deinit_http_buffers(sandbox);
break;
}
default: {

@ -15,18 +15,20 @@ struct sandbox_state_history {
};
static inline void
sandbox_state_history_init(struct sandbox_state_history *self)
sandbox_state_history_init(struct sandbox_state_history *sandbox_state_history)
{
#ifdef LOG_STATE_CHANGES
memset(self, 0,
memset(sandbox_state_history, 0,
sizeof(struct sandbox_state_history) + SANDBOX_STATE_HISTORY_CAPACITY * sizeof(sandbox_state_t));
#endif
}
static inline void
sandbox_state_history_append(struct sandbox_state_history *self, sandbox_state_t state)
sandbox_state_history_append(struct sandbox_state_history *sandbox_state_history, sandbox_state_t state)
{
#ifdef LOG_STATE_CHANGES
if (likely(self->size < SANDBOX_STATE_HISTORY_CAPACITY)) { self->buffer[self->size++] = state; }
if (likely(sandbox_state_history->size < SANDBOX_STATE_HISTORY_CAPACITY)) {
sandbox_state_history->buffer[sandbox_state_history->size++] = state;
}
#endif
}

@ -58,7 +58,7 @@ struct sandbox {
/* WebAssembly Instance State */
struct arch_context ctxt;
struct wasm_stack stack;
struct wasm_stack * stack;
struct wasm_memory *memory;
/* Scheduling and Temporal State */

@ -18,6 +18,11 @@
#define PAGE_SIZE (unsigned long)(1 << 12)
#define WEAK __attribute__((weak))
#define CACHE_LINE 64
/* This might be Intel specific. ARM and x64 both have the same CACHE_LINE size, but Intel uses a double
* cache-line as a coherency unit */
#define CACHE_PAD (CACHE_LINE * 2)
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
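A minimal sketch of the intent behind CACHE_PAD (hypothetical struct, not from this diff): aligning per-worker state to a full coherency unit places each element on its own double cache-line, so one worker's writes do not invalidate a neighboring worker's cached copy (false sharing).

/* Hypothetical: one counter per worker, each on its own coherency unit */
struct padded_counter {
	uint64_t value;
} __attribute__((aligned(CACHE_PAD)));

struct padded_counter counters[64]; /* sizeof(struct padded_counter) == CACHE_PAD == 128 */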

@ -6,44 +6,32 @@
struct vec_u8 {
size_t length;
size_t capacity;
uint8_t *buffer;
uint8_t *buffer; /* Backing heap allocation. Different lifetime because realloc might move this */
};
static inline struct vec_u8 *vec_u8_alloc(void);
static inline int vec_u8_init(struct vec_u8 *self, size_t capacity);
static inline struct vec_u8 *vec_u8_new(size_t capacity);
static inline void vec_u8_deinit(struct vec_u8 *self);
static inline void vec_u8_free(struct vec_u8 *self);
static inline void vec_u8_delete(struct vec_u8 *self);
/**
* Allocates an uninitialized vec on the heap
* @returns a pointer to an uninitialized vec on the heap
*/
static inline struct vec_u8 *
vec_u8_alloc(void)
{
return (struct vec_u8 *)calloc(1, sizeof(struct vec_u8));
}
static inline int vec_u8_init(struct vec_u8 *vec_u8, size_t capacity);
static inline struct vec_u8 *vec_u8_alloc(size_t capacity);
static inline void vec_u8_deinit(struct vec_u8 *vec_u8);
static inline void vec_u8_free(struct vec_u8 *vec_u8);
/**
* Initializes a vec, allocating a backing buffer for the provided capacity
* @param self pointer to an uninitialized vec
* @param vec_u8 pointer to an uninitialized vec
* @param capacity
* @returns 0 on success, -1 on failure
*/
static inline int
vec_u8_init(struct vec_u8 *self, size_t capacity)
vec_u8_init(struct vec_u8 *vec_u8, size_t capacity)
{
if (capacity == 0) {
self->buffer = NULL;
vec_u8->buffer = NULL;
} else {
self->buffer = calloc(capacity, sizeof(uint8_t));
if (self->buffer == NULL) return -1;
vec_u8->buffer = calloc(capacity, sizeof(uint8_t));
if (vec_u8->buffer == NULL) return -1;
}
self->length = 0;
self->capacity = capacity;
vec_u8->length = 0;
vec_u8->capacity = capacity;
return 0;
}
@ -54,60 +42,47 @@ vec_u8_init(struct vec_u8 *self, size_t capacity)
* @returns a pointer to an initialized vec on the heap, ready for use
*/
static inline struct vec_u8 *
vec_u8_new(size_t capacity)
vec_u8_alloc(size_t capacity)
{
struct vec_u8 *self = vec_u8_alloc();
if (self == NULL) return self;
struct vec_u8 *vec_u8 = (struct vec_u8 *)malloc(sizeof(struct vec_u8));
if (vec_u8 == NULL) return vec_u8;
int rc = vec_u8_init(self, capacity);
int rc = vec_u8_init(vec_u8, capacity);
if (rc < 0) {
vec_u8_free(self);
vec_u8_free(vec_u8);
return NULL;
}
return self;
return vec_u8;
}
/**
* Deinitialize a vec, clearing out members and releasing the backing buffer
* @param self
* @param vec_u8
*/
static inline void
vec_u8_deinit(struct vec_u8 *self)
vec_u8_deinit(struct vec_u8 *vec_u8)
{
if (self->capacity == 0) {
assert(self->buffer == NULL);
assert(self->length == 0);
if (vec_u8->capacity == 0) {
assert(vec_u8->buffer == NULL);
assert(vec_u8->length == 0);
return;
}
assert(self->buffer != NULL);
free(self->buffer);
self->buffer = NULL;
self->length = 0;
self->capacity = 0;
}
/**
* Frees a vec struct allocated on the heap
* Assumes that the vec has already been deinitialized
*/
static inline void
vec_u8_free(struct vec_u8 *self)
{
assert(self->buffer == NULL);
assert(self->length == 0);
assert(self->capacity == 0);
free(self);
assert(vec_u8->buffer != NULL);
free(vec_u8->buffer);
vec_u8->buffer = NULL;
vec_u8->length = 0;
vec_u8->capacity = 0;
}
/**
* Deinitializes and frees a vec allocated on the heap
* @param self
* @param vec_u8
*/
static inline void
vec_u8_delete(struct vec_u8 *self)
vec_u8_free(struct vec_u8 *vec_u8)
{
vec_u8_deinit(self);
vec_u8_free(self);
vec_u8_deinit(vec_u8);
free(vec_u8);
}
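A brief sketch of the revised lifecycle (hypothetical caller): vec_u8_alloc pairs with vec_u8_free for heap-owned vecs, while vec_u8_init pairs with vec_u8_deinit for embedded ones, as the sandbox does with its request/response buffers.

/* Heap-owned: vec_u8_alloc allocates the struct and its backing buffer */
struct vec_u8 *v = vec_u8_alloc(PAGE_SIZE);
if (v != NULL) {
	/* ... write into v->buffer, tracking v->length against v->capacity ... */
	vec_u8_free(v); /* deinit, then free the struct itself */
}

/* Embedded: only the backing buffer is heap-allocated */
struct vec_u8 embedded;
if (vec_u8_init(&embedded, PAGE_SIZE) == 0) {
	vec_u8_deinit(&embedded); /* releases the buffer, leaves the struct */
}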

@ -7,6 +7,7 @@
#include <string.h>
#include <sys/mman.h>
#include "ps_list.h"
#include "types.h" /* PAGE_SIZE */
#include "wasm_types.h"
@ -14,30 +15,64 @@
#define WASM_MEMORY_SIZE_TO_ALLOC ((size_t)WASM_MEMORY_MAX + /* guard page */ PAGE_SIZE)
struct wasm_memory {
size_t size; /* Initial Size in bytes */
size_t capacity; /* Size backed by actual pages */
size_t max; /* Soft cap in bytes. Defaults to 4GB */
uint8_t *buffer;
struct ps_list list; /* Linked List Node used for object pool */
size_t size; /* Initial Size in bytes */
size_t capacity; /* Size backed by actual pages */
size_t max; /* Soft cap in bytes. Defaults to 4GB */
uint8_t * buffer; /* Backing heap allocation. Different lifetime because realloc might move this */
};
static INLINE struct wasm_memory *wasm_memory_alloc(void);
static INLINE int wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max);
static INLINE struct wasm_memory *wasm_memory_new(size_t initial, size_t max);
static INLINE void wasm_memory_deinit(struct wasm_memory *self);
static INLINE void wasm_memory_free(struct wasm_memory *self);
static INLINE void wasm_memory_delete(struct wasm_memory *self);
/* Object Lifecycle Functions */
static INLINE struct wasm_memory *wasm_memory_alloc(size_t initial, size_t max);
static INLINE int wasm_memory_init(struct wasm_memory *wasm_memory, size_t initial, size_t max);
static INLINE void wasm_memory_deinit(struct wasm_memory *wasm_memory);
static INLINE void wasm_memory_free(struct wasm_memory *wasm_memory);
static INLINE void wasm_memory_reinit(struct wasm_memory *wasm_memory, size_t initial);
/* Memory Size */
static INLINE int wasm_memory_expand(struct wasm_memory *wasm_memory, size_t size_to_expand);
static INLINE size_t wasm_memory_get_size(struct wasm_memory *wasm_memory);
static INLINE uint32_t wasm_memory_get_page_count(struct wasm_memory *wasm_memory);
/* Reading and writing to wasm_memory */
static INLINE void
wasm_memory_initialize_region(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t region_size, uint8_t region[]);
static INLINE void * wasm_memory_get_ptr_void(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t size);
static INLINE int8_t wasm_memory_get_i8(struct wasm_memory *wasm_memory, uint32_t offset);
static INLINE int16_t wasm_memory_get_i16(struct wasm_memory *wasm_memory, uint32_t offset);
static INLINE int32_t wasm_memory_get_i32(struct wasm_memory *wasm_memory, uint32_t offset);
static INLINE int64_t wasm_memory_get_i64(struct wasm_memory *wasm_memory, uint32_t offset);
static INLINE float wasm_memory_get_f32(struct wasm_memory *wasm_memory, uint32_t offset);
static INLINE double wasm_memory_get_f64(struct wasm_memory *wasm_memory, uint32_t offset);
static INLINE char wasm_memory_get_char(struct wasm_memory *wasm_memory, uint32_t offset);
static INLINE char * wasm_memory_get_string(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t size);
static INLINE void wasm_memory_set_i8(struct wasm_memory *wasm_memory, uint32_t offset, int8_t value);
static INLINE void wasm_memory_set_i16(struct wasm_memory *wasm_memory, uint32_t offset, int16_t value);
static INLINE void wasm_memory_set_i32(struct wasm_memory *wasm_memory, uint32_t offset, int32_t value);
static INLINE void wasm_memory_set_i64(struct wasm_memory *wasm_memory, uint64_t offset, int64_t value);
static INLINE void wasm_memory_set_f32(struct wasm_memory *wasm_memory, uint32_t offset, float value);
static INLINE void wasm_memory_set_f64(struct wasm_memory *wasm_memory, uint32_t offset, double value);
static INLINE struct wasm_memory *
wasm_memory_alloc(void)
wasm_memory_alloc(size_t initial, size_t max)
{
return malloc(sizeof(struct wasm_memory));
struct wasm_memory *wasm_memory = malloc(sizeof(struct wasm_memory));
if (wasm_memory == NULL) return wasm_memory;
int rc = wasm_memory_init(wasm_memory, initial, max);
if (rc < 0) {
assert(0);
wasm_memory_free(wasm_memory);
return NULL;
}
return wasm_memory;
}
static INLINE int
wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max)
wasm_memory_init(struct wasm_memory *wasm_memory, size_t initial, size_t max)
{
assert(self != NULL);
assert(wasm_memory != NULL);
/* We assume WASI modules, which are required to declare and export a linear memory with a non-zero size to
* allow a standard lib to initialize. Technically, a WebAssembly module that exports pure functions may not use
@ -48,83 +83,59 @@ wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max)
assert(max <= (size_t)UINT32_MAX + 1);
/* Allocate buffer of contiguous virtual addresses for full wasm32 linear memory and guard page */
self->buffer = mmap(NULL, WASM_MEMORY_SIZE_TO_ALLOC, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (self->buffer == MAP_FAILED) return -1;
wasm_memory->buffer = mmap(NULL, WASM_MEMORY_SIZE_TO_ALLOC, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (wasm_memory->buffer == MAP_FAILED) return -1;
/* Set the initial bytes to read / write */
int rc = mprotect(self->buffer, initial, PROT_READ | PROT_WRITE);
int rc = mprotect(wasm_memory->buffer, initial, PROT_READ | PROT_WRITE);
if (rc != 0) {
munmap(self->buffer, WASM_MEMORY_SIZE_TO_ALLOC);
munmap(wasm_memory->buffer, WASM_MEMORY_SIZE_TO_ALLOC);
return -1;
}
self->size = initial;
self->capacity = initial;
self->max = max;
ps_list_init_d(wasm_memory);
wasm_memory->size = initial;
wasm_memory->capacity = initial;
wasm_memory->max = max;
return 0;
}
static INLINE struct wasm_memory *
wasm_memory_new(size_t initial, size_t max)
{
struct wasm_memory *self = wasm_memory_alloc();
if (self == NULL) return self;
int rc = wasm_memory_init(self, initial, max);
if (rc < 0) {
assert(0);
wasm_memory_free(self);
return NULL;
}
return self;
}
static INLINE void
wasm_memory_deinit(struct wasm_memory *self)
{
assert(self != NULL);
assert(self->buffer != NULL);
munmap(self->buffer, WASM_MEMORY_SIZE_TO_ALLOC);
self->buffer = NULL;
self->size = 0;
self->capacity = 0;
self->max = 0;
}
static INLINE void
wasm_memory_free(struct wasm_memory *self)
wasm_memory_deinit(struct wasm_memory *wasm_memory)
{
assert(self != NULL);
/* Assume prior deinitialization so we don't leak buffers */
assert(self->buffer == NULL);
free(self);
assert(wasm_memory != NULL);
assert(wasm_memory->buffer != NULL);
munmap(wasm_memory->buffer, WASM_MEMORY_SIZE_TO_ALLOC);
wasm_memory->buffer = NULL;
wasm_memory->size = 0;
wasm_memory->capacity = 0;
wasm_memory->max = 0;
}
static INLINE void
wasm_memory_delete(struct wasm_memory *self)
wasm_memory_free(struct wasm_memory *wasm_memory)
{
assert(self != NULL);
wasm_memory_deinit(self);
wasm_memory_free(self);
assert(wasm_memory != NULL);
wasm_memory_deinit(wasm_memory);
free(wasm_memory);
}
static INLINE void
wasm_memory_wipe(struct wasm_memory *self)
wasm_memory_reinit(struct wasm_memory *wasm_memory, size_t initial)
{
memset(self->buffer, 0, self->size);
memset(wasm_memory->buffer, 0, wasm_memory->size);
wasm_memory->size = initial;
}
static INLINE int
wasm_memory_expand(struct wasm_memory *self, size_t size_to_expand)
wasm_memory_expand(struct wasm_memory *wasm_memory, size_t size_to_expand)
{
size_t target_size = self->size + size_to_expand;
if (unlikely(target_size > self->max)) {
fprintf(stderr, "wasm_memory_expand - Out of Memory!. %lu out of %lu\n", self->size, self->max);
size_t target_size = wasm_memory->size + size_to_expand;
if (unlikely(target_size > wasm_memory->max)) {
fprintf(stderr, "wasm_memory_expand - Out of Memory!. %lu out of %lu\n", wasm_memory->size,
wasm_memory->max);
return -1;
}
@ -133,37 +144,31 @@ wasm_memory_expand(struct wasm_memory *self, size_t size_to_expand)
* size is less than this "high water mark," we just need to update size for accounting purposes. Otherwise, we
* need to actually issue an mprotect syscall. The goal of these optimizations is to reduce mmap and demand
* paging overhead for repeated instantiations of a WebAssembly module. */
if (target_size > self->capacity) {
int rc = mprotect(self->buffer, target_size, PROT_READ | PROT_WRITE);
if (target_size > wasm_memory->capacity) {
int rc = mprotect(wasm_memory->buffer, target_size, PROT_READ | PROT_WRITE);
if (rc != 0) {
perror("wasm_memory_expand mprotect");
return -1;
}
self->capacity = target_size;
wasm_memory->capacity = target_size;
}
self->size = target_size;
wasm_memory->size = target_size;
return 0;
}
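For instance (a hedged sketch using the lifecycle functions above), repeated grow/reinit cycles only pay the mprotect cost when crossing a new high-water mark:

/* Hypothetical: reserve the full wasm32 range, commit one page, then grow */
struct wasm_memory *memory = wasm_memory_alloc(WASM_PAGE_SIZE, (size_t)UINT32_MAX + 1);
if (memory != NULL) {
	wasm_memory_expand(memory, WASM_PAGE_SIZE); /* first crossing: mprotect commits pages */
	wasm_memory_reinit(memory, WASM_PAGE_SIZE); /* wipe and reset size; capacity persists */
	wasm_memory_expand(memory, WASM_PAGE_SIZE); /* below high-water mark: accounting only */
	wasm_memory_free(memory);
}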
static INLINE void
wasm_memory_set_size(struct wasm_memory *self, size_t size)
{
self->size = size;
}
static INLINE size_t
wasm_memory_get_size(struct wasm_memory *self)
wasm_memory_get_size(struct wasm_memory *wasm_memory)
{
return self->size;
return wasm_memory->size;
}
static INLINE void
wasm_memory_initialize_region(struct wasm_memory *self, uint32_t offset, uint32_t region_size, uint8_t region[])
wasm_memory_initialize_region(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t region_size, uint8_t region[])
{
assert((size_t)offset + region_size <= self->size);
memcpy(&self->buffer[offset], region, region_size);
assert((size_t)offset + region_size <= wasm_memory->size);
memcpy(&wasm_memory->buffer[offset], region, region_size);
}
/* NOTE: These wasm_memory functions require pointer dereferencing. For this reason, they are not directly used by wasm32
@ -176,10 +181,10 @@ wasm_memory_initialize_region(struct wasm_memory *self, uint32_t offset, uint32_
* @return void pointer to something in WebAssembly linear memory
*/
static INLINE void *
wasm_memory_get_ptr_void(struct wasm_memory *self, uint32_t offset, uint32_t size)
wasm_memory_get_ptr_void(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t size)
{
assert(offset + size <= self->size);
return (void *)&self->buffer[offset];
assert(offset + size <= wasm_memory->size);
return (void *)&wasm_memory->buffer[offset];
}
 /**
@@ -188,10 +193,10 @@ wasm_memory_get_ptr_void(struct wasm_memory *self, uint32_t offset, uint32_t siz
  * @return char at the offset
  */
 static INLINE char
-wasm_memory_get_char(struct wasm_memory *self, uint32_t offset)
+wasm_memory_get_char(struct wasm_memory *wasm_memory, uint32_t offset)
 {
-    assert(offset + sizeof(char) <= self->size);
-    return *(char *)&self->buffer[offset];
+    assert(offset + sizeof(char) <= wasm_memory->size);
+    return *(char *)&wasm_memory->buffer[offset];
 }

 /**
@@ -200,10 +205,10 @@ wasm_memory_get_char(struct wasm_memory *self, uint32_t offset)
  * @return float at the offset
  */
 static INLINE float
-wasm_memory_get_f32(struct wasm_memory *self, uint32_t offset)
+wasm_memory_get_f32(struct wasm_memory *wasm_memory, uint32_t offset)
 {
-    assert(offset + sizeof(float) <= self->size);
-    return *(float *)&self->buffer[offset];
+    assert(offset + sizeof(float) <= wasm_memory->size);
+    return *(float *)&wasm_memory->buffer[offset];
 }

 /**
@@ -212,10 +217,10 @@ wasm_memory_get_f32(struct wasm_memory *self, uint32_t offset)
  * @return double at the offset
  */
 static INLINE double
-wasm_memory_get_f64(struct wasm_memory *self, uint32_t offset)
+wasm_memory_get_f64(struct wasm_memory *wasm_memory, uint32_t offset)
 {
-    assert(offset + sizeof(double) <= self->size);
-    return *(double *)&self->buffer[offset];
+    assert(offset + sizeof(double) <= wasm_memory->size);
+    return *(double *)&wasm_memory->buffer[offset];
 }

 /**
@@ -224,10 +229,10 @@ wasm_memory_get_f64(struct wasm_memory *self, uint32_t offset)
  * @return int8_t at the offset
  */
 static INLINE int8_t
-wasm_memory_get_i8(struct wasm_memory *self, uint32_t offset)
+wasm_memory_get_i8(struct wasm_memory *wasm_memory, uint32_t offset)
 {
-    assert(offset + sizeof(int8_t) <= self->size);
-    return *(int8_t *)&self->buffer[offset];
+    assert(offset + sizeof(int8_t) <= wasm_memory->size);
+    return *(int8_t *)&wasm_memory->buffer[offset];
 }

 /**
@@ -236,10 +241,10 @@ wasm_memory_get_i8(struct wasm_memory *self, uint32_t offset)
  * @return int16_t at the offset
  */
 static INLINE int16_t
-wasm_memory_get_i16(struct wasm_memory *self, uint32_t offset)
+wasm_memory_get_i16(struct wasm_memory *wasm_memory, uint32_t offset)
 {
-    assert(offset + sizeof(int16_t) <= self->size);
-    return *(int16_t *)&self->buffer[offset];
+    assert(offset + sizeof(int16_t) <= wasm_memory->size);
+    return *(int16_t *)&wasm_memory->buffer[offset];
 }

 /**
@@ -248,10 +253,10 @@ wasm_memory_get_i16(struct wasm_memory *self, uint32_t offset)
  * @return int32_t at the offset
  */
 static INLINE int32_t
-wasm_memory_get_i32(struct wasm_memory *self, uint32_t offset)
+wasm_memory_get_i32(struct wasm_memory *wasm_memory, uint32_t offset)
 {
-    assert(offset + sizeof(int32_t) <= self->size);
-    return *(int32_t *)&self->buffer[offset];
+    assert(offset + sizeof(int32_t) <= wasm_memory->size);
+    return *(int32_t *)&wasm_memory->buffer[offset];
 }
 /**
@@ -260,16 +265,16 @@ wasm_memory_get_i32(struct wasm_memory *self, uint32_t offset)
  * @return int64_t at the offset
  */
 static INLINE int64_t
-wasm_memory_get_i64(struct wasm_memory *self, uint32_t offset)
+wasm_memory_get_i64(struct wasm_memory *wasm_memory, uint32_t offset)
 {
-    assert(offset + sizeof(int64_t) <= self->size);
-    return *(int64_t *)&self->buffer[offset];
+    assert(offset + sizeof(int64_t) <= wasm_memory->size);
+    return *(int64_t *)&wasm_memory->buffer[offset];
 }
 static INLINE uint32_t
-wasm_memory_get_page_count(struct wasm_memory *self)
+wasm_memory_get_page_count(struct wasm_memory *wasm_memory)
 {
-    return (uint32_t)(self->size / WASM_PAGE_SIZE);
+    return (uint32_t)(wasm_memory->size / WASM_PAGE_SIZE);
 }

 /**
@@ -279,12 +284,12 @@ wasm_memory_get_page_count(struct wasm_memory *self)
  * @return pointer to the string or NULL if max_length is reached without finding null-terminator
  */
 static INLINE char *
-wasm_memory_get_string(struct wasm_memory *self, uint32_t offset, uint32_t size)
+wasm_memory_get_string(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t size)
 {
-    assert(offset + (sizeof(char) * size) <= self->size);
+    assert(offset + (sizeof(char) * size) <= wasm_memory->size);

-    if (strnlen((const char *)&self->buffer[offset], size) < size) {
-        return (char *)&self->buffer[offset];
+    if (strnlen((const char *)&wasm_memory->buffer[offset], size) < size) {
+        return (char *)&wasm_memory->buffer[offset];
     } else {
         return NULL;
     }
 /**
@@ -296,10 +301,10 @@ wasm_memory_get_string(struct wasm_memory *self, uint32_t offset, uint32_t size)
  * @param value float to write at the offset
  */
 static INLINE void
-wasm_memory_set_f32(struct wasm_memory *self, uint32_t offset, float value)
+wasm_memory_set_f32(struct wasm_memory *wasm_memory, uint32_t offset, float value)
 {
-    assert(offset + sizeof(float) <= self->size);
-    *(float *)&self->buffer[offset] = value;
+    assert(offset + sizeof(float) <= wasm_memory->size);
+    *(float *)&wasm_memory->buffer[offset] = value;
 }

 /**
@@ -308,10 +313,10 @@ wasm_memory_set_f32(struct wasm_memory *self, uint32_t offset, float value)
  * @param value double to write at the offset
  */
 static INLINE void
-wasm_memory_set_f64(struct wasm_memory *self, uint32_t offset, double value)
+wasm_memory_set_f64(struct wasm_memory *wasm_memory, uint32_t offset, double value)
 {
-    assert(offset + sizeof(double) <= self->size);
-    *(double *)&self->buffer[offset] = value;
+    assert(offset + sizeof(double) <= wasm_memory->size);
+    *(double *)&wasm_memory->buffer[offset] = value;
 }

 /**
@@ -320,10 +325,10 @@ wasm_memory_set_f64(struct wasm_memory *self, uint32_t offset, double value)
  * @param value int8_t to write at the offset
  */
 static INLINE void
-wasm_memory_set_i8(struct wasm_memory *self, uint32_t offset, int8_t value)
+wasm_memory_set_i8(struct wasm_memory *wasm_memory, uint32_t offset, int8_t value)
 {
-    assert(offset + sizeof(int8_t) <= self->size);
-    *(int8_t *)&self->buffer[offset] = value;
+    assert(offset + sizeof(int8_t) <= wasm_memory->size);
+    *(int8_t *)&wasm_memory->buffer[offset] = value;
 }

 /**
@@ -332,10 +337,10 @@ wasm_memory_set_i8(struct wasm_memory *self, uint32_t offset, int8_t value)
  * @param value int16_t to write at the offset
  */
 static INLINE void
-wasm_memory_set_i16(struct wasm_memory *self, uint32_t offset, int16_t value)
+wasm_memory_set_i16(struct wasm_memory *wasm_memory, uint32_t offset, int16_t value)
 {
-    assert(offset + sizeof(int16_t) <= self->size);
-    *(int16_t *)&self->buffer[offset] = value;
+    assert(offset + sizeof(int16_t) <= wasm_memory->size);
+    *(int16_t *)&wasm_memory->buffer[offset] = value;
 }

 /**
@@ -344,10 +349,10 @@ wasm_memory_set_i16(struct wasm_memory *self, uint32_t offset, int16_t value)
  * @param value int32_t to write at the offset
  */
 static INLINE void
-wasm_memory_set_i32(struct wasm_memory *self, uint32_t offset, int32_t value)
+wasm_memory_set_i32(struct wasm_memory *wasm_memory, uint32_t offset, int32_t value)
 {
-    assert(offset + sizeof(int32_t) <= self->size);
-    *(int32_t *)&self->buffer[offset] = value;
+    assert(offset + sizeof(int32_t) <= wasm_memory->size);
+    *(int32_t *)&wasm_memory->buffer[offset] = value;
 }

 /**
@@ -356,8 +361,8 @@ wasm_memory_set_i32(struct wasm_memory *self, uint32_t offset, int32_t value)
  * @param value int64_t to write at the offset
  */
 static INLINE void
-wasm_memory_set_i64(struct wasm_memory *self, uint64_t offset, int64_t value)
+wasm_memory_set_i64(struct wasm_memory *wasm_memory, uint64_t offset, int64_t value)
 {
-    assert(offset + sizeof(int64_t) <= self->size);
-    *(int64_t *)&self->buffer[offset] = value;
+    assert(offset + sizeof(int64_t) <= wasm_memory->size);
+    *(int64_t *)&wasm_memory->buffer[offset] = value;
 }
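
Taken together, these accessors give the runtime a bounds-checked window into linear memory: every read or write asserts offset + width <= size before reinterpreting the bytes at that offset. A minimal usage sketch; the host function below is illustrative, not part of this commit:

/* Hypothetical host call: sums count i32 values starting at offset */
static int32_t
host_sum_i32(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t count)
{
    int32_t sum = 0;
    for (uint32_t i = 0; i < count; i++) {
        sum += wasm_memory_get_i32(wasm_memory, offset + i * (uint32_t)sizeof(int32_t));
    }
    return sum;
}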

@@ -7,13 +7,33 @@
 #include "sandbox_types.h"
 #include "types.h"

+/**
+ * @brief wasm_stack is a stack used to execute an AOT-compiled WebAssembly instance. It is allocated with a static size
+ * and a guard page beneath the lowest usable address. Because the stack grows down, this protects against stack
+ * overflow.
+ *
+ * Low Address <---------------------------------------------------------------------------> High Address
+ * | GUARD PAGE |                 USABLE FOR STACK FRAMES (capacity bytes)                   |
+ * /\           /\                                                                          /\
+ * buffer       low                                                                         high
+ *
+ *                                        | Frame 2 | Frame 1 | Frame 0 |
+ *                                        <<<<<<< Direction of Stack Growth
+ */
 struct wasm_stack {
-    size_t capacity;   /* Usable capacity. Excludes size of guard page that we need to free */
-    uint8_t *high;     /* The highest address of the stack. Grows down from here */
-    uint8_t *low;      /* The address of the lowest usabe address. Above guard page */
-    uint8_t *buffer;   /* Points to Guard Page */
+    struct ps_list list;     /* Linked List Node used for object pool */
+    uint64_t       capacity; /* Usable capacity. Excludes size of guard page that we need to free */
+    uint8_t *      high;     /* The highest address of the stack. Grows down from here */
+    uint8_t *      low;      /* The address of the lowest usable address. Above guard page */
+    uint8_t *      buffer;   /* Points to the base address of the backing heap allocation (Guard Page) */
 };

+static struct wasm_stack *wasm_stack_alloc(uint64_t capacity);
+static inline int         wasm_stack_init(struct wasm_stack *wasm_stack, uint64_t capacity);
+static inline void        wasm_stack_reinit(struct wasm_stack *wasm_stack);
+static inline void        wasm_stack_deinit(struct wasm_stack *wasm_stack);
+static inline void        wasm_stack_free(struct wasm_stack *wasm_stack);
 /**
  * Allocates a statically sized stack for a sandbox with a guard page underneath
  * Because a stack grows down, this protects against stack overflow
@@ -21,49 +41,87 @@ struct wasm_stack {
  * @param wasm_stack the stack struct to initialize
  * @param capacity usable stack size in bytes
  * @returns 0 on success, -1 on error
  */
-static INLINE int
-wasm_stack_allocate(struct wasm_stack *stack, size_t capacity)
+static inline int
+wasm_stack_init(struct wasm_stack *wasm_stack, uint64_t capacity)
 {
-    assert(stack);
+    assert(wasm_stack);

     int rc = 0;

-    stack->buffer = (uint8_t *)mmap(NULL, /* guard page */ PAGE_SIZE + capacity, PROT_NONE,
-                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-    if (unlikely(stack->buffer == MAP_FAILED)) {
+    wasm_stack->buffer = (uint8_t *)mmap(NULL, /* guard page */ PAGE_SIZE + capacity, PROT_NONE,
+                                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    if (unlikely(wasm_stack->buffer == MAP_FAILED)) {
         perror("sandbox allocate stack");
         goto err_stack_allocation_failed;
     }

-    stack->low = (uint8_t *)mmap(stack->buffer + /* guard page */ PAGE_SIZE, capacity, PROT_READ | PROT_WRITE,
-                                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
-    if (unlikely(stack->low == MAP_FAILED)) {
+    wasm_stack->low = (uint8_t *)mmap(wasm_stack->buffer + /* guard page */ PAGE_SIZE, capacity,
+                                      PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+    if (unlikely(wasm_stack->low == MAP_FAILED)) {
         perror("sandbox set stack read/write");
         goto err_stack_prot_failed;
     }

-    stack->capacity = capacity;
-    stack->high     = stack->low + capacity;
+    ps_list_init_d(wasm_stack);
+    wasm_stack->capacity = capacity;
+    wasm_stack->high     = wasm_stack->low + capacity;

     rc = 0;
 done:
     return rc;
 err_stack_prot_failed:
-    rc = munmap(stack->buffer, PAGE_SIZE + capacity);
+    rc = munmap(wasm_stack->buffer, PAGE_SIZE + capacity);
     if (rc == -1) perror("munmap");
 err_stack_allocation_failed:
-    stack->buffer = NULL;
-    rc            = -1;
+    wasm_stack->buffer = NULL;
+    rc                 = -1;
     goto done;
 }
-static INLINE void
-wasm_stack_free(struct wasm_stack *stack)
+static struct wasm_stack *
+wasm_stack_alloc(uint64_t capacity)
 {
-    assert(stack != NULL);
-    assert(stack->buffer != NULL);
+    struct wasm_stack *wasm_stack = calloc(1, sizeof(struct wasm_stack));
+    if (unlikely(wasm_stack == NULL)) return NULL;
+
+    int rc = wasm_stack_init(wasm_stack, capacity);
+    if (rc < 0) {
+        /* On init failure buffer is NULL, so free only the struct itself rather than
+         * calling wasm_stack_free, whose assert on buffer would fire */
+        free(wasm_stack);
+        return NULL;
+    }
+
+    return wasm_stack;
+}

+static inline void
+wasm_stack_deinit(struct wasm_stack *wasm_stack)
+{
+    assert(wasm_stack != NULL);
+    assert(wasm_stack->buffer != NULL);

     /* The stack start is the bottom of the usable stack, but we allocated a guard page below this */
-    int rc = munmap(stack->buffer, stack->capacity + PAGE_SIZE);
-    stack->buffer = NULL;
-    if (unlikely(rc == -1)) perror("munmap");
+    munmap(wasm_stack->buffer, wasm_stack->capacity + PAGE_SIZE);
+    wasm_stack->buffer = NULL;
+    wasm_stack->high   = NULL;
+    wasm_stack->low    = NULL;
+}

+static inline void
+wasm_stack_free(struct wasm_stack *wasm_stack)
+{
+    assert(wasm_stack != NULL);
+    assert(wasm_stack->buffer != NULL);
+    wasm_stack_deinit(wasm_stack);
+    free(wasm_stack);
+}

+static inline void
+wasm_stack_reinit(struct wasm_stack *wasm_stack)
+{
+    assert(wasm_stack != NULL);
+    assert(wasm_stack->buffer != NULL);

+    wasm_stack->low = wasm_stack->buffer + /* guard page */ PAGE_SIZE;

+    memset(wasm_stack->low, 0, wasm_stack->capacity);

+    ps_list_init_d(wasm_stack);

+    wasm_stack->high = wasm_stack->low + wasm_stack->capacity;
+}
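
The init/reinit split is what makes pooling pay off: a recycled stack skips both mmap calls and only needs a memset and pointer reset. A sketch of the intended lifecycle, using hypothetical pool helper names (the actual names are generated by the INIT_POOL macro and are not shown here):

/* Assumed pool API; the stack, pool, and capacity variables are illustrative */
struct wasm_stack *stack = wasm_stack_pool_remove(&pool); /* try to reuse */
if (stack != NULL) {
    wasm_stack_reinit(stack);             /* zero the usable region, reset low/high */
} else {
    stack = wasm_stack_alloc(capacity);   /* cold path: fresh guard page + stack mapping */
}
/* ... execute a sandbox on this stack ... */
wasm_stack_pool_add(&pool, stack);        /* recycle instead of munmap + free */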

@@ -17,85 +17,77 @@ struct wasm_table_entry {
 struct wasm_table {
     uint32_t                 length;
     uint32_t                 capacity;
-    struct wasm_table_entry *buffer;
+    struct wasm_table_entry *buffer; /* Backing heap allocation */
 };
-static INLINE struct wasm_table *wasm_table_alloc(void);
-static INLINE int                wasm_table_init(struct wasm_table *self, size_t capacity);
-static INLINE struct wasm_table *wasm_table_new(size_t capacity);
-static INLINE void               wasm_table_deinit(struct wasm_table *self);
-static INLINE void               wasm_table_free(struct wasm_table *self);
-static INLINE void               wasm_table_delete(struct wasm_table *self);

-static INLINE struct wasm_table *
-wasm_table_alloc(void)
-{
-    return (struct wasm_table *)malloc(sizeof(struct wasm_table));
-}

+static INLINE int                wasm_table_init(struct wasm_table *wasm_table, size_t capacity);
+static INLINE struct wasm_table *wasm_table_alloc(size_t capacity);
+static INLINE void               wasm_table_deinit(struct wasm_table *wasm_table);
+static INLINE void               wasm_table_free(struct wasm_table *wasm_table);
 static INLINE int
-wasm_table_init(struct wasm_table *self, size_t capacity)
+wasm_table_init(struct wasm_table *wasm_table, size_t capacity)
 {
-    assert(self != NULL);
+    assert(wasm_table != NULL);

     if (capacity > 0) {
-        self->buffer = calloc(capacity, sizeof(struct wasm_table_entry));
-        if (self->buffer == NULL) return -1;
+        wasm_table->buffer = calloc(capacity, sizeof(struct wasm_table_entry));
+        if (wasm_table->buffer == NULL) return -1;
     }

-    self->capacity = capacity;
-    self->length   = 0;
+    wasm_table->capacity = capacity;
+    wasm_table->length   = 0;

     return 0;
 }

 static INLINE struct wasm_table *
-wasm_table_new(size_t capacity)
+wasm_table_alloc(size_t capacity)
 {
-    struct wasm_table *self = wasm_table_alloc();
-    if (self == NULL) return NULL;
+    struct wasm_table *wasm_table = (struct wasm_table *)malloc(sizeof(struct wasm_table));
+    if (wasm_table == NULL) return NULL;

-    int rc = wasm_table_init(self, capacity);
+    int rc = wasm_table_init(wasm_table, capacity);
     if (rc < 0) {
-        wasm_table_free(self);
+        wasm_table_free(wasm_table);
         return NULL;
     }

-    return self;
+    return wasm_table;
 }
 static INLINE void
-wasm_table_deinit(struct wasm_table *self)
+wasm_table_deinit(struct wasm_table *wasm_table)
 {
-    assert(self != NULL);
+    assert(wasm_table != NULL);

-    if (self->capacity > 0) {
-        assert(self->buffer == NULL);
-        assert(self->length == 0);
+    if (wasm_table->capacity == 0) {
+        assert(wasm_table->buffer == NULL);
+        assert(wasm_table->length == 0);
         return;
     }

-    assert(self->buffer != NULL);
-    free(self->buffer);
-    self->buffer   = NULL;
-    self->length   = 0;
-    self->capacity = 0;
+    assert(wasm_table->buffer != NULL);
+    free(wasm_table->buffer);
+    wasm_table->buffer   = NULL;
+    wasm_table->length   = 0;
+    wasm_table->capacity = 0;
 }
 static INLINE void
-wasm_table_free(struct wasm_table *self)
+wasm_table_free(struct wasm_table *wasm_table)
 {
-    assert(self != NULL);
-    free(self);
+    assert(wasm_table != NULL);
+    free(wasm_table);
 }

 static INLINE void *
-wasm_table_get(struct wasm_table *self, uint32_t idx, uint32_t type_id)
+wasm_table_get(struct wasm_table *wasm_table, uint32_t idx, uint32_t type_id)
 {
-    assert(self != NULL);
-    assert(idx < self->capacity);
+    assert(wasm_table != NULL);
+    assert(idx < wasm_table->capacity);

-    struct wasm_table_entry f = self->buffer[idx];
+    struct wasm_table_entry f = wasm_table->buffer[idx];
     // FIXME: Commented out function type check because of gocr
     // assert(f.type_id == type_id);
@@ -105,14 +97,14 @@ wasm_table_get(struct wasm_table *self, uint32_t idx, uint32_t type_id)
 }

 static INLINE void
-wasm_table_set(struct wasm_table *self, uint32_t idx, uint32_t type_id, char *pointer)
+wasm_table_set(struct wasm_table *wasm_table, uint32_t idx, uint32_t type_id, char *pointer)
 {
-    assert(self != NULL);
-    assert(idx < self->capacity);
+    assert(wasm_table != NULL);
+    assert(idx < wasm_table->capacity);
     assert(pointer != NULL);

     /* TODO: atomic for multiple concurrent invocations? Issue #97 */
-    if (self->buffer[idx].type_id == type_id && self->buffer[idx].func_pointer == pointer) return;
+    if (wasm_table->buffer[idx].type_id == type_id && wasm_table->buffer[idx].func_pointer == pointer) return;

-    self->buffer[idx] = (struct wasm_table_entry){ .type_id = type_id, .func_pointer = pointer };
+    wasm_table->buffer[idx] = (struct wasm_table_entry){ .type_id = type_id, .func_pointer = pointer };
 }
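
For context, wasm_table_get is what an AOT-compiled call_indirect ultimately funnels through. A hypothetical trampoline; the typedef, function name, and type-id constant are made up for illustration:

typedef int32_t (*wasm_fn_i32_i32)(int32_t);

static int32_t
call_indirect_i32_i32(struct wasm_table *wasm_table, uint32_t function_index, int32_t argument)
{
    /* type_id 3 stands in for the (i32) -> i32 signature; the type check inside
     * wasm_table_get is currently disabled, per the FIXME above */
    wasm_fn_i32_i32 f = (wasm_fn_i32_i32)wasm_table_get(wasm_table, function_index, 3);
    return f(argument);
}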

@@ -3,29 +3,29 @@

 /**
  * Initializes perf window
- * @param self
+ * @param admissions_info
  */
 void
-admissions_info_initialize(struct admissions_info *self, int percentile, uint64_t expected_execution,
+admissions_info_initialize(struct admissions_info *admissions_info, int percentile, uint64_t expected_execution,
                            uint64_t relative_deadline)
 {
 #ifdef ADMISSIONS_CONTROL
     assert(relative_deadline > 0);
     assert(expected_execution > 0);

-    self->relative_deadline = relative_deadline;
-    self->estimate          = admissions_control_calculate_estimate(expected_execution, relative_deadline);
-    debuglog("Initial Estimate: %lu\n", self->estimate);
-    assert(self != NULL);
+    assert(admissions_info != NULL);
+    admissions_info->relative_deadline = relative_deadline;
+    admissions_info->estimate = admissions_control_calculate_estimate(expected_execution, relative_deadline);
+    debuglog("Initial Estimate: %lu\n", admissions_info->estimate);

-    perf_window_initialize(&self->perf_window);
+    perf_window_initialize(&admissions_info->perf_window);

     if (unlikely(percentile < 50 || percentile > 99)) panic("Invalid admissions percentile");
-    self->percentile = percentile;
+    admissions_info->percentile = percentile;

-    self->control_index = PERF_WINDOW_BUFFER_SIZE * percentile / 100;
+    admissions_info->control_index = PERF_WINDOW_BUFFER_SIZE * percentile / 100;

 #ifdef LOG_ADMISSIONS_CONTROL
-    debuglog("Percentile: %d\n", self->percentile);
-    debuglog("Control Index: %d\n", self->control_index);
+    debuglog("Percentile: %d\n", admissions_info->percentile);
+    debuglog("Control Index: %d\n", admissions_info->control_index);
 #endif
 #endif
 }
@@ -33,19 +33,21 @@ admissions_info_initialize(struct admissions_info *self, int percentile, uint64_

 /*
  * Adds an execution value to the perf window and calculates and caches an updated estimate
- * @param self
+ * @param admissions_info
  * @param execution_duration
  */
 void
-admissions_info_update(struct admissions_info *self, uint64_t execution_duration)
+admissions_info_update(struct admissions_info *admissions_info, uint64_t execution_duration)
 {
 #ifdef ADMISSIONS_CONTROL
-    struct perf_window *perf_window = &self->perf_window;
+    struct perf_window *perf_window = &admissions_info->perf_window;

-    LOCK_LOCK(&self->perf_window.lock);
+    LOCK_LOCK(&admissions_info->perf_window.lock);
     perf_window_add(perf_window, execution_duration);
-    uint64_t estimated_execution = perf_window_get_percentile(perf_window, self->percentile, self->control_index);
-    self->estimate = admissions_control_calculate_estimate(estimated_execution, self->relative_deadline);
-    LOCK_UNLOCK(&self->perf_window.lock);
+    uint64_t estimated_execution = perf_window_get_percentile(perf_window, admissions_info->percentile,
+                                                              admissions_info->control_index);
+    admissions_info->estimate = admissions_control_calculate_estimate(estimated_execution,
+                                                                      admissions_info->relative_deadline);
+    LOCK_UNLOCK(&admissions_info->perf_window.lock);
 #endif
 }
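
For intuition about control_index: assuming PERF_WINDOW_BUFFER_SIZE is 16 (its definition is not shown in this diff), a module configured for the 90th percentile gets

    int control_index = 16 * 90 / 100; /* = 14 */

so each update re-reads the sample at sorted position 14 of the 16 most recent execution times and feeds it to admissions_control_calculate_estimate.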

@@ -7,16 +7,20 @@
 **************************************************/

 void
-http_request_print(struct http_request *self)
+http_request_print(struct http_request *http_request)
 {
-    printf("Header Count %d\n", self->header_count);
+    printf("Header Count %d\n", http_request->header_count);
     printf("Header Content:\n");
-    for (int i = 0; i < self->header_count; i++) {
-        for (int j = 0; j < self->headers[i].key_length; j++) { putchar(self->headers[i].key[j]); }
+    for (int i = 0; i < http_request->header_count; i++) {
+        for (int j = 0; j < http_request->headers[i].key_length; j++) {
+            putchar(http_request->headers[i].key[j]);
+        }
         putchar(':');
-        for (int j = 0; j < self->headers[i].value_length; j++) { putchar(self->headers[i].value[j]); }
+        for (int j = 0; j < http_request->headers[i].value_length; j++) {
+            putchar(http_request->headers[i].value[j]);
+        }
         putchar('\n');
     }
-    printf("Body Length %d\n", self->body_length);
-    printf("Body Read Length %d\n", self->body_read_length);
+    printf("Body Length %d\n", http_request->body_length);
+    printf("Body Read Length %d\n", http_request->body_read_length);
 }

@@ -179,9 +179,9 @@ listener_thread_main(void *dummy)
         }

         /* Allocate a Sandbox */
-        struct sandbox *sandbox = sandbox_new(module, client_socket,
-                                              (const struct sockaddr *)&client_address,
-                                              request_arrival_timestamp, work_admitted);
+        struct sandbox *sandbox = sandbox_alloc(module, client_socket,
+                                                (const struct sockaddr *)&client_address,
+                                                request_arrival_timestamp, work_admitted);
         if (unlikely(sandbox == NULL)) {
             client_socket_send_oneshot(client_socket,
                                        http_header_build(503), http_header_len(503));

@@ -369,7 +369,7 @@ main(int argc, char **argv)
 #ifdef LOG_MODULE_LOADING
     debuglog("Parsing modules file [%s]\n", argv[1]);
 #endif
-    if (module_new_from_json(argv[1])) panic("failed to initialize module(s) defined in %s\n", argv[1]);
+    if (module_alloc_from_json(argv[1])) panic("failed to initialize module(s) defined in %s\n", argv[1]);

     for (int i = 0; i < runtime_worker_threads_count; i++) {

@@ -126,55 +126,30 @@ module_free(struct module *module)
     free(module);
 }

-/**
- * Module Contructor
- * Creates a new module, invokes initialize_tables to initialize the indirect table, adds it to the module DB, and
- *starts listening for HTTP Requests
- *
- * @param name
- * @param path
- * @param stack_size
- * @param max_memory
- * @param relative_deadline_us
- * @param port
- * @param request_size
- * @returns A new module or NULL in case of failure
- */
-struct module *
-module_new(char *name, char *path, uint32_t stack_size, uint32_t max_memory, uint32_t relative_deadline_us, int port,
-           int request_size, int response_size, int admissions_percentile, uint32_t expected_execution_us)
+static inline int
+module_init(struct module *module, char *name, char *path, uint32_t stack_size, uint32_t relative_deadline_us, int port,
+            int request_size, int response_size, int admissions_percentile, uint32_t expected_execution_us)
 {
-    struct module *module = (struct module *)calloc(1, sizeof(struct module));
-    if (!module) {
-        fprintf(stderr, "Failed to allocate module: %s\n", strerror(errno));
-        goto err;
-    };
-    int rc = 0;
+    int rc = 0;
+    assert(module != NULL);

     atomic_init(&module->reference_count, 0);

     rc = awsm_abi_init(&module->abi, path);
-    if (rc != 0) goto awsm_abi_init_err;
+    if (rc != 0) goto err;

     /* Set fields in the module struct */
     strncpy(module->name, name, MODULE_MAX_NAME_LENGTH);
     strncpy(module->path, path, MODULE_MAX_PATH_LENGTH);

-    module->stack_size = ((uint32_t)(round_up_to_page(stack_size == 0 ? WASM_STACK_SIZE : stack_size)));
-    module->max_memory = max_memory == 0 ? ((uint64_t)WASM_PAGE_SIZE * WASM_MEMORY_PAGES_MAX) : max_memory;
+    module->stack_size = ((uint32_t)(round_up_to_page(stack_size == 0 ? WASM_STACK_SIZE : stack_size)));
     debuglog("Stack Size: %u", module->stack_size);

     module->socket_descriptor = -1;
     module->port              = port;

     /* Deadlines */
     module->relative_deadline_us = relative_deadline_us;

     /* This should have been handled when a module was loaded */
     assert(relative_deadline_us < RUNTIME_RELATIVE_DEADLINE_US_MAX);

     /* This can overflow a uint32_t, so be sure to cast appropriately */
     module->relative_deadline = (uint64_t)relative_deadline_us * runtime_processor_speed_MHz;
@@ -183,44 +158,58 @@ module_new(char *name, char *path, uint32_t stack_size, uint32_t max_memory, uin
     admissions_info_initialize(&module->admissions_info, admissions_percentile, expected_execution,
                                module->relative_deadline);

-    /* WebAssembly Indirect Table */
-    /* TODO: Should this be part of the module or per-sandbox? */
-    /* TODO: How should this table be sized? */
-    module->indirect_table = wasm_table_new(INDIRECT_TABLE_SIZE);

     /* Request Response Buffer */
     if (request_size == 0) request_size = MODULE_DEFAULT_REQUEST_RESPONSE_SIZE;
     if (response_size == 0) response_size = MODULE_DEFAULT_REQUEST_RESPONSE_SIZE;
     module->max_request_size  = round_up_to_page(request_size);
     module->max_response_size = round_up_to_page(response_size);

-    /* Table initialization calls a function that runs within the sandbox. Rather than setting the current sandbox,
-     * we partially fake this out by only setting the table and then clearing after table
-     * initialization is complete.
-     *
-     * assumption: This approach depends on module_new only being invoked at program start before preemption is
-     * enabled. We are check that current_wasm_module_instance.table is NULL to gain confidence that
-     * we are not invoking this in a way that clobbers a current module.
-     *
-     * If we want to be able to do this later, we can possibly defer module_initialize_table until the first
-     * invocation. Alternatively, we can maintain the table per sandbox and call initialize
-     * on each sandbox if this "assumption" is too restrictive and we're ready to pay a per-sandbox performance hit.
-     */
-    assert(current_wasm_module_instance.table == NULL);
-    current_wasm_module_instance.table = module->indirect_table;
-    module_initialize_table(module);
-    current_wasm_module_instance.table = NULL;
+    module_alloc_table(module);
+    module_initialize_pools(module);

     /* Start listening for requests */
     rc = module_listen(module);
-    if (rc < 0) goto err_listen;
+    if (rc < 0) goto err;

+done:
+    return rc;
+err:
+    rc = -1;
+    goto done;
+}
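
The refactor above follows the commit's broader naming convention: _init populates a caller-provided struct, _alloc is heap allocation plus _init, and _deinit/_free mirror them on teardown. In generic form (the widget type and its functions are purely illustrative):

struct widget { int fd; };
static inline int  widget_init(struct widget *widget);   /* acquires resources; 0 on success */
static inline void widget_deinit(struct widget *widget); /* releases resources, leaves struct intact */

static inline struct widget *
widget_alloc(void)
{
    struct widget *widget = calloc(1, sizeof(struct widget));
    if (widget == NULL) return NULL;
    if (widget_init(widget) < 0) {
        free(widget); /* init failed, so only the struct itself needs freeing */
        return NULL;
    }
    return widget;
}

static inline void
widget_free(struct widget *widget)
{
    widget_deinit(widget);
    free(widget);
}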
+/**
+ * Module Constructor
+ * Creates a new module, invokes initialize_tables to initialize the indirect table, adds it to the module DB, and
+ * starts listening for HTTP Requests
+ *
+ * @param name
+ * @param path
+ * @param stack_size
+ * @param relative_deadline_us
+ * @param port
+ * @param request_size
+ * @returns A new module or NULL in case of failure
+ */
+struct module *
+module_alloc(char *name, char *path, uint32_t stack_size, uint32_t relative_deadline_us, int port, int request_size,
+             int response_size, int admissions_percentile, uint32_t expected_execution_us)
+{
+    struct module *module = (struct module *)calloc(1, sizeof(struct module));
+    if (!module) {
+        fprintf(stderr, "Failed to allocate module: %s\n", strerror(errno));
+        goto err;
+    }
+
+    int rc = module_init(module, name, path, stack_size, relative_deadline_us, port, request_size, response_size,
+                         admissions_percentile, expected_execution_us);
+    if (rc < 0) goto init_err;

 done:
     return module;
-err_listen:
-awsm_abi_init_err:
+init_err:
     free(module);
 err:
     module = NULL;
@@ -233,7 +222,7 @@ err:
  * @return RC 0 on Success. -1 on Error
  */
 int
-module_new_from_json(char *file_name)
+module_alloc_from_json(char *file_name)
 {
     assert(file_name != NULL);
     int return_code = -1;

@@ -421,10 +410,10 @@ module_new_from_json(char *file_name)
 #endif

         /* Allocate a module based on the values from the JSON */
-        struct module *module = module_new(module_name, module_path, 0, 0, relative_deadline_us, port,
-                                           request_size, response_size, admissions_percentile,
-                                           expected_execution_us);
-        if (module == NULL) goto module_new_err;
+        struct module *module = module_alloc(module_name, module_path, 0, relative_deadline_us, port,
+                                             request_size, response_size, admissions_percentile,
+                                             expected_execution_us);
+        if (module == NULL) goto module_alloc_err;

         assert(module);
         module_set_http_info(module, response_content_type);

@@ -441,7 +430,7 @@ module_new_from_json(char *file_name)
 done:
     return return_code;
-module_new_err:
+module_alloc_err:
 json_parse_err:
 fclose_err:
     /* We will retry fclose when we fall through into stat_buffer_alloc_err */

@@ -5,6 +5,7 @@
 #include "current_sandbox.h"
 #include "debuglog.h"
 #include "panic.h"
+#include "pool.h"
 #include "runtime.h"
 #include "sandbox_functions.h"
 #include "sandbox_set_as_error.h"
@@ -34,16 +35,7 @@ static inline int
 sandbox_allocate_linear_memory(struct sandbox *sandbox)
 {
     assert(sandbox != NULL);

-    char *error_message = NULL;

-    size_t initial = (size_t)sandbox->module->abi.starting_pages * WASM_PAGE_SIZE;
-    size_t max     = (size_t)sandbox->module->abi.max_pages * WASM_PAGE_SIZE;

-    assert(initial <= (size_t)UINT32_MAX + 1);
-    assert(max <= (size_t)UINT32_MAX + 1);

-    sandbox->memory = wasm_memory_new(initial, max);
+    sandbox->memory = module_allocate_linear_memory(sandbox->module);
     if (unlikely(sandbox->memory == NULL)) return -1;

     return 0;
@@ -55,7 +47,10 @@ sandbox_allocate_stack(struct sandbox *sandbox)
     assert(sandbox);
     assert(sandbox->module);

-    return wasm_stack_allocate(&sandbox->stack, sandbox->module->stack_size);
+    sandbox->stack = module_allocate_stack(sandbox->module);
+    if (sandbox->stack == NULL) return -1;

+    return 0;
 }

 static inline void
@@ -63,7 +58,7 @@ sandbox_free_stack(struct sandbox *sandbox)
 {
     assert(sandbox);

-    return wasm_stack_free(&sandbox->stack);
+    return module_free_stack(sandbox->module, sandbox->stack);
 }
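
module_allocate_stack and module_free_stack are new per-module wrappers whose bodies fall outside this excerpt. Given the per-worker pools embedded in struct module, they plausibly look like the following sketch; the pool helper names are assumed, not confirmed by this diff:

/* Sketch only: pool helpers as INIT_POOL(wasm_stack, wasm_stack_free) might generate them */
static inline struct wasm_stack *
module_allocate_stack(struct module *module)
{
    assert(module != NULL);
    struct wasm_stack *stack = wasm_stack_pool_remove(&module->pools[worker_thread_idx].stack);
    if (stack != NULL) {
        wasm_stack_reinit(stack);                     /* warm path: reuse a pooled stack */
    } else {
        stack = wasm_stack_alloc(module->stack_size); /* pool empty: mmap a new one */
    }
    return stack;
}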
 /**
@@ -72,30 +67,21 @@ sandbox_free_stack(struct sandbox *sandbox)
  * @returns 0 on success, -1 on error
  */
 static inline int
-sandbox_allocate_http_buffers(struct sandbox *self)
+sandbox_allocate_http_buffers(struct sandbox *sandbox)
 {
     int rc;
-    rc = vec_u8_init(&self->request, self->module->max_request_size);
+    rc = vec_u8_init(&sandbox->request, sandbox->module->max_request_size);
     if (rc < 0) return -1;

-    rc = vec_u8_init(&self->response, self->module->max_response_size);
+    rc = vec_u8_init(&sandbox->response, sandbox->module->max_response_size);
     if (rc < 0) {
-        vec_u8_deinit(&self->request);
+        vec_u8_deinit(&sandbox->request);
         return -1;
     }

     return 0;
 }

-static inline struct sandbox *
-sandbox_allocate(void)
-{
-    struct sandbox *sandbox = NULL;
-    size_t page_aligned_sandbox_size = round_up_to_page(sizeof(struct sandbox));
-    sandbox = calloc(1, page_aligned_sandbox_size);
-    sandbox_set_as_allocated(sandbox);
-    return sandbox;
-}
 /**
  * Allocates HTTP buffers and performs our approximation of "WebAssembly instantiation"
@@ -131,7 +117,7 @@ sandbox_prepare_execution_environment(struct sandbox *sandbox)
     /* Initialize the sandbox's context, stack, and instruction pointer */
     /* stack grows down, so set to high address */
-    arch_context_init(&sandbox->ctxt, (reg_t)current_sandbox_start, (reg_t)sandbox->stack.high);
+    arch_context_init(&sandbox->ctxt, (reg_t)current_sandbox_start, (reg_t)sandbox->stack->high);

     rc = 0;
 done:

@@ -185,12 +171,15 @@ sandbox_init(struct sandbox *sandbox, struct module *module, int socket_descript
  * @return the new sandbox request
  */
 struct sandbox *
-sandbox_new(struct module *module, int socket_descriptor, const struct sockaddr *socket_address,
-            uint64_t request_arrival_timestamp, uint64_t admissions_estimate)
+sandbox_alloc(struct module *module, int socket_descriptor, const struct sockaddr *socket_address,
+              uint64_t request_arrival_timestamp, uint64_t admissions_estimate)
 {
-    struct sandbox *sandbox = sandbox_allocate();
-    assert(sandbox);
+    struct sandbox *sandbox = NULL;
+    size_t page_aligned_sandbox_size = round_up_to_page(sizeof(struct sandbox));
+    sandbox = calloc(1, page_aligned_sandbox_size);
+    if (unlikely(sandbox == NULL)) return NULL;
+    sandbox_set_as_allocated(sandbox);

     sandbox_init(sandbox, module, socket_descriptor, socket_address, request_arrival_timestamp,
                  admissions_estimate);

@@ -198,39 +187,33 @@ sandbox_new(struct module *module, int socket_descriptor, const struct sockaddr
     return sandbox;
 }
 /**
  * Free stack and heap resources, as well as any I/O handles
  * @param sandbox
  */
 void
-sandbox_free(struct sandbox *sandbox)
+sandbox_deinit(struct sandbox *sandbox)
 {
     assert(sandbox != NULL);
     assert(sandbox != current_sandbox_get());
     assert(sandbox->state == SANDBOX_ERROR || sandbox->state == SANDBOX_COMPLETE);

-    int rc;

     module_release(sandbox->module);

     /* Linear Memory and Guard Page should already have been munmaped and set to NULL */
     assert(sandbox->memory == NULL);

-    /* Free Sandbox Struct and HTTP Request and Response Buffers */
-    if (likely(sandbox->stack.buffer != NULL)) sandbox_free_stack(sandbox);
-    free(sandbox);
+    /* Return the stack to the module's stack pool */
+    if (likely(sandbox->stack != NULL)) sandbox_free_stack(sandbox);
 }

-    if (rc == -1) {
-        debuglog("Failed to unmap Sandbox %lu\n", sandbox->id);
-        goto err_free_sandbox_failed;
-    };

+/**
+ * Free stack and heap resources, as well as any I/O handles
+ * @param sandbox
+ */
+void
+sandbox_free(struct sandbox *sandbox)
+{
+    assert(sandbox != NULL);
+    assert(sandbox != current_sandbox_get());
+    assert(sandbox->state == SANDBOX_ERROR || sandbox->state == SANDBOX_COMPLETE);

-done:
-    return;
-err_free_sandbox_failed:
-err_free_stack_failed:
-    /* Errors freeing memory is a fatal error */
-    panic("Failed to free Sandbox %lu\n", sandbox->id);
+    sandbox_deinit(sandbox);
+    free(sandbox);
 }
