feat: WIP object pool

mmap-opt
Sean McBride 4 years ago
parent 29af1d7fa5
commit a6689c4823

@ -3,6 +3,8 @@
#include <assert.h>
#include <dlfcn.h>
#include "wasm_types.h"
/* Wasm initialization functions generated by the compiler */
#define AWSM_ABI_INITIALIZE_GLOBALS "populate_globals"
#define AWSM_ABI_INITIALIZE_MEMORY "populate_memory"
@ -10,6 +12,9 @@
#define AWSM_ABI_INITIALIZE_LIBC "wasmf___init_libc"
#define AWSM_ABI_ENTRYPOINT "wasmf_main"
#define AWSM_ABI_STARTING_PAGES "starting_pages"
#define AWSM_ABI_MAX_PAGES "max_pages"
/* functions in the module to lookup and call per sandbox. */
typedef int32_t (*awsm_abi_entrypoint_fn_t)(int32_t a, int32_t b);
typedef void (*awsm_abi_init_globals_fn_t)(void);
@ -24,6 +29,8 @@ struct awsm_abi {
awsm_abi_init_tbl_fn_t initialize_tables;
awsm_abi_init_libc_fn_t initialize_libc;
awsm_abi_entrypoint_fn_t entrypoint;
uint32_t starting_pages;
uint32_t max_pages;
};
/* Initializes the ABI object using the *.so file at path */
@ -76,6 +83,26 @@ awsm_abi_init(struct awsm_abi *abi, char *path)
goto dl_error;
}
abi->starting_pages = *(uint32_t *)dlsym(abi->handle, AWSM_ABI_STARTING_PAGES);
if (abi->starting_pages == 0) {
fprintf(stderr, "Failed to resolve symbol %s in %s with error: %s\n", AWSM_ABI_STARTING_PAGES, path,
dlerror());
goto dl_error;
}
abi->max_pages = *(uint32_t *)dlsym(abi->handle, AWSM_ABI_MAX_PAGES);
if (abi->max_pages == 0) {
/* This seems to not always be present. I assume this is only there if the source module explicitly
* specified this */
abi->max_pages = WASM_MEMORY_PAGES_MAX;
debuglog("max_pages symbols not defined. Defaulting to MAX defined by spec.\n")
// TODO: We need to prove that this actually can get generated by awsm
// fprintf(stderr, "Failed to resolve symbol %s in %s with error: %s\n", AWSM_ABI_MAX_PAGES, path,
// dlerror());
// goto dl_error;
}
done:
return rc;
dl_error:

@ -0,0 +1,109 @@
#pragma once
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include "debuglog.h"
#include "types.h" /* PAGE_SIZE */
/* An expandable linear-memory buffer. The full capacity (`max` bytes of
 * payload, plus this header and a trailing guard page) is reserved up front
 * as inaccessible virtual memory; only the first `size` payload bytes are
 * mapped read/write. Growth just re-protects pages inside the reservation. */
struct buffer {
	uint32_t size;   /* currently committed (read/write) bytes of data[] */
	uint64_t max;    /* reserved payload capacity in bytes (excludes guard page) */
	uint8_t  data[]; /* flexible array member: payload starts right after the header */
};
/**
 * Allocates a buffer backed by a fixed virtual-address reservation.
 * Reserves header + max + one guard page as PROT_NONE, then remaps the
 * header plus the first `initial` payload bytes as read/write.
 *
 * Fix: removed unused locals `error_message` and `rc` from the original.
 *
 * @param initial payload bytes to commit immediately (must be > 0)
 * @param max     payload capacity to reserve (must be > 0)
 * @returns pointer to the initialized buffer, or NULL on mmap failure
 */
static inline struct buffer *
buffer_allocate(size_t initial, size_t max)
{
	assert(initial > 0);
	assert(max > 0);

	/* Reserve the entire capacity plus a trailing guard page, all inaccessible */
	size_t size_to_alloc = sizeof(struct buffer) + max + /* guard page */ PAGE_SIZE;
	// assert(round_up_to_page(size_to_alloc) == size_to_alloc);
	void *addr = mmap(NULL, size_to_alloc, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		debuglog("buffer_allocate - allocation failed, (size: %lu) %s\n", size_to_alloc, strerror(errno));
		return NULL;
	}

	/* Make the header and the initial payload readable/writable */
	size_t size_to_read_write = sizeof(struct buffer) + initial;
	void *addr_rw = mmap(addr, size_to_read_write, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
	                     -1, 0);
	if (addr_rw == MAP_FAILED) {
		perror("buffer_allocate - prot r/w failed");
		/* Roll back the reservation so we do not leak address space */
		munmap(addr, size_to_alloc);
		return NULL;
	}

	struct buffer *self = (struct buffer *)addr_rw;
	self->max  = max;
	self->size = initial; /* NOTE(review): size is uint32_t; assumes initial fits — confirm callers */

	return self;
}
/* Releases the whole reservation backing `self`: the header, the full
 * `max`-byte payload capacity, and the trailing guard page. */
static inline void
buffer_free(struct buffer *self)
{
	size_t reservation = sizeof(struct buffer) + self->max + /* guard page */ PAGE_SIZE;
	munmap(self, reservation);
}
/**
 * Grows the committed (read/write) region of the buffer by `size_to_expand`
 * bytes by re-protecting pages inside the preallocated reservation. The
 * reservation itself never moves or grows.
 *
 * Fix: the original tested `temp == NULL`, but mmap reports failure with
 * MAP_FAILED ((void *)-1), so errors were silently ignored.
 *
 * @param self           buffer previously returned by buffer_allocate
 * @param size_to_expand additional payload bytes to commit
 * @returns 0 on success, -1 if capacity would be exceeded or mmap fails
 */
static inline int
buffer_expand(struct buffer *self, size_t size_to_expand)
{
	if (unlikely(self->size + size_to_expand >= self->max)) {
		debuglog("buffer_expand - Out of Memory!. %u out of %lu\n", self->size, self->max);
		return -1;
	}

	void *temp = mmap(self, sizeof(struct buffer) + self->size + size_to_expand, PROT_READ | PROT_WRITE,
	                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (temp == MAP_FAILED) {
		perror("buffer_expand mmap");
		return -1;
	}

	/* Assumption: We are not actually resizing our buffer capacity. We are just adjusting the R/W rules within a
	 * preallocated buffer of size max */
	assert(self == temp);

	self->size += size_to_expand;
	return 0;
}
/**
 * Sets the committed (read/write) payload region to exactly `target_size`
 * bytes within the preallocated reservation.
 *
 * Fixes: (1) the original tested `temp == NULL`, but mmap reports failure
 * with MAP_FAILED, so errors were silently ignored; (2) the out-of-memory
 * log message said "buffer_expand" instead of "buffer_resize".
 *
 * @param self        buffer previously returned by buffer_allocate
 * @param target_size desired committed payload size in bytes
 * @returns 0 on success, -1 if target exceeds capacity or mmap fails
 */
static inline int
buffer_resize(struct buffer *self, size_t target_size)
{
	if (unlikely(target_size >= self->max)) {
		debuglog("buffer_resize - Out of Memory!. %u out of %lu\n", self->size, self->max);
		return -1;
	}

	void *temp = mmap(self, sizeof(struct buffer) + target_size, PROT_READ | PROT_WRITE,
	                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (temp == MAP_FAILED) {
		perror("buffer_resize mmap");
		return -1;
	}
	assert(self == temp);

	/* Assumption: We are not actually resizing our buffer capacity. We are just adjusting the R/W rules within a
	 * preallocated buffer of size max */
	self->size = target_size;
	return 0;
}

@ -31,11 +31,7 @@ current_sandbox_set(struct sandbox *sandbox)
/* Unpack hierarchy to avoid pointer chasing */
if (sandbox == NULL) {
local_sandbox_context_cache = (struct sandbox_context_cache){
.memory = {
.start = NULL,
.size = 0,
.max = 0,
},
.memory = NULL,
.module_indirect_table = NULL,
};
worker_thread_current_sandbox = NULL;

@ -10,6 +10,7 @@
#include "awsm_abi.h"
#include "http.h"
#include "panic.h"
#include "pool.h"
#include "types.h"
#define MODULE_DEFAULT_REQUEST_RESPONSE_SIZE (PAGE_SIZE)
@ -55,6 +56,7 @@ struct module {
_Atomic uint32_t reference_count; /* ref count how many instances exist here. */
struct indirect_table_entry indirect_table[INDIRECT_TABLE_SIZE];
struct pool ** linear_memory_pool;
};
/*************************

@ -0,0 +1,118 @@
#pragma once
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include "generic_thread.h"
#include "lock.h"
/* A fixed-capacity LIFO object pool (a stack of void pointers).
 * `top` is the index of the most recently freed object, or -1 when empty.
 * When `use_lock` is true, the locked accessors serialize access; the
 * *_nolock variants assert the lock is already held. */
struct pool {
	bool use_lock;     /* whether the locked accessors must be used */
	lock_t lock;       /* protects top/buffer when use_lock is true */
	ssize_t top;       /* index of last stored object; -1 == empty */
	size_t capacity;   /* number of slots in buffer[] */
	void * buffer[];   /* flexible array member holding pooled objects */
};
/* Returns true when the pool holds no objects (top sentinel is -1). */
static inline bool
pool_is_empty(struct pool *self)
{
	bool empty = (self->top == -1);
	return empty;
}
/* Returns true when every slot is occupied, i.e. the next push index
 * would equal capacity. Cast keeps the comparison unsigned/unsigned. */
static inline bool
pool_is_full(struct pool *self)
{
	size_t next_index = (size_t)(self->top + 1);
	return next_index == self->capacity;
}
/* Pops an object without taking the lock. Caller must already hold the
 * lock if the pool is lock-protected (enforced by the assert below).
 * Returns NULL when the pool is empty. */
static inline void *
pool_allocate_object_nolock(struct pool *self)
{
	assert(self != NULL);
	assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));

	if (pool_is_empty(self)) return NULL;

	return self->buffer[self->top--];
}
/* Pops an object from a lock-protected pool. Uses an unlocked fast-path
 * emptiness check, then re-checks under the lock (another thread may have
 * drained the pool in between). Returns NULL when empty. */
static inline void *
pool_allocate_object(struct pool *self)
{
	assert(self != NULL);
	assert(self->use_lock);

	/* Racy fast path: skip the lock when the pool looks empty */
	if (pool_is_empty(self)) return NULL;

	void *result = NULL;
	LOCK_LOCK(&self->lock);
	if (!pool_is_empty(self)) {
		result = self->buffer[self->top--];
		assert(result != NULL);
	}
	LOCK_UNLOCK(&self->lock);

	return result;
}
/* Pushes an object without taking the lock. Caller must already hold the
 * lock if the pool is lock-protected (enforced by the assert below).
 * Returns 0 on success, -1 when the pool is full. */
static inline int
pool_free_object_nolock(struct pool *self, void *obj)
{
	assert(self != NULL);
	assert(obj != NULL);
	assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));

	if (pool_is_full(self)) return -1;

	self->top += 1;
	self->buffer[self->top] = obj;
	return 0;
}
/* Pushes an object into a lock-protected pool. Uses an unlocked fast-path
 * fullness check, then re-checks under the lock (another thread may have
 * filled the pool in between). Returns 0 on success, -1 when full. */
static inline int
pool_free_object(struct pool *self, void *obj)
{
	assert(self != NULL);
	assert(obj != NULL);
	assert(self->use_lock);

	/* Racy fast path: skip the lock when the pool looks full */
	if (pool_is_full(self)) return -1;

	int rc = -1;
	LOCK_LOCK(&self->lock);
	if (!pool_is_full(self)) {
		self->buffer[++self->top] = obj;
		rc = 0;
	}
	LOCK_UNLOCK(&self->lock);

	return rc;
}
/**
 * Allocates and initializes a pool with `capacity` slots.
 *
 * Fix: the original dereferenced the calloc result without a NULL check,
 * faulting on allocation failure; now propagates NULL to the caller.
 * Also drops the unnecessary cast of calloc (C, not C++).
 *
 * @param capacity number of object slots
 * @param use_lock true to make the locked accessors usable
 * @returns initialized pool, or NULL on allocation failure
 */
static inline struct pool *
pool_init(size_t capacity, bool use_lock)
{
	/* calloc zero-initializes and guards the size multiplication */
	struct pool *self = calloc(1, sizeof(struct pool) + capacity * sizeof(void *));
	if (self == NULL) return NULL;

	self->top      = -1; /* sentinel: pool starts empty */
	self->capacity = capacity;
	self->use_lock = use_lock;
	if (use_lock) LOCK_INIT(&self->lock);

	return self;
}
static inline void
pool_free(struct pool *self)
{
while (!pool_is_empty(self)) free(pool_allocate_object(self));
free(self);
}

@ -5,6 +5,7 @@
#include <stdatomic.h>
#include <stdbool.h>
#include "buffer.h"
#include "likely.h"
#include "types.h"
@ -33,6 +34,23 @@ extern uint32_t runtime_worker_threads_count;
extern int * runtime_worker_threads_argument;
extern uint64_t * runtime_worker_threads_deadline;
/* memory also provides the table access functions */
#define INDIRECT_TABLE_SIZE (1 << 10)
struct indirect_table_entry {
uint32_t type_id;
void * func_pointer;
};
/* Cache of Frequently Accessed Members used to avoid pointer chasing */
struct sandbox_context_cache {
struct buffer * memory;
struct indirect_table_entry *module_indirect_table;
};
extern thread_local struct sandbox_context_cache local_sandbox_context_cache;
extern void runtime_initialize(void);
extern void runtime_set_pthread_prio(pthread_t thread, unsigned int nice);
extern void runtime_set_resource_limits_to_max(void);

@ -6,6 +6,7 @@
#include "client_socket.h"
#include "panic.h"
#include "pool.h"
#include "sandbox_request.h"
/***************************
@ -35,9 +36,9 @@ sandbox_close_http(struct sandbox *sandbox)
static inline void
sandbox_free_linear_memory(struct sandbox *sandbox)
{
int rc = munmap(sandbox->memory.start, sandbox->memory.max + PAGE_SIZE);
if (rc == -1) panic("sandbox_free_linear_memory - munmap failed\n");
sandbox->memory.start = NULL;
if (pool_free_object(sandbox->module->linear_memory_pool[worker_thread_idx], sandbox) < 0) {
buffer_free(sandbox->memory);
}
}
/**

@ -35,15 +35,15 @@ sandbox_perf_log_print_entry(struct sandbox *sandbox)
* becomes more intelligent, then peak linear memory size needs to be tracked
separately from current linear memory size.
*/
fprintf(sandbox_perf_log, "%lu,%s,%d,%s,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%u,%u\n",
fprintf(sandbox_perf_log, "%lu,%s,%d,%s,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%u,%u\n",
sandbox->id, sandbox->module->name, sandbox->module->port, sandbox_state_stringify(sandbox->state),
sandbox->module->relative_deadline, sandbox->total_time, queued_duration,
sandbox->duration_of_state[SANDBOX_UNINITIALIZED], sandbox->duration_of_state[SANDBOX_ALLOCATED],
sandbox->duration_of_state[SANDBOX_INITIALIZED], sandbox->duration_of_state[SANDBOX_RUNNABLE],
sandbox->duration_of_state[SANDBOX_PREEMPTED], sandbox->duration_of_state[SANDBOX_RUNNING_SYS],
sandbox->duration_of_state[SANDBOX_RUNNING_USER], sandbox->duration_of_state[SANDBOX_ASLEEP],
sandbox->duration_of_state[SANDBOX_RETURNED], sandbox->duration_of_state[SANDBOX_COMPLETE],
sandbox->duration_of_state[SANDBOX_ERROR], runtime_processor_speed_MHz, sandbox->memory.size);
sandbox->duration_of_state[SANDBOX_UNINITIALIZED], sandbox->duration_of_state[SANDBOX_INITIALIZED],
sandbox->duration_of_state[SANDBOX_RUNNABLE], sandbox->duration_of_state[SANDBOX_PREEMPTED],
sandbox->duration_of_state[SANDBOX_RUNNING_SYS], sandbox->duration_of_state[SANDBOX_RUNNING_USER],
sandbox->duration_of_state[SANDBOX_ASLEEP], sandbox->duration_of_state[SANDBOX_RETURNED],
sandbox->duration_of_state[SANDBOX_COMPLETE], sandbox->duration_of_state[SANDBOX_ERROR],
runtime_processor_speed_MHz, sandbox->memory->size);
}
static inline void

@ -19,39 +19,47 @@
* @param allocation_timestamp timestamp of allocation
*/
static inline void
sandbox_set_as_initialized(struct sandbox *sandbox, struct sandbox_request *sandbox_request,
uint64_t allocation_timestamp)
sandbox_set_as_initialized(struct sandbox *self, struct sandbox_request *sandbox_request, uint64_t allocation_timestamp)
{
assert(sandbox);
assert(sandbox->state == SANDBOX_ALLOCATED);
assert(sandbox_request != NULL);
assert(self);
assert(self->state == SANDBOX_UNINITIALIZED);
assert(self != NULL);
assert(allocation_timestamp > 0);
sandbox->state = SANDBOX_INITIALIZED;
uint64_t now = __getcycles();
/* Copy State from Sandbox Request */
sandbox->id = sandbox_request->id;
sandbox->absolute_deadline = sandbox_request->absolute_deadline;
sandbox->admissions_estimate = sandbox_request->admissions_estimate;
sandbox->client_socket_descriptor = sandbox_request->socket_descriptor;
sandbox->timestamp_of.request_arrival = sandbox_request->request_arrival_timestamp;
/* Copy the socket descriptor and address of the client invocation */
memcpy(&sandbox->client_address, &sandbox_request->socket_address, sizeof(struct sockaddr));
uint64_t now = __getcycles();
self->id = sandbox_request->id;
self->state = SANDBOX_INITIALIZED;
#ifdef LOG_STATE_CHANGES
sandbox_state_history_append(self, SANDBOX_UNINITIALIZED);
memset(&sandbox->state_history, 0, SANDBOX_STATE_HISTORY_CAPACITY * sizeof(sandbox_state_t));
sandbox->state_history_count = 0;
sandbox->state_history[sandbox->state_history_count++] = SANDBOX_UNINITIALIZED;
#endif
#ifdef LOG_SANDBOX_MEMORY_PROFILE
self->timestamp_of.page_allocations_size = 0;
#endif
/* Initialize the sandbox's context, stack, and instruction pointer */
/* stack.start points to the bottom of the usable stack, so add stack_size to get to top */
arch_context_init(&sandbox->ctxt, (reg_t)current_sandbox_start,
(reg_t)sandbox->stack.start + sandbox->stack.size);
ps_list_init_d(self);
self->absolute_deadline = sandbox_request->absolute_deadline;
self->admissions_estimate = sandbox_request->admissions_estimate;
self->client_socket_descriptor = sandbox_request->socket_descriptor;
self->timestamp_of.request_arrival = sandbox_request->request_arrival_timestamp;
/* Copy the socket descriptor and address of the client invocation */
memcpy(&self->client_address, &sandbox_request->socket_address, sizeof(struct sockaddr));
/* Initialize Parsec control structures */
ps_list_init_d(sandbox);
/* Allocations require the module to be set */
self->module = sandbox_request->module;
module_acquire(self->module);
memset(&self->duration_of_state, 0, SANDBOX_STATE_COUNT * sizeof(uint64_t));
/* State Change Bookkeeping */
sandbox->duration_of_state[SANDBOX_ALLOCATED] = now - allocation_timestamp;
sandbox->timestamp_of.allocation = allocation_timestamp;
sandbox->timestamp_of.last_state_change = allocation_timestamp;
sandbox_state_history_append(sandbox, SANDBOX_INITIALIZED);
self->duration_of_state[SANDBOX_UNINITIALIZED] = now - allocation_timestamp;
self->timestamp_of.allocation = allocation_timestamp;
self->timestamp_of.last_state_change = allocation_timestamp;
sandbox_state_history_append(self, SANDBOX_INITIALIZED);
runtime_sandbox_total_increment(SANDBOX_INITIALIZED);
}

@ -16,8 +16,8 @@ sandbox_setup_arguments(struct sandbox *sandbox)
assert(sandbox != NULL);
int32_t argument_count = 0;
/* whatever gregor has, to be able to pass arguments to a module! */
sandbox->arguments_offset = local_sandbox_context_cache.memory.size;
assert(local_sandbox_context_cache.memory.start == sandbox->memory.start);
sandbox->arguments_offset = local_sandbox_context_cache.memory->size;
assert(local_sandbox_context_cache.memory->data == sandbox->memory->data);
expand_memory();
int32_t string_off = sandbox->arguments_offset;

@ -9,7 +9,6 @@
typedef enum
{
SANDBOX_UNINITIALIZED = 0, /* Assumption: mmap zeros out structure */
SANDBOX_ALLOCATED,
SANDBOX_INITIALIZED,
SANDBOX_RUNNABLE,
SANDBOX_PREEMPTED,

@ -3,12 +3,25 @@
#include "sandbox_state.h"
#include "sandbox_types.h"
/* TODO: Define a struct and make the first argument a struct sandbox_state_history */
static inline void
sandbox_state_history_append(struct sandbox *sandbox, sandbox_state_t state)
sandbox_state_history_append(struct sandbox *self, sandbox_state_t state)
{
#ifdef LOG_STATE_CHANGES
if (likely(sandbox->state_history_count < SANDBOX_STATE_HISTORY_CAPACITY)) {
sandbox->state_history[sandbox->state_history_count++] = state;
if (likely(self->state_history_count < SANDBOX_STATE_HISTORY_CAPACITY)) {
sandbox->state_history[self->state_history_count++] = state;
}
#endif
}
static inline void
sandbox_state_history_init(struct sandbox *self)
{
#ifdef LOG_STATE_CHANGES
sandbox_state_history_append(self, SANDBOX_UNINITIALIZED);
memset(&sandbox->state_history, 0, SANDBOX_STATE_HISTORY_CAPACITY * sizeof(sandbox_state_t));
sandbox->state_history_count = 0;
sandbox->state_history[sandbox->state_history_count++] = SANDBOX_UNINITIALIZED;
#endif
}

@ -96,7 +96,7 @@ struct sandbox {
/* WebAssembly Instance State */
struct arch_context ctxt;
struct sandbox_stack stack;
struct wasm_memory memory;
struct buffer * memory;
/* Scheduling and Temporal State */
struct sandbox_timestamps timestamp_of;

@ -4,8 +4,6 @@
#include <stdio.h>
#include <threads.h>
#include "wasm_types.h"
/* For this family of macros, do NOT pass zero as the pow2 */
#define round_to_pow2(x, pow2) (((unsigned long)(x)) & (~((pow2)-1)))
#define round_up_to_pow2(x, pow2) (round_to_pow2(((unsigned long)(x)) + (pow2)-1, (pow2)))
@ -20,18 +18,10 @@
#define PAGE_SIZE (unsigned long)(1 << 12)
#define WEAK __attribute__((weak))
/* memory also provides the table access functions */
#define INDIRECT_TABLE_SIZE (1 << 10)
struct indirect_table_entry {
uint32_t type_id;
void * func_pointer;
};
/* Cache of Frequently Accessed Members used to avoid pointer chasing */
struct sandbox_context_cache {
struct wasm_memory memory;
struct indirect_table_entry *module_indirect_table;
};
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
extern thread_local struct sandbox_context_cache local_sandbox_context_cache;
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#endif

@ -8,10 +8,3 @@
#define WASM_MEMORY_PAGES_MAX (1 << 15) /* 32,768 Pages ~4GB */
#define WASM_STACK_SIZE (1 << 19) /* 512KB */
/* bytes, not wasm pages */
struct wasm_memory {
void * start;
uint32_t size;
uint64_t max;
};

@ -17,11 +17,7 @@
thread_local struct sandbox *worker_thread_current_sandbox = NULL;
thread_local struct sandbox_context_cache local_sandbox_context_cache = {
.memory = {
.start = NULL,
.size = 0,
.max = 0,
},
.memory = NULL,
.module_indirect_table = NULL,
};
@ -95,7 +91,7 @@ current_sandbox_exit()
assert(worker_thread_base_context.variant == ARCH_CONTEXT_VARIANT_FAST);
arch_context_switch(current_context, &worker_thread_base_context);
/* The schduler should never switch back to completed sandboxes */
/* The scheduler should never switch back to completed sandboxes */
assert(0);
}

@ -219,7 +219,7 @@ wasm_mmap(int32_t addr, int32_t len, int32_t prot, int32_t flags, int32_t fd, in
assert(len % WASM_PAGE_SIZE == 0);
int32_t result = local_sandbox_context_cache.memory.size;
int32_t result = local_sandbox_context_cache.memory->size;
for (int i = 0; i < len / WASM_PAGE_SIZE; i++) { expand_memory(); }
return result;
@ -318,7 +318,7 @@ wasm_mremap(int32_t offset, int32_t old_size, int32_t new_size, int32_t flags)
if (new_size <= old_size) return offset;
// If at end of linear memory, just expand and return same address
if (offset + old_size == local_sandbox_context_cache.memory.size) {
if (offset + old_size == local_sandbox_context_cache.memory->size) {
int32_t amount_to_expand = new_size - old_size;
int32_t pages_to_allocate = amount_to_expand / WASM_PAGE_SIZE;
if (amount_to_expand % WASM_PAGE_SIZE > 0) pages_to_allocate++;
@ -330,13 +330,13 @@ wasm_mremap(int32_t offset, int32_t old_size, int32_t new_size, int32_t flags)
// Otherwise allocate at end of address space and copy
int32_t pages_to_allocate = new_size / WASM_PAGE_SIZE;
if (new_size % WASM_PAGE_SIZE > 0) pages_to_allocate++;
int32_t new_offset = local_sandbox_context_cache.memory.size;
int32_t new_offset = local_sandbox_context_cache.memory->size;
for (int i = 0; i < pages_to_allocate; i++) expand_memory();
// Get pointer of old offset and pointer of new offset
char *linear_mem = local_sandbox_context_cache.memory.start;
char *src = &linear_mem[offset];
char *dest = &linear_mem[new_offset];
uint8_t *linear_mem = local_sandbox_context_cache.memory->data;
uint8_t *src = &linear_mem[offset];
uint8_t *dest = &linear_mem[new_offset];
// Copy Values. We can use memcpy because we don't overlap
memcpy((void *)dest, (void *)src, old_size);

@ -0,0 +1,3 @@
#include "pool.h"

/* Global table of linear-memory pools, one `struct pool *` slot per worker
 * thread; presumably indexed by worker_thread_idx — TODO confirm against
 * the allocation site. */
struct pool **runtime_linear_memory_pools;

@ -17,27 +17,10 @@ expand_memory(void)
struct sandbox *sandbox = current_sandbox_get();
assert(sandbox->state == SANDBOX_RUNNING_USER || sandbox->state == SANDBOX_RUNNING_SYS);
assert(local_sandbox_context_cache.memory.size % WASM_PAGE_SIZE == 0);
assert(local_sandbox_context_cache.memory->size % WASM_PAGE_SIZE == 0);
/* Return -1 if we've hit the linear memory max */
if (unlikely(local_sandbox_context_cache.memory.size + WASM_PAGE_SIZE
>= local_sandbox_context_cache.memory.max)) {
debuglog("expand_memory - Out of Memory!. %u out of %lu\n", local_sandbox_context_cache.memory.size,
local_sandbox_context_cache.memory.max);
return -1;
}
// Remap the relevant wasm page to readable
char *mem_as_chars = local_sandbox_context_cache.memory.start;
char *page_address = &mem_as_chars[local_sandbox_context_cache.memory.size];
void *map_result = mmap(page_address, WASM_PAGE_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (map_result == MAP_FAILED) {
debuglog("Mapping of new memory failed");
return -1;
}
local_sandbox_context_cache.memory.size += WASM_PAGE_SIZE;
if (unlikely(buffer_expand(local_sandbox_context_cache.memory, WASM_PAGE_SIZE) == -1)) return -1;
#ifdef LOG_SANDBOX_MEMORY_PROFILE
// Cache the runtime of the first N page allocations
@ -48,8 +31,6 @@ expand_memory(void)
}
#endif
// local_sandbox_context_cache is "forked state", so update authoritative member
sandbox->memory.size = local_sandbox_context_cache.memory.size;
return 0;
}
@ -59,10 +40,10 @@ get_memory_ptr_for_runtime(uint32_t offset, uint32_t bounds_check)
// Due to how we setup memory for x86, the virtual memory mechanism will catch the error, if bounds <
// WASM_PAGE_SIZE
assert(bounds_check < WASM_PAGE_SIZE
|| (local_sandbox_context_cache.memory.size > bounds_check
&& offset <= local_sandbox_context_cache.memory.size - bounds_check));
|| (local_sandbox_context_cache.memory->size > bounds_check
&& offset <= local_sandbox_context_cache.memory->size - bounds_check));
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
char *mem_as_chars = (char *)local_sandbox_context_cache.memory->data;
char *address = &mem_as_chars[offset];
return address;
@ -77,7 +58,7 @@ get_memory_ptr_for_runtime(uint32_t offset, uint32_t bounds_check)
int32_t
instruction_memory_grow(uint32_t count)
{
int rc = local_sandbox_context_cache.memory.size / WASM_PAGE_SIZE;
int rc = local_sandbox_context_cache.memory->size / WASM_PAGE_SIZE;
for (int i = 0; i < count; i++) {
if (unlikely(expand_memory() != 0)) {

@ -6,12 +6,12 @@
/* Region initialization helper function */
EXPORT void
initialize_region(uint32_t offset, uint32_t data_count, char *data)
initialize_region(uint32_t offset, uint32_t region_size, uint8_t region[region_size])
{
assert(local_sandbox_context_cache.memory.size >= data_count);
assert(offset < local_sandbox_context_cache.memory.size - data_count);
assert(local_sandbox_context_cache.memory->size >= region_size);
assert(offset < local_sandbox_context_cache.memory->size - region_size);
memcpy(get_memory_ptr_for_runtime(offset, data_count), data, data_count);
memcpy(get_memory_ptr_for_runtime(offset, region_size), region, region_size);
}
void

@ -122,6 +122,12 @@ module_free(struct module *module)
close(module->socket_descriptor);
awsm_abi_deinit(&module->abi);
for (int i = 0; i < runtime_worker_threads_count; i++) pool_free(module->linear_memory_pool[i]);
free(module->linear_memory_pool);
/* Initialize per worker linear memory pools */
module->linear_memory_pool = calloc(runtime_worker_threads_count, sizeof(struct pool *));
free(module);
}
@ -206,6 +212,10 @@ module_new(char *name, char *path, uint32_t stack_size, uint32_t max_memory, uin
module_initialize_table(module);
local_sandbox_context_cache.module_indirect_table = NULL;
/* Initialize per worker linear memory pools */
module->linear_memory_pool = calloc(runtime_worker_threads_count, sizeof(struct pool *));
for (int i = 0; i < runtime_worker_threads_count; i++) { module->linear_memory_pool[i] = pool_init(10, true); }
/* Start listening for requests */
rc = module_listen(module);
if (rc < 0) goto err_listen;

@ -5,9 +5,11 @@
#include "current_sandbox.h"
#include "debuglog.h"
#include "panic.h"
#include "pool.h"
#include "sandbox_functions.h"
#include "sandbox_set_as_error.h"
#include "sandbox_set_as_initialized.h"
#include "buffer.h"
/**
* Allocates a WebAssembly sandbox represented by the following layout
@ -15,74 +17,27 @@
* @param module the module that we want to run
* @returns the resulting sandbox or NULL if mmap failed
*/
static inline struct sandbox *
sandbox_allocate_memory(struct module *module)
static inline int
sandbox_allocate_linear_memory(struct sandbox *self)
{
assert(module != NULL);
char * error_message = NULL;
unsigned long memory_size = WASM_PAGE_SIZE * WASM_MEMORY_PAGES_INITIAL; /* The initial pages */
uint64_t memory_max = (uint64_t)WASM_PAGE_SIZE * WASM_MEMORY_PAGES_MAX;
struct sandbox *sandbox = NULL;
unsigned long page_aligned_sandbox_size = round_up_to_page(sizeof(struct sandbox));
assert(self != NULL);
unsigned long size_to_alloc = memory_max + /* guard page */ PAGE_SIZE;
unsigned long size_to_read_write = memory_size;
char * error_message = NULL;
uint64_t memory_max = (uint64_t)WASM_PAGE_SIZE * WASM_MEMORY_PAGES_MAX;
/*
* Control information should be page-aligned
*/
assert(round_up_to_page(size_to_alloc) == size_to_alloc);
/* At an address of the system's choosing, allocate the memory, marking it as inaccessible */
errno = 0;
void *addr = mmap(NULL, size_to_alloc, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
error_message = "sandbox_allocate_memory - memory allocation failed";
goto alloc_failed;
}
struct buffer *linear_memory = (struct buffer *)pool_allocate_object(
self->module->linear_memory_pool[worker_thread_idx]);
assert(addr != NULL);
size_t initial = (size_t)self->module->abi.starting_pages * WASM_PAGE_SIZE;
size_t max = (size_t)WASM_MEMORY_PAGES_MAX * WASM_PAGE_SIZE;
/* Set the struct sandbox, HTTP Req/Resp buffer, and the initial Wasm Pages as read/write */
errno = 0;
void *addr_rw = mmap(addr, size_to_read_write, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-1, 0);
if (addr_rw == MAP_FAILED) {
error_message = "set to r/w";
goto set_rw_failed;
if (linear_memory == NULL) {
linear_memory = buffer_allocate(initial, max);
if (unlikely(linear_memory == NULL)) return -1;
}
sandbox = calloc(1, page_aligned_sandbox_size);
/* Populate Sandbox members */
sandbox->state = SANDBOX_UNINITIALIZED;
sandbox->module = module;
module_acquire(module);
sandbox->request.base = calloc(1, module->max_request_size);
sandbox->request.length = 0;
sandbox->response.base = calloc(1, module->max_response_size);
sandbox->response.length = 0;
sandbox->memory.start = addr_rw;
sandbox->memory.size = memory_size;
sandbox->memory.max = memory_max;
memset(&sandbox->duration_of_state, 0, SANDBOX_STATE_COUNT * sizeof(uint64_t));
done:
return sandbox;
set_rw_failed:
sandbox = NULL;
errno = 0;
int rc = munmap(addr, size_to_alloc);
if (rc == -1) perror("Failed to munmap after fail to set r/w");
alloc_failed:
err:
perror(error_message);
goto done;
self->memory = linear_memory;
return 0;
}
static inline int
@ -123,6 +78,20 @@ err_stack_allocation_failed:
goto done;
}
static inline int
sandbox_allocate_http_buffers(struct sandbox *self)
{
self->request.base = calloc(1, self->module->max_request_size);
if (self->request.base == NULL) return -1;
self->request.length = 0;
self->response.base = calloc(1, self->module->max_response_size);
if (self->response.base == NULL) return -1;
self->response.length = 0;
return 0;
}
/**
* Allocates a new sandbox from a sandbox request
* Frees the sandbox request on success
@ -135,51 +104,58 @@ sandbox_allocate(struct sandbox_request *sandbox_request)
/* Validate Arguments */
assert(sandbox_request != NULL);
struct sandbox *sandbox;
char * error_message = "";
uint64_t now = __getcycles();
char * error_message = "";
uint64_t now = __getcycles();
int rc;
struct sandbox *self = NULL;
size_t page_aligned_sandbox_size = round_up_to_page(sizeof(struct sandbox));
self = calloc(1, page_aligned_sandbox_size);
if (self == NULL) goto err_struct_allocation_failed;
/* Allocate Sandbox control structures, buffers, and linear memory in a 4GB address space */
sandbox = sandbox_allocate_memory(sandbox_request->module);
if (!sandbox) {
error_message = "failed to allocate sandbox heap and linear memory";
/* Set state to initializing */
sandbox_set_as_initialized(self, sandbox_request, now);
if (sandbox_allocate_http_buffers(self)) {
error_message = "failed to allocate http buffers";
goto err_http_allocation_failed;
}
/* Allocate linear memory in a 4GB address space */
if (sandbox_allocate_linear_memory(self)) {
error_message = "failed to allocate sandbox linear memory";
goto err_memory_allocation_failed;
}
/* Allocate the Stack */
if (sandbox_allocate_stack(sandbox) < 0) {
if (sandbox_allocate_stack(self) < 0) {
error_message = "failed to allocate sandbox stack";
goto err_stack_allocation_failed;
}
sandbox->state = SANDBOX_ALLOCATED;
#ifdef LOG_STATE_CHANGES
sandbox->state_history_count = 0;
sandbox->state_history[sandbox->state_history_count++] = SANDBOX_ALLOCATED;
memset(&sandbox->state_history, 0, SANDBOX_STATE_HISTORY_CAPACITY * sizeof(sandbox_state_t));
#endif
/* Set state to initializing */
sandbox_set_as_initialized(sandbox, sandbox_request, now);
/* Initialize the sandbox's context, stack, and instruction pointer */
/* stack.start points to the bottom of the usable stack, so add stack_size to get to top */
arch_context_init(&self->ctxt, (reg_t)current_sandbox_start, (reg_t)self->stack.start + self->stack.size);
free(sandbox_request);
done:
return sandbox;
return self;
err_stack_allocation_failed:
/*
* This is a degenerate sandbox that never successfully completed initialization, so we need to
* hand jam some things to be able to cleanly transition to ERROR state
*/
sandbox->state = SANDBOX_UNINITIALIZED;
sandbox->timestamp_of.last_state_change = now;
#ifdef LOG_SANDBOX_MEMORY_PROFILE
sandbox->timestamp_of.page_allocations_size = 0;
#endif
ps_list_init_d(sandbox);
self->state = SANDBOX_UNINITIALIZED;
self->timestamp_of.last_state_change = now;
ps_list_init_d(self);
err_memory_allocation_failed:
sandbox_set_as_error(sandbox, SANDBOX_UNINITIALIZED);
err_http_allocation_failed:
sandbox_set_as_error(self, SANDBOX_UNINITIALIZED);
perror(error_message);
sandbox = NULL;
err_struct_allocation_failed:
self = NULL;
goto done;
}
@ -218,7 +194,7 @@ sandbox_free(struct sandbox *sandbox)
*/
/* Linear Memory and Guard Page should already have been munmaped and set to NULL */
assert(sandbox->memory.start == NULL);
assert(sandbox->memory->data == NULL);
free(sandbox->request.base);
free(sandbox->response.base);

@ -10,7 +10,6 @@
const char *sandbox_state_labels[SANDBOX_STATE_COUNT] = {
[SANDBOX_UNINITIALIZED] = "Uninitialized",
[SANDBOX_ALLOCATED] = "Allocated",
[SANDBOX_INITIALIZED] = "Initialized",
[SANDBOX_RUNNABLE] = "Runnable",
[SANDBOX_PREEMPTED] = "Preempted",

Loading…
Cancel
Save