feat: wasm_stack object pool

master
Sean McBride 3 years ago
parent 2311ec1ccf
commit b3dc7db31c

@ -102,6 +102,7 @@
"software_interrupt_counts.h": "c",
"sandbox_set_as_running_sys.h": "c",
"wasm_module_instance.h": "c",
"wasm_stack.h": "c"
},
"files.exclude": {
"**/.git": true,

@ -12,6 +12,8 @@
#include "panic.h"
#include "pool.h"
#include "types.h"
#include "wasm_stack.h"
#include "wasm_memory.h"
#define MODULE_DEFAULT_REQUEST_RESPONSE_SIZE (PAGE_SIZE)
@ -20,6 +22,9 @@
extern thread_local int worker_thread_idx;
INIT_POOL(wasm_memory, wasm_memory_delete)
INIT_POOL(wasm_stack, wasm_stack_delete)
/*
 * Defines the listen backlog, the queue length for completely established sockets waiting to be accepted
* If this value is greater than the value in /proc/sys/net/core/somaxconn (typically 128), then it is silently
@ -39,7 +44,8 @@ extern thread_local int worker_thread_idx;
#define MAX_WORKER_THREADS 64
struct module_pools {
struct pool memory[MAX_WORKER_THREADS];
struct wasm_memory_pool memory[MAX_WORKER_THREADS];
struct wasm_stack_pool stack[MAX_WORKER_THREADS];
};
struct module {
@ -152,12 +158,33 @@ module_release(struct module *module)
return;
}
/**
 * Obtains a wasm_stack for a sandbox of this module, preferring reuse.
 * Checks the calling worker thread's per-module stack pool first and only
 * falls back to a fresh allocation when the pool is empty.
 * @param self the module the sandbox belongs to (must not be NULL)
 * @returns a usable stack, or NULL if a fresh allocation was needed and failed
 */
static inline struct wasm_stack *
module_allocate_stack(struct module *self)
{
	assert(self != NULL);

	/* Fast path: reuse a stack cached on this worker's pool */
	struct wasm_stack *reused = wasm_stack_pool_remove_nolock(&self->pools.stack[worker_thread_idx]);
	if (reused != NULL) return reused;

	/* Slow path: pool was empty, allocate a new stack of the module's configured size.
	 * wasm_stack_new returns NULL on failure, which we propagate to the caller. */
	return wasm_stack_new(self->stack_size);
}
/**
 * Returns a sandbox's stack to the calling worker thread's per-module pool.
 * The stack is scrubbed and reset (wasm_stack_reinit) before being cached so
 * a later module_allocate_stack hands out a clean stack.
 * @param self the owning module
 * @param stack the stack being released
 */
static inline void
module_free_stack(struct module *self, struct wasm_stack *stack)
{
	struct wasm_stack_pool *worker_pool = &self->pools.stack[worker_thread_idx];

	wasm_stack_reinit(stack);
	wasm_stack_pool_add_nolock(worker_pool, stack);
}
static inline struct wasm_memory *
module_allocate_linear_memory(struct module *module)
{
assert(module != NULL);
char *error_message = NULL;
size_t initial = (size_t)module->abi.starting_pages * WASM_PAGE_SIZE;
size_t max = (size_t)module->abi.max_pages * WASM_PAGE_SIZE;
@ -165,10 +192,9 @@ module_allocate_linear_memory(struct module *module)
assert(initial <= (size_t)UINT32_MAX + 1);
assert(max <= (size_t)UINT32_MAX + 1);
struct wasm_memory *linear_memory = (struct wasm_memory *)pool_remove_nolock(
&module->pools.memory[worker_thread_idx]);
struct wasm_memory *linear_memory = wasm_memory_pool_remove_nolock(&module->pools.memory[worker_thread_idx]);
if (linear_memory == NULL) {
linear_memory = wasm_memory_allocate(initial, max);
linear_memory = wasm_memory_new(initial, max);
if (unlikely(linear_memory == NULL)) return NULL;
}
@ -180,7 +206,7 @@ module_free_linear_memory(struct module *module, struct wasm_memory *memory)
{
wasm_memory_wipe(memory);
wasm_memory_reinit(memory, module->abi.starting_pages * WASM_PAGE_SIZE);
pool_add_nolock(&module->pools.memory[worker_thread_idx], memory);
wasm_memory_pool_add_nolock(&module->pools.memory[worker_thread_idx], memory);
}
/********************************

@ -7,113 +7,96 @@
#include "generic_thread.h"
#include "lock.h"
#include "ps_list.h"
#include "wasm_memory.h"
/* A free list of reusable wasm_memory objects, optionally lock-protected */
struct pool {
	bool use_lock;            /* true => pool is shared across threads and guards itself with `lock` */
	lock_t lock;              /* only initialized/used when use_lock is true (see pool_init) */
	struct ps_list_head list; /* intrusive list head; pooled wasm_memory objects link themselves in */
};
/* Reports whether the pool currently caches no objects.
 * Performs no locking; callers that need a stable answer under concurrency
 * must hold the pool's lock themselves. */
static inline bool
pool_is_empty(struct pool *self)
{
	assert(self != NULL);

	bool is_empty = ps_list_head_empty(&self->list);
	return is_empty;
}
/* Initializes an empty pool.
 * @param use_lock when true, the pool initializes its lock and may be shared
 *        across threads via pool_remove / pool_add */
static inline void
pool_init(struct pool *self, bool use_lock)
{
	self->use_lock = use_lock;
	ps_list_head_init(&self->list);
	if (use_lock) { LOCK_INIT(&self->lock); }
}
static inline void
pool_deinit(struct pool *self)
{
if (pool_is_empty(self)) return;
struct wasm_memory *iterator = NULL;
struct wasm_memory *buffer = NULL;
ps_list_foreach_del_d(&self->list, iterator, buffer)
{
ps_list_rem_d(iterator);
wasm_memory_free(iterator);
#define INIT_POOL(STRUCT_NAME, DTOR_FN) \
struct STRUCT_NAME##_pool { \
bool use_lock; \
lock_t lock; \
struct ps_list_head list; \
}; \
\
static inline bool STRUCT_NAME##_pool_is_empty(struct STRUCT_NAME##_pool *self) \
{ \
assert(self != NULL); \
\
return ps_list_head_empty(&self->list); \
} \
\
static inline void STRUCT_NAME##_pool_init(struct STRUCT_NAME##_pool *self, bool use_lock) \
{ \
ps_list_head_init(&self->list); \
self->use_lock = use_lock; \
if (use_lock) LOCK_INIT(&self->lock); \
} \
\
static inline void STRUCT_NAME##_pool_deinit(struct STRUCT_NAME##_pool *self) \
{ \
if (STRUCT_NAME##_pool_is_empty(self)) return; \
struct STRUCT_NAME *iterator = NULL; \
struct STRUCT_NAME *buffer = NULL; \
ps_list_foreach_del_d(&self->list, iterator, buffer) \
{ \
ps_list_rem_d(iterator); \
DTOR_FN(iterator); \
} \
} \
\
static inline struct STRUCT_NAME *STRUCT_NAME##_pool_remove_nolock(struct STRUCT_NAME##_pool *self) \
{ \
assert(self != NULL); \
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); \
\
struct STRUCT_NAME *obj = NULL; \
\
if (STRUCT_NAME##_pool_is_empty(self)) return obj; \
\
obj = ps_list_head_first_d(&self->list, struct STRUCT_NAME); \
assert(obj); \
ps_list_rem_d(obj); \
\
return obj; \
} \
\
static inline struct STRUCT_NAME *STRUCT_NAME##_pool_remove(struct STRUCT_NAME##_pool *self) \
{ \
assert(self != NULL); \
assert(self->use_lock); \
\
struct STRUCT_NAME *obj = NULL; \
\
if (STRUCT_NAME##_pool_is_empty(self)) return obj; \
\
LOCK_LOCK(&self->lock); \
if (STRUCT_NAME##_pool_is_empty(self)) { \
LOCK_UNLOCK(&self->lock); \
return obj; \
} \
\
obj = ps_list_head_first_d(&self->list, struct STRUCT_NAME); \
assert(obj); \
ps_list_rem_d(obj); \
LOCK_UNLOCK(&self->lock); \
return obj; \
} \
\
static inline int STRUCT_NAME##_pool_add_nolock(struct STRUCT_NAME##_pool *self, struct STRUCT_NAME *obj) \
{ \
assert(self != NULL); \
assert(obj != NULL); \
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); \
\
ps_list_head_add_d(&self->list, obj); \
return 0; \
} \
\
static inline int STRUCT_NAME##_pool_add(struct STRUCT_NAME##_pool *self, struct STRUCT_NAME *obj) \
{ \
assert(self != NULL); \
assert(obj != NULL); \
assert(self->use_lock); \
\
LOCK_LOCK(&self->lock); \
ps_list_head_add_d(&self->list, obj); \
LOCK_UNLOCK(&self->lock); \
return 0; \
}
}
/**
 * Removes the first cached wasm_memory object from the pool without acquiring
 * the lock. Legal only for lock-free pools, or when the caller already holds
 * the pool's lock (enforced by the assert).
 * @returns the removed object, or NULL when the pool is empty
 */
static inline struct wasm_memory *
pool_remove_nolock(struct pool *self)
{
	assert(self != NULL);
	assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
	struct wasm_memory *obj = NULL;
	if (pool_is_empty(self)) return obj;
	obj = ps_list_head_first_d(&self->list, struct wasm_memory);
	assert(obj);
	ps_list_rem_d(obj);
	return obj;
}
/**
 * Removes the first cached wasm_memory object, taking the pool's lock.
 * Only valid on pools initialized with use_lock = true.
 * @returns the removed object, or NULL when the pool is empty
 */
static inline struct wasm_memory *
pool_remove(struct pool *self)
{
	assert(self != NULL);
	assert(self->use_lock);
	struct wasm_memory *obj = NULL;
	/* Opportunistic unlocked check: bail out cheaply when the pool looks empty */
	if (pool_is_empty(self)) return obj;
	LOCK_LOCK(&self->lock);
	/* Re-check under the lock: another thread may have drained the pool
	 * between the unlocked check above and lock acquisition */
	if (pool_is_empty(self)) {
		LOCK_UNLOCK(&self->lock);
		return obj;
	}
	obj = ps_list_head_first_d(&self->list, struct wasm_memory);
	assert(obj);
	ps_list_rem_d(obj);
	LOCK_UNLOCK(&self->lock);
	return obj;
}
/**
 * Caches a wasm_memory object on the pool without acquiring the lock.
 * Legal only for lock-free pools, or when the caller already holds the lock.
 * @returns 0 (always succeeds; the intrusive list needs no allocation)
 */
static inline int
pool_add_nolock(struct pool *self, struct wasm_memory *obj)
{
	assert(self != NULL);
	assert(obj != NULL);
	assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
	ps_list_head_add_d(&self->list, obj);
	return 0;
}
/**
 * Caches a wasm_memory object on the pool, taking the pool's lock.
 * Only valid on pools initialized with use_lock = true.
 * @returns 0 (always succeeds; the intrusive list needs no allocation)
 */
static inline int
pool_add(struct pool *self, struct wasm_memory *obj)
{
	assert(self != NULL);
	assert(obj != NULL);
	assert(self->use_lock);
	LOCK_LOCK(&self->lock);
	ps_list_head_add_d(&self->list, obj);
	LOCK_UNLOCK(&self->lock);
	return 0;
}
/**
 * Drains every cached wasm_memory object, releasing each with its proper
 * destructor, then frees the pool structure itself.
 * Only valid on pools initialized with use_lock = true (pool_remove asserts
 * this) and on pool structs that were themselves heap-allocated.
 */
static inline void
pool_free(struct pool *self)
{
	/* BUGFIX: pooled wasm_memory objects are mmap-allocated (see
	 * wasm_memory_allocate), so they must be released with wasm_memory_free
	 * (munmap) — exactly as pool_deinit does. Passing an mmap'd pointer to
	 * libc free() is undefined behavior. */
	while (!pool_is_empty(self)) wasm_memory_free(pool_remove(self));
	free(self);
}

@ -58,7 +58,7 @@ struct sandbox {
/* WebAssembly Instance State */
struct arch_context ctxt;
struct wasm_stack stack;
struct wasm_stack * stack;
struct wasm_memory *memory;
/* Scheduling and Temporal State */

@ -22,7 +22,7 @@ struct wasm_memory {
};
static inline struct wasm_memory *
wasm_memory_allocate(size_t initial, size_t max)
wasm_memory_new(size_t initial, size_t max)
{
assert(initial > 0);
assert(initial <= (size_t)UINT32_MAX + 1);
@ -33,7 +33,7 @@ wasm_memory_allocate(size_t initial, size_t max)
size_t size_to_alloc = sizeof(struct wasm_memory) + WASM_MEMORY_MAX + /* guard page */ PAGE_SIZE;
void * temp = mmap(NULL, size_to_alloc, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (temp == MAP_FAILED) {
fprintf(stderr, "wasm_memory_allocate - allocation failed, (size: %lu) %s\n", size_to_alloc,
fprintf(stderr, "wasm_memory_new - allocation failed, (size: %lu) %s\n", size_to_alloc,
strerror(errno));
return NULL;
}
@ -44,7 +44,7 @@ wasm_memory_allocate(size_t initial, size_t max)
int rc = mprotect(self, size_to_read_write, PROT_READ | PROT_WRITE);
if (rc != 0) {
perror("wasm_memory_allocate - prot r/w failed");
perror("wasm_memory_new - prot r/w failed");
munmap(self, size_to_alloc);
assert(0);
return NULL;
@ -58,7 +58,7 @@ wasm_memory_allocate(size_t initial, size_t max)
}
static inline void
wasm_memory_free(struct wasm_memory *self)
wasm_memory_delete(struct wasm_memory *self)
{
size_t size_to_free = sizeof(struct wasm_memory) + WASM_MEMORY_MAX + /* guard page */ PAGE_SIZE;
munmap(self, size_to_free);

@ -4,15 +4,22 @@
#include <stdlib.h>
#include <sys/mman.h>
#include "sandbox_types.h"
/* This structure is not suitable for a flexible array member because it allocates a guard page beneath the buffer. This
* negates the benefit of tight locality */
struct wasm_stack {
size_t capacity; /* Usable capacity. Excludes size of guard page that we need to free */
uint8_t *high; /* The highest address of the stack. Grows down from here */
uint8_t *low; /* The address of the lowest usable address. Above guard page */
uint8_t *buffer; /* Points to Guard Page */
struct ps_list list; /* Linked List Node used for object pool */
size_t capacity; /* Usable capacity. Excludes size of guard page that we need to free */
uint8_t * high; /* The highest address of the stack. Grows down from here */
uint8_t * low; /* The address of the lowest usable address. Above guard page */
uint8_t * buffer; /* Points to Guard Page */
};
/* Heap-allocates a zeroed wasm_stack control structure. The stack buffer
 * itself is mapped separately by wasm_stack_init. Returns NULL on OOM. */
static inline struct wasm_stack *
wasm_stack_allocate(void)
{
	struct wasm_stack *self = calloc(1, sizeof *self);
	return self;
}
/**
* Allocates a static sized stack for a sandbox with a guard page underneath
* Because a stack grows down, this protects against stack overflow
@ -21,48 +28,93 @@ struct wasm_stack {
* @returns 0 on success, -1 on error
*/
static inline int
wasm_stack_allocate(struct wasm_stack *stack, size_t capacity)
wasm_stack_init(struct wasm_stack *self, size_t capacity)
{
assert(stack);
assert(self);
int rc = 0;
stack->buffer = (uint8_t *)mmap(NULL, /* guard page */ PAGE_SIZE + capacity, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (unlikely(stack->buffer == MAP_FAILED)) {
self->buffer = (uint8_t *)mmap(NULL, /* guard page */ PAGE_SIZE + capacity, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (unlikely(self->buffer == MAP_FAILED)) {
perror("sandbox allocate stack");
goto err_stack_allocation_failed;
}
stack->low = (uint8_t *)mmap(stack->buffer + /* guard page */ PAGE_SIZE, capacity, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (unlikely(stack->low == MAP_FAILED)) {
self->low = (uint8_t *)mmap(self->buffer + /* guard page */ PAGE_SIZE, capacity, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (unlikely(self->low == MAP_FAILED)) {
perror("sandbox set stack read/write");
goto err_stack_prot_failed;
}
stack->capacity = capacity;
stack->high = stack->low + capacity;
ps_list_init_d(self);
self->capacity = capacity;
self->high = self->low + capacity;
rc = 0;
done:
return rc;
err_stack_prot_failed:
rc = munmap(stack->buffer, PAGE_SIZE + capacity);
rc = munmap(self->buffer, PAGE_SIZE + capacity);
if (rc == -1) perror("munmap");
err_stack_allocation_failed:
stack->buffer = NULL;
rc = -1;
self->buffer = NULL;
rc = -1;
goto done;
}
static inline void
wasm_stack_free(struct wasm_stack *stack)
wasm_stack_free(struct wasm_stack *self)
{
assert(stack != NULL);
assert(stack->buffer != NULL);
free(self);
}
/**
 * Allocates and initializes a wasm_stack with a guard page beneath the buffer
 * @param capacity usable stack size in bytes (guard page is added on top)
 * @returns a ready-to-use stack, or NULL on allocation/initialization failure
 */
static inline struct wasm_stack *
wasm_stack_new(size_t capacity)
{
	struct wasm_stack *self = wasm_stack_allocate();
	/* BUGFIX: calloc inside wasm_stack_allocate can fail; without this check
	 * wasm_stack_init would dereference NULL once asserts are compiled out */
	if (self == NULL) return NULL;
	int rc = wasm_stack_init(self, capacity);
	if (rc < 0) {
		wasm_stack_free(self);
		return NULL;
	}
	return self;
}
static inline void
wasm_stack_deinit(struct wasm_stack *self)
{
assert(self != NULL);
assert(self->buffer != NULL);
/* The stack start is the bottom of the usable stack, but we allocated a guard page below this */
int rc = munmap(stack->buffer, stack->capacity + PAGE_SIZE);
stack->buffer = NULL;
if (unlikely(rc == -1)) perror("munmap");
munmap(self->buffer, self->capacity + PAGE_SIZE);
self->buffer = NULL;
self->high = NULL;
self->low = NULL;
}
/**
 * Fully destroys a stack created by wasm_stack_new: unmaps the stack buffer
 * and guard page (deinit), then frees the control structure itself.
 * Must not be called on a stack whose buffer was already released.
 */
static inline void
wasm_stack_delete(struct wasm_stack *self)
{
	assert(self != NULL);
	assert(self->buffer != NULL);
	wasm_stack_deinit(self);
	wasm_stack_free(self);
}
/**
 * Resets an initialized stack for reuse from the object pool: re-derives
 * low/high from the existing mapping, zeroes the usable region so the next
 * sandbox starts from clean memory, and reinitializes the intrusive list node.
 */
static inline void
wasm_stack_reinit(struct wasm_stack *self)
{
	assert(self != NULL);
	assert(self->buffer != NULL);

	/* usable region starts just above the guard page */
	uint8_t *usable_base = self->buffer + PAGE_SIZE;
	memset(usable_base, 0, self->capacity);

	self->low  = usable_base;
	self->high = usable_base + self->capacity;
	ps_list_init_d(self);
}

@ -210,7 +210,10 @@ module_new(char *name, char *path, uint32_t stack_size, uint32_t relative_deadli
module_initialize_table(module);
current_wasm_module_instance.table = NULL;
for (int i = 0; i < MAX_WORKER_THREADS; i++) { pool_init(&module->pools.memory[i], false); }
for (int i = 0; i < MAX_WORKER_THREADS; i++) {
wasm_memory_pool_init(&module->pools.memory[i], false);
wasm_stack_pool_init(&module->pools.stack[i], false);
}
/* Start listening for requests */
rc = module_listen(module);

@ -47,7 +47,10 @@ sandbox_allocate_stack(struct sandbox *sandbox)
assert(sandbox);
assert(sandbox->module);
return wasm_stack_allocate(&sandbox->stack, sandbox->module->stack_size);
sandbox->stack = module_allocate_stack(sandbox->module);
if (sandbox->stack == NULL) return -1;
return 0;
}
static inline void
@ -55,7 +58,7 @@ sandbox_free_stack(struct sandbox *sandbox)
{
assert(sandbox);
return wasm_stack_free(&sandbox->stack);
return module_free_stack(sandbox->module, sandbox->stack);
}
/**
@ -122,7 +125,7 @@ sandbox_prepare_execution_environemnt(struct sandbox *sandbox)
/* Initialize the sandbox's context, stack, and instruction pointer */
/* stack grows down, so set to high address */
arch_context_init(&sandbox->ctxt, (reg_t)current_sandbox_start, (reg_t)sandbox->stack.high);
arch_context_init(&sandbox->ctxt, (reg_t)current_sandbox_start, (reg_t)sandbox->stack->high);
rc = 0;
done:
@ -210,7 +213,7 @@ sandbox_free(struct sandbox *sandbox)
/* Free Sandbox Struct and HTTP Request and Response Buffers */
if (likely(sandbox->stack.buffer != NULL)) sandbox_free_stack(sandbox);
if (likely(sandbox->stack->buffer != NULL)) sandbox_free_stack(sandbox);
free(sandbox);
if (rc == -1) {

Loading…
Cancel
Save