refactor: cleanup linear memory code

mmap-opt
Sean McBride 4 years ago
parent 7e46f58d0f
commit ef09fcc3f1

@ -102,7 +102,10 @@
"sandbox_set_as_returned.h": "c", "sandbox_set_as_returned.h": "c",
"wasm_linear_memory.h": "c", "wasm_linear_memory.h": "c",
"software_interrupt_counts.h": "c", "software_interrupt_counts.h": "c",
"sandbox_set_as_running_sys.h": "c" "sandbox_set_as_running_sys.h": "c",
"compiletime.h": "c",
"sandbox_context_cache.h": "c",
"wasm_indirect_table.h": "c"
}, },
"files.exclude": { "files.exclude": {
"**/.git": true, "**/.git": true,

@ -1,182 +1,405 @@
#include <assert.h> #include <assert.h>
#include <assert.h>
#include <math.h>
#include "types.h" #include "types.h"
#include "sandbox_context_cache.h"
uint32_t /* This file contains the stub functions that the aWsm compiler expects
* This corresponds to awsm/src/codegen/runtime_stubs.rs
* This should be linked with the *.bc file generated by aWsm in order to compile a module as a *.so
*/
extern thread_local struct sandbox_context_cache local_sandbox_context_cache;
/**
 * aWsm compiler stub: copies a data-segment region into the current sandbox's
 * linear memory at the given offset (delegates to wasm_linear_memory_initialize_region).
 * @param offset destination offset within the linear memory
 * @param region_size size of the region in bytes
 * @param region source bytes to copy (VLA-typed parameter, length region_size)
 */
EXPORT void
initialize_region(uint32_t offset, uint32_t region_size, uint8_t region[region_size])
{
	wasm_linear_memory_initialize_region(local_sandbox_context_cache.memory, offset, region_size, region);
}
EXPORT uint32_t
instruction_memory_size() instruction_memory_size()
{ {
return local_sandbox_context_cache.memory.size / WASM_PAGE_SIZE; return wasm_linear_memory_get_page_count(local_sandbox_context_cache.memory);
} }
// All of these are pretty generic /**
INLINE float * @brief Stub that implements the WebAssembly memory.grow instruction
get_f32(uint32_t offset) *
* @param count number of pages to grow the WebAssembly linear memory by
* @return The previous size of the linear memory in pages or -1 if enough memory cannot be allocated
*/
EXPORT int32_t
instruction_memory_grow(uint32_t count)
{ {
assert(offset + sizeof(float) <= local_sandbox_context_cache.memory.size); int rc = local_sandbox_context_cache.memory->size / WASM_PAGE_SIZE;
/* Return -1 if we've hit the linear memory max */
if (unlikely(wasm_linear_memory_expand(local_sandbox_context_cache.memory, WASM_PAGE_SIZE * count) == -1))
return -1;
#ifdef LOG_SANDBOX_MEMORY_PROFILE
// Cache the runtime of the first N page allocations
for (int i = 0; i < count; i++) {
if (likely(sandbox->timestamp_of.page_allocations_size < SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT)) {
sandbox->timestamp_of.page_allocations[sandbox->timestamp_of.page_allocations_size++] =
sandbox->duration_of_state.running
+ (uint32_t)(__getcycles() - sandbox->timestamp_of.last_state_change);
}
}
#endif
return rc;
}
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start; EXPORT float
void *address = &mem_as_chars[offset]; get_f32(uint32_t offset)
{
return wasm_linear_memory_get_float(local_sandbox_context_cache.memory, offset);
}
return *(float *)address; EXPORT void
set_f32(uint32_t offset, float v)
{
wasm_linear_memory_set_float(local_sandbox_context_cache.memory, offset, v);
} }
INLINE double EXPORT double
get_f64(uint32_t offset) get_f64(uint32_t offset)
{ {
assert(offset + sizeof(double) <= local_sandbox_context_cache.memory.size); return wasm_linear_memory_get_double(local_sandbox_context_cache.memory, offset);
}
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
void *address = &mem_as_chars[offset];
return *(double *)address; EXPORT void
set_f64(uint32_t offset, double v)
{
wasm_linear_memory_set_double(local_sandbox_context_cache.memory, offset, v);
} }
INLINE int8_t EXPORT int8_t
get_i8(uint32_t offset) get_i8(uint32_t offset)
{ {
assert(offset + sizeof(int8_t) <= local_sandbox_context_cache.memory.size); return wasm_linear_memory_get_int8(local_sandbox_context_cache.memory, offset);
}
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
void *address = &mem_as_chars[offset];
return *(int8_t *)address; EXPORT void
set_i8(uint32_t offset, int8_t v)
{
wasm_linear_memory_set_int8(local_sandbox_context_cache.memory, offset, v);
} }
INLINE int16_t EXPORT int16_t
get_i16(uint32_t offset) get_i16(uint32_t offset)
{ {
assert(offset + sizeof(int16_t) <= local_sandbox_context_cache.memory.size); return wasm_linear_memory_get_int16(local_sandbox_context_cache.memory, offset);
}
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
void *address = &mem_as_chars[offset];
return *(int16_t *)address; EXPORT void
set_i16(uint32_t offset, int16_t v)
{
wasm_linear_memory_set_int16(local_sandbox_context_cache.memory, offset, v);
} }
INLINE int32_t EXPORT int32_t
get_i32(uint32_t offset) get_i32(uint32_t offset)
{ {
assert(offset + sizeof(int32_t) <= local_sandbox_context_cache.memory.size); return wasm_linear_memory_get_int32(local_sandbox_context_cache.memory, offset);
}
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
void *address = &mem_as_chars[offset];
return *(int32_t *)address; EXPORT void
set_i32(uint32_t offset, int32_t v)
{
wasm_linear_memory_set_int32(local_sandbox_context_cache.memory, offset, v);
} }
INLINE int64_t EXPORT int64_t
get_i64(uint32_t offset) get_i64(uint32_t offset)
{ {
assert(offset + sizeof(int64_t) <= local_sandbox_context_cache.memory.size); return wasm_linear_memory_get_int64(local_sandbox_context_cache.memory, offset);
}
EXPORT void
set_i64(uint32_t offset, int64_t v)
{
wasm_linear_memory_set_int64(local_sandbox_context_cache.memory, offset, v);
}
EXPORT void
add_function_to_table(uint32_t idx, uint32_t type_id, char *pointer)
{
assert(idx < INDIRECT_TABLE_SIZE);
assert(local_sandbox_context_cache.module_indirect_table != NULL);
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start; /* TODO: atomic for multiple concurrent invocations? Issue #97 */
void *address = &mem_as_chars[offset]; if (local_sandbox_context_cache.module_indirect_table[idx].type_id == type_id
&& local_sandbox_context_cache.module_indirect_table[idx].func_pointer == pointer)
return;
return *(int64_t *)address; local_sandbox_context_cache.module_indirect_table[idx] = (struct indirect_table_entry){
.type_id = type_id, .func_pointer = pointer
};
} }
INLINE int32_t /*
* Table handling functionality
* This was moved from compiletime in order to place the
* function in the callstack in GDB. It can be moved back
* to runtime/compiletime/memory/64bit_nix.c to remove the
* additional function call
*/
char *
get_function_from_table(uint32_t idx, uint32_t type_id)
{
#ifdef LOG_FUNCTION_TABLE
fprintf(stderr, "get_function_from_table(idx: %u, type_id: %u)\n", idx, type_id);
fprintf(stderr, "indirect_table_size: %u\n", INDIRECT_TABLE_SIZE);
#endif
assert(idx < INDIRECT_TABLE_SIZE);
struct indirect_table_entry f = local_sandbox_context_cache.module_indirect_table[idx];
#ifdef LOG_FUNCTION_TABLE
fprintf(stderr, "assumed type: %u, type in table: %u\n", type_id, f.type_id);
#endif
// FIXME: Commented out function type check because of gocr
// assert(f.type_id == type_id);
assert(f.func_pointer != NULL);
return f.func_pointer;
}
EXPORT int32_t
get_global_i32(uint32_t offset) get_global_i32(uint32_t offset)
{ {
return get_i32(offset); return get_i32(offset);
} }
INLINE int64_t EXPORT void
set_global_i32(uint32_t offset, int32_t v)
{
set_i32(offset, v);
}
EXPORT int64_t
get_global_i64(uint32_t offset) get_global_i64(uint32_t offset)
{ {
return get_i64(offset); return get_i64(offset);
} }
// Now setting routines EXPORT void
INLINE void set_global_i64(uint32_t offset, int64_t v)
set_f32(uint32_t offset, float v)
{ {
assert(offset + sizeof(float) <= local_sandbox_context_cache.memory.size); set_i64(offset, v);
}
#define CHAR_BIT 8
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start; // TODO: Throughout here we use `assert` for error conditions, which isn't optimal
void *address = &mem_as_chars[offset]; // Instead we should use `unlikely` branches to a single trapping function (which should optimize better)
// The below functions are for implementing WASM instructions
// ROTL and ROTR helper functions
INLINE uint32_t
rotl_u32(uint32_t n, uint32_t c_u32)
{
// WASM requires a modulus here (usually a single bitwise op, but it means we need no assert)
unsigned int c = c_u32 % (CHAR_BIT * sizeof(n));
const unsigned int mask = (CHAR_BIT * sizeof(n) - 1); // assumes width is a power of 2.
*(float *)address = v; c &= mask;
return (n << c) | (n >> ((-c) & mask));
} }
INLINE void INLINE uint32_t
set_f64(uint32_t offset, double v) rotr_u32(uint32_t n, uint32_t c_u32)
{ {
assert(offset + sizeof(double) <= local_sandbox_context_cache.memory.size); // WASM requires a modulus here (usually a single bitwise op, but it means we need no assert)
unsigned int c = c_u32 % (CHAR_BIT * sizeof(n));
const unsigned int mask = (CHAR_BIT * sizeof(n) - 1);
c &= mask;
return (n >> c) | (n << ((-c) & mask));
}
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start; INLINE uint64_t
void *address = &mem_as_chars[offset]; rotl_u64(uint64_t n, uint64_t c_u64)
{
// WASM requires a modulus here (usually a single bitwise op, but it means we need no assert)
unsigned int c = c_u64 % (CHAR_BIT * sizeof(n));
const unsigned int mask = (CHAR_BIT * sizeof(n) - 1); // assumes width is a power of 2.
*(double *)address = v; c &= mask;
return (n << c) | (n >> ((-c) & mask));
} }
INLINE void INLINE uint64_t
set_i8(uint32_t offset, int8_t v) rotr_u64(uint64_t n, uint64_t c_u64)
{ {
assert(offset + sizeof(int8_t) <= local_sandbox_context_cache.memory.size); // WASM requires a modulus here (usually a single bitwise op, but it means we need no assert)
unsigned int c = c_u64 % (CHAR_BIT * sizeof(n));
const unsigned int mask = (CHAR_BIT * sizeof(n) - 1);
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start; c &= mask;
void *address = &mem_as_chars[offset]; return (n >> c) | (n << ((-c) & mask));
}
*(int8_t *)address = v; // Now safe division and remainder
INLINE uint32_t
u32_div(uint32_t a, uint32_t b)
{
assert(b);
return a / b;
} }
INLINE void INLINE uint32_t
set_i16(uint32_t offset, int16_t v) u32_rem(uint32_t a, uint32_t b)
{ {
assert(offset + sizeof(int16_t) <= local_sandbox_context_cache.memory.size); assert(b);
return a % b;
}
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start; INLINE int32_t
void *address = &mem_as_chars[offset]; i32_div(int32_t a, int32_t b)
{
assert(b && (a != INT32_MIN || b != -1));
return a / b;
}
*(int16_t *)address = v; INLINE int32_t
i32_rem(int32_t a, int32_t b)
{
assert(b && (a != INT32_MIN || b != -1));
return a % b;
} }
INLINE void INLINE uint64_t
set_i32(uint32_t offset, int32_t v) u64_div(uint64_t a, uint64_t b)
{ {
assert(offset + sizeof(int32_t) <= local_sandbox_context_cache.memory.size); assert(b);
return a / b;
}
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start; INLINE uint64_t
void *address = &mem_as_chars[offset]; u64_rem(uint64_t a, uint64_t b)
{
assert(b);
return a % b;
}
*(int32_t *)address = v; INLINE int64_t
i64_div(int64_t a, int64_t b)
{
assert(b && (a != INT64_MIN || b != -1));
return a / b;
} }
INLINE void INLINE int64_t
set_i64(uint32_t offset, int64_t v) i64_rem(int64_t a, int64_t b)
{ {
assert(offset + sizeof(int64_t) <= local_sandbox_context_cache.memory.size); assert(b && (a != INT64_MIN || b != -1));
return a % b;
}
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start; // float to integer conversion methods
void *address = &mem_as_chars[offset]; // In C, float => int conversions always truncate
// If a int2float(int::min_value) <= float <= int2float(int::max_value), it must always be safe to truncate it
/**
 * Truncating float -> uint32_t conversion for the WASM i32.trunc_f32_u instruction.
 * Bug fix: the old bound `f <= (float)UINT32_MAX` is wrong because UINT32_MAX is
 * not representable as a float and rounds up to 4294967296.0f (2^32), so an input
 * of exactly 2^32 passed the assert yet the conversion itself was undefined
 * behavior (C11 6.3.1.4). Use a strict upper bound of 2^32 instead.
 * @param f value to truncate (NaN fails the assert since both comparisons are false)
 * @return f truncated toward zero
 */
uint32_t
u32_trunc_f32(float f)
{
	assert(f >= 0.0f && f < 4294967296.0f);
	return (uint32_t)f;
}
*(int64_t *)address = v; int32_t
i32_trunc_f32(float f)
{
assert(INT32_MIN <= f && f <= (float)INT32_MAX);
return (int32_t)f;
} }
INLINE void uint32_t
set_global_i32(uint32_t offset, int32_t v) u32_trunc_f64(double f)
{ {
set_i32(offset, v); assert(0 <= f && f <= (double)UINT32_MAX);
return (uint32_t)f;
} }
INLINE void int32_t
set_global_i64(uint32_t offset, int64_t v) i32_trunc_f64(double f)
{ {
set_i64(offset, v); assert(INT32_MIN <= f && f <= (double)INT32_MAX);
return (int32_t)f;
} }
// Table handling functionality uint64_t
// INLINE char * u64_trunc_f32(float f)
// get_function_from_table(uint32_t idx, uint32_t type_id) {
// { assert(0 <= f && f <= (float)UINT64_MAX);
// assert(idx < INDIRECT_TABLE_SIZE); return (uint64_t)f;
}
// struct indirect_table_entry f = local_sandbox_context_cache.module_indirect_table[idx]; int64_t
i64_trunc_f32(float f)
{
assert(INT64_MIN <= f && f <= (float)INT64_MAX);
return (int64_t)f;
}
// // FIXME: Commented out function type check because of gocr uint64_t
// // assert(f.type_id == type_id); u64_trunc_f64(double f)
{
assert(0 <= f && f <= (double)UINT64_MAX);
return (uint64_t)f;
}
int64_t
i64_trunc_f64(double f)
{
assert(INT64_MIN <= f && f <= (double)INT64_MAX);
return (int64_t)f;
}
// Float => Float truncation functions
/**
 * Truncate toward zero for the WASM f32.trunc instruction.
 * Uses truncf (the float variant) instead of trunc, avoiding an implicit
 * float -> double -> float round trip; the result is identical but the intent
 * and codegen are cleaner.
 */
INLINE float
f32_trunc_f32(float f)
{
	return truncf(f);
}
/**
 * Minimum of two floats, used to implement the WASM f32.min instruction.
 * NOTE(review): this returns b whenever a is NaN (a < b is false) and treats
 * -0.0 and +0.0 as equal, whereas the WebAssembly spec propagates NaN and
 * orders -0 below +0 — confirm this deviation is intentional.
 */
INLINE float
f32_min(float a, float b)
{
	return a < b ? a : b;
}
// assert(f.func_pointer); INLINE float
f32_max(float a, float b)
{
return a > b ? a : b;
}
// return f.func_pointer; INLINE float
// } f32_floor(float a)
{
return floor(a);
}
/**
 * Minimum of two doubles, used to implement the WASM f64.min instruction.
 * NOTE(review): returns b whenever a is NaN and ignores the sign of zero;
 * the WebAssembly spec propagates NaN and orders -0 below +0 — verify.
 */
INLINE double
f64_min(double a, double b)
{
	return a < b ? a : b;
}
/**
 * Maximum of two doubles, used to implement the WASM f64.max instruction.
 * NOTE(review): returns b whenever a is NaN and ignores the sign of zero;
 * the WebAssembly spec propagates NaN and orders +0 above -0 — verify.
 */
INLINE double
f64_max(double a, double b)
{
	return a > b ? a : b;
}
/** Round toward negative infinity, implementing the WASM f64.floor instruction. */
INLINE double
f64_floor(double a)
{
	return floor(a);
}

@ -3,12 +3,11 @@
#include <threads.h> #include <threads.h>
#include "sandbox_types.h" #include "sandbox_types.h"
#include "sandbox_context_cache.h"
/* current sandbox that is active.. */ /* current sandbox that is active.. */
extern thread_local struct sandbox *worker_thread_current_sandbox; extern thread_local struct sandbox *worker_thread_current_sandbox;
extern thread_local struct sandbox_context_cache local_sandbox_context_cache;
void current_sandbox_start(void); void current_sandbox_start(void);
/** /**

@ -1,6 +1,7 @@
#pragma once #pragma once
#include <pthread.h> #include <pthread.h>
#include <sched.h>
#include <stdint.h> #include <stdint.h>
#include <stdio.h> #include <stdio.h>

@ -12,6 +12,7 @@
#include "panic.h" #include "panic.h"
#include "pool.h" #include "pool.h"
#include "types.h" #include "types.h"
#include "wasm_indirect_table.h"
#define MODULE_DEFAULT_REQUEST_RESPONSE_SIZE (PAGE_SIZE) #define MODULE_DEFAULT_REQUEST_RESPONSE_SIZE (PAGE_SIZE)

@ -47,32 +47,13 @@ extern uint32_t runtime_worker_threads_count;
extern int * runtime_worker_threads_argument; extern int * runtime_worker_threads_argument;
extern uint64_t * runtime_worker_threads_deadline; extern uint64_t * runtime_worker_threads_deadline;
/* memory also provides the table access functions */
#define INDIRECT_TABLE_SIZE (1 << 10)
struct indirect_table_entry {
uint32_t type_id;
void * func_pointer;
};
/* Cache of Frequently Accessed Members used to avoid pointer chasing */
struct sandbox_context_cache {
struct wasm_linear_memory * memory;
struct indirect_table_entry *module_indirect_table;
};
extern thread_local struct sandbox_context_cache local_sandbox_context_cache;
extern void runtime_initialize(void); extern void runtime_initialize(void);
extern void runtime_set_pthread_prio(pthread_t thread, unsigned int nice); extern void runtime_set_pthread_prio(pthread_t thread, unsigned int nice);
extern void runtime_set_resource_limits_to_max(void); extern void runtime_set_resource_limits_to_max(void);
/* External Symbols */ /* External Symbols */
extern void alloc_linear_memory(void); extern void alloc_linear_memory(void);
extern int expand_memory(void);
INLINE char *get_function_from_table(uint32_t idx, uint32_t type_id); INLINE char *get_function_from_table(uint32_t idx, uint32_t type_id);
INLINE char *get_memory_ptr_for_runtime(uint32_t offset, uint32_t bounds_check);
extern void stub_init(int32_t offset); extern void stub_init(int32_t offset);
static inline char * static inline char *

@ -0,0 +1,11 @@
#pragma once
#include "wasm_linear_memory.h"
#include "wasm_indirect_table.h"
/* Cache of Frequently Accessed Members used to avoid pointer chasing */
struct sandbox_context_cache {
	struct wasm_linear_memory * memory;                /* linear memory of the currently running sandbox */
	struct indirect_table_entry *module_indirect_table; /* indirect-call table of the current module */
};

/* One cache per worker thread; refreshed when the active sandbox changes */
extern thread_local struct sandbox_context_cache local_sandbox_context_cache;

@ -36,9 +36,12 @@ sandbox_close_http(struct sandbox *sandbox)
static inline void static inline void
sandbox_free_linear_memory(struct sandbox *sandbox) sandbox_free_linear_memory(struct sandbox *sandbox)
{ {
/* TODO Replace pool with parsec linked list */
wasm_linear_memory_wipe(sandbox->memory);
if (pool_free_object(sandbox->module->linear_memory_pool[worker_thread_idx], sandbox) < 0) { if (pool_free_object(sandbox->module->linear_memory_pool[worker_thread_idx], sandbox) < 0) {
wasm_linear_memory_free(sandbox->memory); wasm_linear_memory_free(sandbox->memory);
} }
sandbox->memory = NULL;
} }
/** /**

@ -15,12 +15,13 @@ sandbox_setup_arguments(struct sandbox *sandbox)
{ {
assert(sandbox != NULL); assert(sandbox != NULL);
int32_t argument_count = 0; int32_t argument_count = 0;
/* whatever gregor has, to be able to pass arguments to a module! */
sandbox->arguments_offset = local_sandbox_context_cache.memory->size;
assert(local_sandbox_context_cache.memory->data == sandbox->memory->data);
expand_memory();
int32_t string_off = sandbox->arguments_offset; /* Copy arguments into linear memory. It seems like malloc would clobber this, but I think this goes away in
* WASI, so not worth fixing*/
stub_init(string_off); sandbox->arguments_offset = wasm_linear_memory_get_size(sandbox->memory);
int rc = wasm_linear_memory_expand(sandbox->memory, WASM_PAGE_SIZE);
assert(rc == 0);
stub_init(sandbox->arguments_offset);
} }

@ -0,0 +1,11 @@
#pragma once
#include <stdint.h>
/* memory also provides the table access functions */
/* Fixed capacity of the per-module indirect function table (1024 entries) */
#define INDIRECT_TABLE_SIZE (1 << 10)

/* One slot of the WASM indirect-call table: a function pointer tagged with its type id */
struct indirect_table_entry {
	uint32_t type_id;      /* id of the function's WASM type signature */
	void *   func_pointer; /* native address of the compiled function */
};

@ -14,8 +14,9 @@
#define WASM_LINEAR_MEMORY_MAX (size_t) UINT32_MAX + 1 #define WASM_LINEAR_MEMORY_MAX (size_t) UINT32_MAX + 1
struct wasm_linear_memory { struct wasm_linear_memory {
size_t size; /* Initial Size in bytes */ size_t size; /* Initial Size in bytes */
size_t max; /* Soft cap in bytes. Defaults to 4GB */ size_t capacity; /* Size backed by actual pages */
size_t max; /* Soft cap in bytes. Defaults to 4GB */
uint8_t data[]; uint8_t data[];
}; };
@ -27,35 +28,30 @@ wasm_linear_memory_allocate(size_t initial, size_t max)
assert(max > 0); assert(max > 0);
assert(max <= (size_t)UINT32_MAX + 1); assert(max <= (size_t)UINT32_MAX + 1);
char * error_message = NULL; /* Allocate contiguous virtual addresses for struct, full linear memory, and guard page */
int rc = 0;
struct wasm_linear_memory *self = NULL;
/* Allocate contiguous virtual addresses and map to fault */
size_t size_to_alloc = sizeof(struct wasm_linear_memory) + WASM_LINEAR_MEMORY_MAX + /* guard page */ PAGE_SIZE; size_t size_to_alloc = sizeof(struct wasm_linear_memory) + WASM_LINEAR_MEMORY_MAX + /* guard page */ PAGE_SIZE;
void * temp = mmap(NULL, size_to_alloc, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
void *addr = mmap(NULL, size_to_alloc, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (temp == MAP_FAILED) {
if (addr == MAP_FAILED) {
debuglog("wasm_linear_memory_allocate - allocation failed, (size: %lu) %s\n", size_to_alloc, debuglog("wasm_linear_memory_allocate - allocation failed, (size: %lu) %s\n", size_to_alloc,
strerror(errno)); strerror(errno));
return self; return NULL;
} }
struct wasm_linear_memory *self = (struct wasm_linear_memory *)temp;
/* Set the struct and initial pages to read / write */ /* Set the struct and initial pages to read / write */
size_t size_to_read_write = sizeof(struct wasm_linear_memory) + initial; size_t size_to_read_write = sizeof(struct wasm_linear_memory) + initial;
void *addr_rw = mmap(addr, size_to_read_write, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, int rc = mprotect(self, size_to_read_write, PROT_READ | PROT_WRITE);
-1, 0); if (rc != 0) {
if (addr_rw == MAP_FAILED) {
perror("wasm_linear_memory_allocate - prot r/w failed"); perror("wasm_linear_memory_allocate - prot r/w failed");
munmap(addr, size_to_alloc); munmap(self, size_to_alloc);
return self; assert(0);
return NULL;
} }
self->size = initial;
self = (struct wasm_linear_memory *)addr_rw; self->capacity = initial;
self->max = max; self->max = max;
self->size = initial;
return self; return self;
} }
@ -66,51 +62,31 @@ wasm_linear_memory_free(struct wasm_linear_memory *self)
munmap(self, size_to_free); munmap(self, size_to_free);
} }
static inline void
static inline int wasm_linear_memory_wipe(struct wasm_linear_memory *self)
wasm_linear_memory_expand(struct wasm_linear_memory *self, size_t size_to_expand)
{ {
if (unlikely(self->size + size_to_expand >= self->max)) { memset(self->data, 0, self->size);
debuglog("wasm_linear_memory_expand - Out of Memory!.\n");
return -1;
}
void *temp = mmap(self, sizeof(struct wasm_linear_memory) + self->size + size_to_expand, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (temp == NULL) {
perror("wasm_linear_memory_expand mmap");
return -1;
}
/* Assumption: We are not actually resizing our wasm_linear_memory capacity. We are just adjusting the R/W rules
* within a preallocated wasm_linear_memory of size max */
assert(self == temp);
self->size += size_to_expand;
return 0;
} }
static inline int static inline int
wasm_linear_memory_resize(struct wasm_linear_memory *self, size_t target_size) wasm_linear_memory_expand(struct wasm_linear_memory *self, size_t size_to_expand)
{ {
if (unlikely(target_size >= self->max)) { size_t target_size = self->size + size_to_expand;
debuglog("wasm_linear_memory_expand - Out of Memory!. %lu out of %lu\n", self->size, self->max); if (unlikely(target_size > self->max)) {
fprintf(stderr, "wasm_linear_memory_expand - Out of Memory!. %lu out of %lu\n", self->size, self->max);
return -1; return -1;
} }
void *temp = mmap(self, sizeof(struct wasm_linear_memory) + target_size, PROT_READ | PROT_WRITE, if (target_size > self->capacity) {
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); int rc = mprotect(self, sizeof(struct wasm_linear_memory) + target_size, PROT_READ | PROT_WRITE);
if (rc != 0) {
perror("wasm_linear_memory_expand mprotect");
return -1;
}
if (temp == NULL) { self->capacity = target_size;
perror("wasm_linear_memory_resize mmap");
return -1;
} }
assert(self == temp);
/* Assumption: We are not actually resizing our wasm_linear_memory capacity. We are just adjusting the R/W rules
* within a preallocated wasm_linear_memory of size max */
self->size = target_size; self->size = target_size;
return 0; return 0;
} }
@ -140,6 +116,84 @@ wasm_linear_memory_get_char(struct wasm_linear_memory *self, uint32_t offset)
return (char)self->data[offset]; return (char)self->data[offset];
} }
/**
 * Get a float from WebAssembly linear memory
 * Uses memcpy rather than a float* cast: the offset carries no alignment
 * guarantee and the cast would violate strict aliasing (both UB).
 * @param offset an offset into the WebAssembly linear memory
 * @return float at the offset
 */
static inline float
wasm_linear_memory_get_float(struct wasm_linear_memory *self, uint32_t offset)
{
	assert(offset + sizeof(float) <= self->size);
	float result;
	memcpy(&result, &self->data[offset], sizeof(result));
	return result;
}
/**
 * Get a double from WebAssembly linear memory
 * Uses memcpy rather than a double* cast: the offset carries no alignment
 * guarantee and the cast would violate strict aliasing (both UB).
 * @param offset an offset into the WebAssembly linear memory
 * @return double at the offset
 */
static inline double
wasm_linear_memory_get_double(struct wasm_linear_memory *self, uint32_t offset)
{
	assert(offset + sizeof(double) <= self->size);
	double result;
	memcpy(&result, &self->data[offset], sizeof(result));
	return result;
}
/**
 * Get an int8_t from WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @return int8_t at the offset
 */
static inline int8_t
wasm_linear_memory_get_int8(struct wasm_linear_memory *self, uint32_t offset)
{
	assert(offset + sizeof(int8_t) <= self->size);
	uint8_t raw = self->data[offset];
	return (int8_t)raw;
}
/**
 * Get an int16_t from WebAssembly linear memory
 * Uses memcpy rather than an int16_t* cast: the offset carries no alignment
 * guarantee and the cast would violate strict aliasing (both UB).
 * @param offset an offset into the WebAssembly linear memory
 * @return int16_t at the offset
 */
static inline int16_t
wasm_linear_memory_get_int16(struct wasm_linear_memory *self, uint32_t offset)
{
	assert(offset + sizeof(int16_t) <= self->size);
	int16_t result;
	memcpy(&result, &self->data[offset], sizeof(result));
	return result;
}
/**
 * Get an int32_t from WebAssembly linear memory
 * Uses memcpy rather than an int32_t* cast: the offset carries no alignment
 * guarantee and the cast would violate strict aliasing (both UB).
 * @param offset an offset into the WebAssembly linear memory
 * @return int32_t at the offset
 */
static inline int32_t
wasm_linear_memory_get_int32(struct wasm_linear_memory *self, uint32_t offset)
{
	assert(offset + sizeof(int32_t) <= self->size);
	int32_t result;
	memcpy(&result, &self->data[offset], sizeof(result));
	return result;
}
/**
 * Get an int64_t from WebAssembly linear memory
 * (comment previously said int32_t — fixed). Uses memcpy rather than an
 * int64_t* cast: the offset carries no alignment guarantee and the cast
 * would violate strict aliasing (both UB).
 * @param offset an offset into the WebAssembly linear memory
 * @return int64_t at the offset
 */
static inline int64_t
wasm_linear_memory_get_int64(struct wasm_linear_memory *self, uint32_t offset)
{
	assert(offset + sizeof(int64_t) <= self->size);
	int64_t result;
	memcpy(&result, &self->data[offset], sizeof(result));
	return result;
}
/** @return number of whole WASM pages currently covered by this linear memory's size */
static inline uint32_t
wasm_linear_memory_get_page_count(struct wasm_linear_memory *self)
{
	size_t pages = self->size / WASM_PAGE_SIZE;
	return (uint32_t)pages;
}
/** /**
* Get a null-terminated String from WebAssembly linear memory * Get a null-terminated String from WebAssembly linear memory
* @param offset an offset into the WebAssembly linear memory * @param offset an offset into the WebAssembly linear memory
@ -156,3 +210,95 @@ wasm_linear_memory_get_string(struct wasm_linear_memory *self, uint32_t offset,
} }
return NULL; return NULL;
} }
/**
 * Write a double to WebAssembly linear memory
 * Uses memcpy rather than a double* store: the offset carries no alignment
 * guarantee and the cast would violate strict aliasing (both UB).
 * @param offset an offset into the WebAssembly linear memory
 * @param value double to store at the offset
 */
static inline void
wasm_linear_memory_set_double(struct wasm_linear_memory *self, uint32_t offset, double value)
{
	assert(offset + sizeof(double) <= self->size);
	memcpy(&self->data[offset], &value, sizeof(value));
}
/**
 * Write a float to WebAssembly linear memory
 * Uses memcpy rather than a float* store: the offset carries no alignment
 * guarantee and the cast would violate strict aliasing (both UB).
 * @param offset an offset into the WebAssembly linear memory
 * @param value float to store at the offset
 */
static inline void
wasm_linear_memory_set_float(struct wasm_linear_memory *self, uint32_t offset, float value)
{
	assert(offset + sizeof(float) <= self->size);
	memcpy(&self->data[offset], &value, sizeof(value));
}
/**
 * Write an int8_t to WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @param value int8_t to store at the offset
 */
static inline void
wasm_linear_memory_set_int8(struct wasm_linear_memory *self, uint32_t offset, int8_t value)
{
	assert(offset + sizeof(int8_t) <= self->size);
	uint8_t *slot = &self->data[offset];
	*slot = value;
}
/**
 * Write an int16_t to WebAssembly linear memory
 * Uses memcpy rather than an int16_t* store: the offset carries no alignment
 * guarantee and the cast would violate strict aliasing (both UB).
 * @param offset an offset into the WebAssembly linear memory
 * @param value int16_t to store at the offset
 */
static inline void
wasm_linear_memory_set_int16(struct wasm_linear_memory *self, uint32_t offset, int16_t value)
{
	assert(offset + sizeof(int16_t) <= self->size);
	memcpy(&self->data[offset], &value, sizeof(value));
}
/**
 * Write an int32_t to WebAssembly linear memory
 * Uses memcpy rather than an int32_t* store: the offset carries no alignment
 * guarantee and the cast would violate strict aliasing (both UB).
 * @param offset an offset into the WebAssembly linear memory
 * @param value int32_t to store at the offset
 */
static inline void
wasm_linear_memory_set_int32(struct wasm_linear_memory *self, uint32_t offset, int32_t value)
{
	assert(offset + sizeof(int32_t) <= self->size);
	memcpy(&self->data[offset], &value, sizeof(value));
}
/**
 * Write an int64_t to WebAssembly linear memory
 * Bug fix: the previous implementation stored through an int32_t*, silently
 * truncating the upper 32 bits of the value. Now writes the full 8 bytes via
 * memcpy, which also avoids the unaligned, aliasing-violating store.
 * NOTE(review): offset is uint64_t here but uint32_t in every sibling
 * accessor — confirm and unify the parameter type.
 * @param offset an offset into the WebAssembly linear memory
 * @param value int64_t to store at the offset
 */
static inline void
wasm_linear_memory_set_int64(struct wasm_linear_memory *self, uint64_t offset, int64_t value)
{
	assert(offset + sizeof(int64_t) <= self->size);
	memcpy(&self->data[offset], &value, sizeof(value));
}
/**
 * Overwrite the logical size (in bytes) of the linear memory.
 * Only updates the bookkeeping field — it does not change page protections
 * or backing capacity, so the caller must ensure the new size is backed.
 */
static inline void
wasm_linear_memory_set_size(struct wasm_linear_memory *self, size_t size)
{
	self->size = size;
}
/** @return current logical size of the linear memory in bytes */
static inline size_t
wasm_linear_memory_get_size(struct wasm_linear_memory *self)
{
	return self->size;
}
/**
 * Copy a data-segment region into linear memory at the given offset.
 * The bounds check widens offset to size_t before adding so the sum
 * cannot wrap in 32 bits.
 */
static inline void
wasm_linear_memory_initialize_region(struct wasm_linear_memory *self, uint32_t offset, uint32_t region_size,
                                     uint8_t region[region_size])
{
	assert((size_t)offset + region_size <= self->size);
	uint8_t *destination = &self->data[offset];
	memcpy(destination, region, region_size);
}

@ -14,6 +14,7 @@
#include "scheduler.h" #include "scheduler.h"
#include "sandbox_functions.h" #include "sandbox_functions.h"
#include "worker_thread.h" #include "worker_thread.h"
#include "sandbox_context_cache.h"
// What should we tell the child program its UID and GID are? // What should we tell the child program its UID and GID are?
#define UID 0xFF #define UID 0xFF
@ -39,11 +40,12 @@
void void
stub_init(int32_t offset) stub_init(int32_t offset)
{ {
struct sandbox *current_sandbox = current_sandbox_get();
// What program name will we put in the auxiliary vectors // What program name will we put in the auxiliary vectors
char *program_name = current_sandbox_get()->module->name; char *program_name = current_sandbox->module->name;
// Copy the program name into WASM accessible memory // Copy the program name into WASM accessible memory
int32_t program_name_offset = offset; int32_t program_name_offset = offset;
strcpy(get_memory_ptr_for_runtime(offset, sizeof(program_name)), program_name); strcpy(wasm_linear_memory_get_ptr_void(current_sandbox->memory, offset, sizeof(program_name)), program_name);
offset += sizeof(program_name); offset += sizeof(program_name);
// The construction of this is: // The construction of this is:
@ -69,7 +71,8 @@ stub_init(int32_t offset)
0, 0,
}; };
int32_t env_vec_offset = offset; int32_t env_vec_offset = offset;
memcpy(get_memory_ptr_for_runtime(env_vec_offset, sizeof(env_vec)), env_vec, sizeof(env_vec)); memcpy(wasm_linear_memory_get_ptr_void(current_sandbox->memory, env_vec_offset, sizeof(env_vec)), env_vec,
sizeof(env_vec));
module_initialize_libc(current_sandbox_get()->module, env_vec_offset, program_name_offset); module_initialize_libc(current_sandbox_get()->module, env_vec_offset, program_name_offset);
} }
@ -219,8 +222,8 @@ wasm_mmap(int32_t addr, int32_t len, int32_t prot, int32_t flags, int32_t fd, in
assert(len % WASM_PAGE_SIZE == 0); assert(len % WASM_PAGE_SIZE == 0);
int32_t result = local_sandbox_context_cache.memory->size; int32_t result = wasm_linear_memory_get_size(local_sandbox_context_cache.memory);
for (int i = 0; i < len / WASM_PAGE_SIZE; i++) { expand_memory(); } if (wasm_linear_memory_expand(local_sandbox_context_cache.memory, len) == -1) { result = (uint32_t)-1; }
return result; return result;
} }
@ -318,19 +321,14 @@ wasm_mremap(int32_t offset, int32_t old_size, int32_t new_size, int32_t flags)
// If at end of linear memory, just expand and return same address // If at end of linear memory, just expand and return same address
if (offset + old_size == local_sandbox_context_cache.memory->size) { if (offset + old_size == local_sandbox_context_cache.memory->size) {
int32_t amount_to_expand = new_size - old_size; int32_t amount_to_expand = new_size - old_size;
int32_t pages_to_allocate = amount_to_expand / WASM_PAGE_SIZE; wasm_linear_memory_expand(local_sandbox_context_cache.memory, amount_to_expand);
if (amount_to_expand % WASM_PAGE_SIZE > 0) pages_to_allocate++;
for (int i = 0; i < pages_to_allocate; i++) expand_memory();
return offset; return offset;
} }
// Otherwise allocate at end of address space and copy // Otherwise allocate at end of address space and copy
int32_t pages_to_allocate = new_size / WASM_PAGE_SIZE;
if (new_size % WASM_PAGE_SIZE > 0) pages_to_allocate++;
int32_t new_offset = local_sandbox_context_cache.memory->size; int32_t new_offset = local_sandbox_context_cache.memory->size;
for (int i = 0; i < pages_to_allocate; i++) expand_memory(); wasm_linear_memory_expand(local_sandbox_context_cache.memory, new_size);
// Get pointer of old offset and pointer of new offset // Get pointer of old offset and pointer of new offset
uint8_t *linear_mem = local_sandbox_context_cache.memory->data; uint8_t *linear_mem = local_sandbox_context_cache.memory->data;

@ -1,103 +0,0 @@
#include "current_sandbox.h"
#include "panic.h"
#include "runtime.h"
#include "sandbox_types.h"
#include "types.h"
#include <sys/mman.h>
/**
 * @brief Expand the linear memory of the active WebAssembly sandbox by a single page
 *
 * Must only be called while a sandbox is running (user or system state).
 *
 * @return 0 on success, -1 if the sandbox has already reached its linear memory maximum
 */
int
expand_memory(void)
{
	struct sandbox *sandbox = current_sandbox_get();

	assert(sandbox->state == SANDBOX_RUNNING_USER || sandbox->state == SANDBOX_RUNNING_SYS);
	assert(local_sandbox_context_cache.memory->size % WASM_PAGE_SIZE == 0);

	/* Propagate failure if we've hit the linear memory max */
	int rc = wasm_linear_memory_expand(local_sandbox_context_cache.memory, WASM_PAGE_SIZE);
	if (unlikely(rc == -1)) return -1;

#ifdef LOG_SANDBOX_MEMORY_PROFILE
	/* Cache the runtime of the first N page allocations */
	if (likely(sandbox->timestamp_of.page_allocations_size < SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT)) {
		sandbox->timestamp_of.page_allocations[sandbox->timestamp_of.page_allocations_size++] =
		  sandbox->duration_of_state.running
		  + (uint32_t)(__getcycles() - sandbox->timestamp_of.last_state_change);
	}
#endif

	return 0;
}
/**
 * @brief Translate an offset in the sandbox's linear memory into a native pointer
 *
 * @param offset offset into the WebAssembly linear memory
 * @param size number of bytes the caller intends to access starting at offset
 * @return native char pointer into the linear memory backing buffer
 */
INLINE char *
get_memory_ptr_for_runtime(uint32_t offset, uint32_t size)
{
	void *native_address = wasm_linear_memory_get_ptr_void(local_sandbox_context_cache.memory, offset, size);
	return (char *)native_address;
}
/**
 * @brief Stub that implements the WebAssembly memory.grow instruction
 *
 * Per the WebAssembly specification, memory.grow returns the size the linear
 * memory had BEFORE growing (in pages), or -1 on failure.
 *
 * @param count number of pages to grow the WebAssembly linear memory by
 * @return The previous size of the linear memory in pages or -1 if enough memory cannot be allocated
 */
int32_t
instruction_memory_grow(uint32_t count)
{
	/* Snapshot the pre-grow page count; this is the value memory.grow must return.
	 * (The original computed this into `rc` but then returned the post-grow size.) */
	int32_t previous_page_count = (int32_t)(local_sandbox_context_cache.memory->size / WASM_PAGE_SIZE);

	struct sandbox *sandbox = current_sandbox_get();
	assert(sandbox->state == SANDBOX_RUNNING_USER || sandbox->state == SANDBOX_RUNNING_SYS);
	assert(local_sandbox_context_cache.memory->size % WASM_PAGE_SIZE == 0);

	/* Return -1 if we've hit the linear memory max */
	if (unlikely(wasm_linear_memory_expand(local_sandbox_context_cache.memory, WASM_PAGE_SIZE * count) == -1))
		return -1;

#ifdef LOG_SANDBOX_MEMORY_PROFILE
	/* Cache the runtime of the first N page allocations
	 * (uint32_t index avoids a signed/unsigned comparison against count) */
	for (uint32_t i = 0; i < count; i++) {
		if (likely(sandbox->timestamp_of.page_allocations_size < SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT)) {
			sandbox->timestamp_of.page_allocations[sandbox->timestamp_of.page_allocations_size++] =
			  sandbox->duration_of_state.running
			  + (uint32_t)(__getcycles() - sandbox->timestamp_of.last_state_change);
		}
	}
#endif

	return previous_page_count;
}
/*
* Table handling functionality
* This was moved from compiletime in order to place the
* function in the callstack in GDB. It can be moved back
* to runtime/compiletime/memory/64bit_nix.c to remove the
* additional function call
*/
char *
get_function_from_table(uint32_t idx, uint32_t type_id)
{
#ifdef LOG_FUNCTION_TABLE
fprintf(stderr, "get_function_from_table(idx: %u, type_id: %u)\n", idx, type_id);
fprintf(stderr, "indirect_table_size: %u\n", INDIRECT_TABLE_SIZE);
#endif
assert(idx < INDIRECT_TABLE_SIZE);
struct indirect_table_entry f = local_sandbox_context_cache.module_indirect_table[idx];
#ifdef LOG_FUNCTION_TABLE
fprintf(stderr, "assumed type: %u, type in table: %u\n", type_id, f.type_id);
#endif
// FIXME: Commented out function type check because of gocr
// assert(f.type_id == type_id);
assert(f.func_pointer != NULL);
return f.func_pointer;
}

@ -1,37 +0,0 @@
#include <assert.h>
#include <string.h>
#include "runtime.h"
#include "types.h"
/**
 * @brief Region initialization helper function
 *
 * Copies a data-segment region from the module into the sandbox's linear memory.
 *
 * @param offset destination offset into the WebAssembly linear memory
 * @param region_size number of bytes to copy
 * @param region source bytes to copy into linear memory
 */
EXPORT void
initialize_region(uint32_t offset, uint32_t region_size, uint8_t region[region_size])
{
	/* A region may legally end exactly at the last byte of linear memory, so the
	 * bound check is <= rather than < (the original < rejected that valid case).
	 * The size_t cast prevents uint32_t overflow in offset + region_size. */
	assert((size_t)offset + region_size <= local_sandbox_context_cache.memory->size);
	memcpy(get_memory_ptr_for_runtime(offset, region_size), region, region_size);
}
/**
 * @brief Install a function pointer into the module's indirect call table
 *
 * @param idx slot in the indirect table
 * @param type_id type identifier recorded alongside the function pointer
 * @param pointer native function pointer to store
 */
void
add_function_to_table(uint32_t idx, uint32_t type_id, char *pointer)
{
	assert(idx < INDIRECT_TABLE_SIZE);
	assert(local_sandbox_context_cache.module_indirect_table != NULL);

	struct indirect_table_entry *slot = &local_sandbox_context_cache.module_indirect_table[idx];

	/* TODO: atomic for multiple concurrent invocations? Issue #97 */
	if (slot->type_id == type_id && slot->func_pointer == pointer) return;

	slot->type_id      = type_id;
	slot->func_pointer = pointer;
}
/**
 * @brief If we are using runtime globals, we need to populate them
 *
 * Weak stub: always traps, because no implementation currently overrides it.
 */
WEAK void
populate_globals()
{
	/* FIXME: is this used in WASM as dynamic modules? Issue #105. */
	assert(0);
}

@ -24,21 +24,22 @@ sandbox_allocate_linear_memory(struct sandbox *self)
char *error_message = NULL; char *error_message = NULL;
struct wasm_linear_memory *linear_memory = (struct wasm_linear_memory *)pool_allocate_object(
self->module->linear_memory_pool[worker_thread_idx]);
size_t initial = (size_t)self->module->abi.starting_pages * WASM_PAGE_SIZE; size_t initial = (size_t)self->module->abi.starting_pages * WASM_PAGE_SIZE;
size_t max = (size_t)self->module->abi.max_pages * WASM_PAGE_SIZE; size_t max = (size_t)self->module->abi.max_pages * WASM_PAGE_SIZE;
assert(initial <= (size_t)UINT32_MAX + 1); assert(initial <= (size_t)UINT32_MAX + 1);
assert(max <= (size_t)UINT32_MAX + 1); assert(max <= (size_t)UINT32_MAX + 1);
if (linear_memory == NULL) { self->memory = (struct wasm_linear_memory *)pool_allocate_object(
linear_memory = wasm_linear_memory_allocate(initial, max); self->module->linear_memory_pool[worker_thread_idx]);
if (unlikely(linear_memory == NULL)) return -1;
if (self->memory == NULL) {
self->memory = wasm_linear_memory_allocate(initial, max);
if (unlikely(self->memory == NULL)) return -1;
} else {
wasm_linear_memory_set_size(self->memory, self->module->abi.starting_pages * WASM_PAGE_SIZE);
} }
self->memory = linear_memory;
return 0; return 0;
} }
@ -196,7 +197,7 @@ sandbox_free(struct sandbox *sandbox)
*/ */
/* Linear Memory and Guard Page should already have been munmaped and set to NULL */ /* Linear Memory and Guard Page should already have been munmaped and set to NULL */
assert(sandbox->memory->data == NULL); assert(sandbox->memory == NULL);
free(sandbox->request.base); free(sandbox->request.base);
free(sandbox->response.base); free(sandbox->response.base);

Loading…
Cancel
Save