refactor: wasm linear memory

mmap-opt
Sean McBride 4 years ago
parent a6689c4823
commit fc3325366f

@ -99,7 +99,8 @@
"sandbox_state_history.h": "c",
"sandbox_set_as_running_user.h": "c",
"scheduler.h": "c",
"sandbox_set_as_returned.h": "c"
"sandbox_set_as_returned.h": "c",
"wasm_linear_memory.h": "c"
},
"files.exclude": {
"**/.git": true,

@ -1,109 +0,0 @@
#pragma once
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include "debuglog.h"
#include "types.h" /* PAGE_SIZE */
/* Header for a growable linear-memory buffer backed by an mmap reservation.
 * NOTE(review): this file is removed in this commit in favor of wasm_linear_memory.h. */
struct buffer {
uint32_t size; /* current read/write size in bytes (mutated by buffer_expand/buffer_resize) */
uint64_t max;  /* soft cap in bytes */
uint8_t data[]; /* payload, flexible array member */
};
/**
 * Allocates a buffer: reserves sizeof(struct buffer) + max + one guard page of
 * contiguous virtual addresses mapped PROT_NONE, then remaps the header plus the
 * first `initial` payload bytes as read/write.
 * @param initial starting payload size in bytes (must be > 0)
 * @param max soft cap on payload size in bytes (must be > 0)
 * @return pointer to the initialized buffer, or NULL on mmap failure
 */
static inline struct buffer *
buffer_allocate(size_t initial, size_t max)
{
/* NOTE(review): error_message and rc are unused */
char * error_message = NULL;
int rc = 0;
struct buffer *self = NULL;
assert(initial > 0);
assert(max > 0);
/* Reserve the whole range plus a trailing guard page; accesses beyond the
 * read/write prefix fault */
size_t size_to_alloc = sizeof(struct buffer) + max + /* guard page */ PAGE_SIZE;
// assert(round_up_to_page(size_to_alloc) == size_to_alloc);
void *addr = mmap(NULL, size_to_alloc, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
debuglog("buffer_allocate - allocation failed, (size: %lu) %s\n", size_to_alloc, strerror(errno));
return self;
}
/* Set as read / write */
size_t size_to_read_write = sizeof(struct buffer) + initial;
void *addr_rw = mmap(addr, size_to_read_write, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-1, 0);
if (addr_rw == MAP_FAILED) {
perror("buffer_allocate - prot r/w failed");
munmap(addr, size_to_alloc);
return self;
}
self = (struct buffer *)addr_rw;
self->max = max;
self->size = initial;
return self;
}
/**
 * Releases a buffer, unmapping the full reservation made by buffer_allocate
 * (header + max + guard page — the two sizes match, unlike the new
 * wasm_linear_memory variant).
 * @param self buffer returned by buffer_allocate
 */
static inline void
buffer_free(struct buffer *self)
{
size_t size_to_free = sizeof(struct buffer) + self->max + /* guard page */ PAGE_SIZE;
munmap(self, size_to_free);
}
/**
 * Grows the read/write prefix of the buffer by size_to_expand bytes.
 * @param self the buffer to grow
 * @param size_to_expand additional bytes to make read/write
 * @return 0 on success, -1 if the soft cap would be reached or mmap fails
 */
static inline int
buffer_expand(struct buffer *self, size_t size_to_expand)
{
if (unlikely(self->size + size_to_expand >= self->max)) {
debuglog("buffer_expand - Out of Memory!. %u out of %lu\n", self->size, self->max);
return -1;
}
/* NOTE(review): an anonymous MAP_FIXED mapping over the live region discards the
 * existing pages (zero-fill), clobbering the header and any payload already
 * written; mprotect would adjust permissions without destroying contents. */
void *temp = mmap(self, sizeof(struct buffer) + self->size + size_to_expand, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
/* NOTE(review): mmap signals failure with MAP_FAILED ((void *)-1), not NULL, so
 * this check can never fire */
if (temp == NULL) {
perror("buffer_expand mmap");
return -1;
}
/* Assumption: We are not actually resizing our buffer capacity. We are just adjusting the R/W rules within a
 * preallocated buffer of size max */
assert(self == temp);
self->size += size_to_expand;
return 0;
}
/**
 * Sets the read/write prefix of the buffer to exactly target_size bytes.
 * @param self the buffer to resize
 * @param target_size new read/write payload size in bytes
 * @return 0 on success, -1 if target_size reaches the soft cap or mmap fails
 */
static inline int
buffer_resize(struct buffer *self, size_t target_size)
{
if (unlikely(target_size >= self->max)) {
/* NOTE(review): message says "buffer_expand" inside buffer_resize */
debuglog("buffer_expand - Out of Memory!. %u out of %lu\n", self->size, self->max);
return -1;
}
/* NOTE(review): this anonymous MAP_FIXED remap zero-fills the covered range,
 * including the header, so self->max is destroyed before the next use */
void *temp = mmap(self, sizeof(struct buffer) + target_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
/* NOTE(review): mmap signals failure with MAP_FAILED ((void *)-1), not NULL */
if (temp == NULL) {
perror("buffer_resize mmap");
return -1;
}
assert(self == temp);
/* Assumption: We are not actually resizing our buffer capacity. We are just adjusting the R/W rules within a
 * preallocated buffer of size max */
self->size = target_size;
return 0;
}

@ -46,5 +46,24 @@ current_sandbox_set(struct sandbox *sandbox)
}
}
extern void current_sandbox_sleep();
/**
 * Translates a WASM offset in the current sandbox's linear memory into a native pointer
 * @param offset an offset into the WebAssembly linear memory
 * @param bounds_check the size of the thing we are pointing to
 * @return void pointer into WebAssembly linear memory
 */
static inline void *
current_sandbox_get_ptr_void(uint32_t offset, uint32_t bounds_check)
{
	struct wasm_linear_memory *memory = local_sandbox_context_cache.memory;
	assert(memory != NULL);
	return wasm_linear_memory_get_ptr_void(memory, offset, bounds_check);
}
/**
 * Reads a single char from the current sandbox's linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @return the char at that offset
 */
static inline char
current_sandbox_get_char(uint32_t offset)
{
	struct wasm_linear_memory *memory = local_sandbox_context_cache.memory;
	assert(memory != NULL);
	return wasm_linear_memory_get_char(memory, offset);
}
static inline char *
current_sandbox_get_string(uint32_t offset, uint32_t size)
{
return wasm_linear_memory_get_string(local_sandbox_context_cache.memory, offset, size);
}

@ -5,7 +5,7 @@
#include <stdatomic.h>
#include <stdbool.h>
#include "buffer.h"
#include "wasm_linear_memory.h"
#include "likely.h"
#include "types.h"
@ -45,7 +45,7 @@ struct indirect_table_entry {
/* Cache of Frequently Accessed Members used to avoid pointer chasing */
struct sandbox_context_cache {
struct buffer * memory;
struct wasm_linear_memory * memory;
struct indirect_table_entry *module_indirect_table;
};

@ -37,7 +37,7 @@ static inline void
sandbox_free_linear_memory(struct sandbox *sandbox)
{
if (pool_free_object(sandbox->module->linear_memory_pool[worker_thread_idx], sandbox) < 0) {
buffer_free(sandbox->memory);
wasm_linear_memory_free(sandbox->memory);
}
}

@ -35,7 +35,7 @@ sandbox_perf_log_print_entry(struct sandbox *sandbox)
* becomes more intelligent, then peak linear memory size needs to be tracked
* seperately from current linear memory size.
*/
fprintf(sandbox_perf_log, "%lu,%s,%d,%s,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%u,%u\n",
fprintf(sandbox_perf_log, "%lu,%s,%d,%s,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%u,%lu\n",
sandbox->id, sandbox->module->name, sandbox->module->port, sandbox_state_stringify(sandbox->state),
sandbox->module->relative_deadline, sandbox->total_time, queued_duration,
sandbox->duration_of_state[SANDBOX_UNINITIALIZED], sandbox->duration_of_state[SANDBOX_INITIALIZED],

@ -94,9 +94,9 @@ struct sandbox {
struct module *module; /* the module this is an instance of */
/* WebAssembly Instance State */
struct arch_context ctxt;
struct sandbox_stack stack;
struct buffer * memory;
struct arch_context ctxt;
struct sandbox_stack stack;
struct wasm_linear_memory *memory;
/* Scheduling and Temporal State */
struct sandbox_timestamps timestamp_of;

@ -0,0 +1,158 @@
#pragma once
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include "debuglog.h"
#include "types.h" /* PAGE_SIZE */
#include "wasm_types.h"
/* Upper bound on a WebAssembly linear memory: the full 32-bit address space (4GiB).
 * Fully parenthesized so the macro composes safely inside larger expressions
 * (the unparenthesized form breaks under operators of higher precedence than +). */
#define WASM_LINEAR_MEMORY_MAX ((size_t)UINT32_MAX + 1)

/* A WebAssembly linear memory: header followed by the payload as a flexible
 * array member. Only the first `size` bytes of data are mapped read/write. */
struct wasm_linear_memory {
	size_t size; /* Current size in bytes (grown by expand, set by resize) */
	size_t max;  /* Soft cap in bytes. Defaults to 4GB */
	uint8_t data[];
};
/**
 * Allocates a WebAssembly linear memory
 *
 * Reserves contiguous virtual addresses for the header, the entire 4GiB
 * addressable range, and a trailing guard page, all mapped PROT_NONE so any
 * out-of-bounds access faults. The header plus the first `initial` payload
 * bytes are then remapped read/write.
 *
 * @param initial starting payload size in bytes, (0, 4GiB]
 * @param max soft cap on payload size in bytes, (0, 4GiB]
 * @return pointer to the initialized wasm_linear_memory, or NULL on mmap failure
 */
static inline struct wasm_linear_memory *
wasm_linear_memory_allocate(size_t initial, size_t max)
{
	assert(initial > 0);
	assert(initial <= (size_t)UINT32_MAX + 1);
	assert(max > 0);
	assert(max <= (size_t)UINT32_MAX + 1);

	/* Reserve contiguous virtual addresses and map them to fault on access */
	size_t size_to_alloc = sizeof(struct wasm_linear_memory) + WASM_LINEAR_MEMORY_MAX + /* guard page */ PAGE_SIZE;
	void  *addr          = mmap(NULL, size_to_alloc, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		/* %zu is the correct conversion for size_t */
		debuglog("wasm_linear_memory_allocate - allocation failed, (size: %zu) %s\n", size_to_alloc,
		         strerror(errno));
		return NULL;
	}

	/* Set the struct and initial pages to read / write */
	size_t size_to_read_write = sizeof(struct wasm_linear_memory) + initial;
	void  *addr_rw = mmap(addr, size_to_read_write, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
	                      -1, 0);
	if (addr_rw == MAP_FAILED) {
		perror("wasm_linear_memory_allocate - prot r/w failed");
		munmap(addr, size_to_alloc);
		return NULL;
	}

	struct wasm_linear_memory *self = (struct wasm_linear_memory *)addr_rw;
	self->max                       = max;
	self->size                      = initial;
	return self;
}
static inline void
wasm_linear_memory_free(struct wasm_linear_memory *self)
{
size_t size_to_free = sizeof(struct wasm_linear_memory) + self->max + /* guard page */ PAGE_SIZE;
munmap(self, size_to_free);
}
/**
 * Grows the read/write region of the linear memory by size_to_expand bytes
 *
 * Uses mprotect rather than an anonymous MAP_FIXED mmap: MAP_FIXED discards
 * the overlapped pages and anonymous mappings are zero-filled, so the old
 * mmap call wiped the header and all previously written payload on every
 * expansion. mprotect only changes permissions, preserving contents. (The old
 * failure check `temp == NULL` was also dead code — mmap fails with
 * MAP_FAILED, not NULL.)
 *
 * @param self the wasm_linear_memory to grow
 * @param size_to_expand additional bytes to make read/write
 * @return 0 on success, -1 if the soft cap would be reached or mprotect fails
 */
static inline int
wasm_linear_memory_expand(struct wasm_linear_memory *self, size_t size_to_expand)
{
	if (unlikely(self->size + size_to_expand >= self->max)) {
		debuglog("wasm_linear_memory_expand - Out of Memory!. %zu out of %zu\n", self->size, self->max);
		return -1;
	}

	/* Assumption: We are not actually resizing our wasm_linear_memory capacity. We are just adjusting the R/W
	 * rules within a preallocated wasm_linear_memory of size max */
	int rc = mprotect(self, sizeof(struct wasm_linear_memory) + self->size + size_to_expand,
	                  PROT_READ | PROT_WRITE);
	if (rc == -1) {
		perror("wasm_linear_memory_expand mprotect");
		return -1;
	}

	self->size += size_to_expand;
	return 0;
}
/**
 * Sets the read/write region of the linear memory to target_size bytes,
 * zero-filling the payload in [0, target_size) via a fresh anonymous mapping.
 *
 * @param self the wasm_linear_memory to resize
 * @param target_size new payload size in bytes
 * @return 0 on success, -1 if target_size reaches the soft cap or mmap fails
 */
static inline int
wasm_linear_memory_resize(struct wasm_linear_memory *self, size_t target_size)
{
	if (unlikely(target_size >= self->max)) {
		debuglog("wasm_linear_memory_resize - Out of Memory!. %zu out of %zu\n", target_size, self->max);
		return -1;
	}

	/* The anonymous MAP_FIXED mapping below discards and zero-fills everything
	 * it covers — including this header — so preserve the soft cap before
	 * remapping or self->max would silently become 0. */
	size_t max = self->max;

	void *temp = mmap(self, sizeof(struct wasm_linear_memory) + target_size, PROT_READ | PROT_WRITE,
	                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	/* mmap reports failure with MAP_FAILED ((void *)-1), never NULL */
	if (temp == MAP_FAILED) {
		perror("wasm_linear_memory_resize mmap");
		return -1;
	}
	assert(self == temp);

	/* Assumption: We are not actually resizing our wasm_linear_memory capacity. We are just adjusting the R/W
	 * rules within a preallocated wasm_linear_memory of size max */
	self->max  = max;
	self->size = target_size;
	return 0;
}
/**
 * Translates WASM offsets into runtime VM pointers
 * @param self the linear memory to index into
 * @param offset an offset into the WebAssembly linear memory
 * @param size the size of the thing we are pointing to
 * @return void pointer to something in WebAssembly linear memory
 */
static inline void *
wasm_linear_memory_get_ptr_void(struct wasm_linear_memory *self, uint32_t offset, uint32_t size)
{
	/* Widen before adding: a uint32_t `offset + size` wraps at 2^32, which
	 * would let an out-of-bounds access slip past this check */
	assert((uint64_t)offset + size <= self->size);
	return (void *)&self->data[offset];
}
/**
 * Reads a single ASCII character out of WebAssembly linear memory
 * @param self the linear memory to read from
 * @param offset an offset into the WebAssembly linear memory
 * @return the char stored at offset
 */
static inline char
wasm_linear_memory_get_char(struct wasm_linear_memory *self, uint32_t offset)
{
	assert(self->size >= offset + sizeof(char));
	uint8_t raw_byte = self->data[offset];
	return (char)raw_byte;
}
/**
 * Gets a null-terminated string from WebAssembly linear memory
 * @param self the linear memory to read from
 * @param offset an offset into the WebAssembly linear memory
 * @param size the maximum expected length in characters
 * @return pointer to the string, or NULL if no null-terminator appears within size bytes
 */
static inline char *
wasm_linear_memory_get_string(struct wasm_linear_memory *self, uint32_t offset, uint32_t size)
{
	assert(offset + (sizeof(char) * size) <= self->size);

	/* memchr scans the candidate region for the terminator in one call */
	char *candidate = (char *)&self->data[offset];
	if (memchr(candidate, '\0', size) == NULL) return NULL;
	return candidate;
}

@ -5,6 +5,5 @@
/* FIXME: per-module configuration? Issue #101 */
#define WASM_PAGE_SIZE (1024 * 64) /* 64KB */
#define WASM_MEMORY_PAGES_INITIAL (1 << 8) /* 256 Pages ~16MB */
#define WASM_MEMORY_PAGES_MAX (1 << 15) /* 32,768 Pages ~4GB */
#define WASM_STACK_SIZE (1 << 19) /* 512KB */

@ -11,42 +11,3 @@ extern thread_local int worker_thread_idx;
void *worker_thread_main(void *return_code);
/**
 * Translates WASM offsets into runtime VM pointers
 * Thin wrapper delegating bounds checking to get_memory_ptr_for_runtime.
 * NOTE(review): removed in this commit in favor of current_sandbox_get_ptr_void.
 * @param offset an offset into the WebAssembly linear memory
 * @param bounds_check the size of the thing we are pointing to
 * @return void pointer to something in WebAssembly linear memory
 */
static inline void *
worker_thread_get_memory_ptr_void(uint32_t offset, uint32_t bounds_check)
{
return (void *)get_memory_ptr_for_runtime(offset, bounds_check);
}
/**
 * Get a single-byte extended ASCII character from WebAssembly linear memory
 * NOTE(review): removed in this commit in favor of current_sandbox_get_char.
 * @param offset an offset into the WebAssembly linear memory
 * @return char at the offset
 */
static inline char
worker_thread_get_memory_character(uint32_t offset)
{
return get_memory_ptr_for_runtime(offset, 1)[0];
}
/**
 * Get a null-terminated String from WebAssembly linear memory
 * NOTE(review): removed in this commit in favor of current_sandbox_get_string.
 * Performs one bounds-checked lookup per character, so costs O(max_length)
 * calls into get_memory_ptr_for_runtime.
 * @param offset an offset into the WebAssembly linear memory
 * @param max_length the maximum expected length in characters
 * @return pointer to the string or NULL if max_length is reached without finding null-terminator
 */
static inline char *
worker_thread_get_memory_string(uint32_t offset, uint32_t max_length)
{
for (uint32_t i = 0; i < max_length; i++) {
if (worker_thread_get_memory_character(offset + i) == '\0') {
return (char *)worker_thread_get_memory_ptr_void(offset, 1);
}
}
return NULL;
}

@ -4,6 +4,7 @@
#include "arch/getcycles.h"
#include "worker_thread.h"
#include "current_sandbox.h"
extern int32_t inner_syscall_handler(int32_t n, int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f);
@ -36,7 +37,7 @@ env_a_ctz_64(uint64_t x)
INLINE void
env_a_and_64(int32_t p_off, uint64_t v)
{
uint64_t *p = worker_thread_get_memory_ptr_void(p_off, sizeof(uint64_t));
uint64_t *p = current_sandbox_get_ptr_void(p_off, sizeof(uint64_t));
ck_pr_and_64(p, v);
}
@ -44,7 +45,7 @@ INLINE void
env_a_or_64(int32_t p_off, int64_t v)
{
assert(sizeof(int64_t) == sizeof(uint64_t));
uint64_t *p = worker_thread_get_memory_ptr_void(p_off, sizeof(int64_t));
uint64_t *p = current_sandbox_get_ptr_void(p_off, sizeof(int64_t));
ck_pr_or_64(p, v);
}
@ -52,7 +53,7 @@ int32_t
env_a_cas(int32_t p_off, int32_t t, int32_t s)
{
assert(sizeof(int32_t) == sizeof(volatile int));
int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(int32_t));
int *p = current_sandbox_get_ptr_void(p_off, sizeof(int32_t));
return ck_pr_cas_int(p, t, s);
}
@ -61,7 +62,7 @@ void
env_a_or(int32_t p_off, int32_t v)
{
assert(sizeof(int32_t) == sizeof(volatile int));
int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(int32_t));
int *p = current_sandbox_get_ptr_void(p_off, sizeof(int32_t));
ck_pr_or_int(p, v);
}
@ -69,7 +70,7 @@ int32_t
env_a_swap(int32_t x_off, int32_t v)
{
assert(sizeof(int32_t) == sizeof(volatile int));
int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(int32_t));
int *x = current_sandbox_get_ptr_void(x_off, sizeof(int32_t));
int p;
do {
@ -84,7 +85,7 @@ int32_t
env_a_fetch_add(int32_t x_off, int32_t v)
{
assert(sizeof(int32_t) == sizeof(volatile int));
int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(int32_t));
int *x = current_sandbox_get_ptr_void(x_off, sizeof(int32_t));
return ck_pr_faa_int(x, v);
}
@ -92,7 +93,7 @@ void
env_a_inc(int32_t x_off)
{
assert(sizeof(int32_t) == sizeof(volatile int));
int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(int32_t));
int *x = current_sandbox_get_ptr_void(x_off, sizeof(int32_t));
ck_pr_inc_int(x);
}
@ -100,7 +101,7 @@ void
env_a_dec(int32_t x_off)
{
assert(sizeof(int32_t) == sizeof(volatile int));
int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(int32_t));
int *x = (int *)current_sandbox_get_ptr_void(x_off, sizeof(int32_t));
ck_pr_dec_int(x);
}
@ -108,7 +109,7 @@ void
env_a_store(int32_t p_off, int32_t x)
{
assert(sizeof(int32_t) == sizeof(volatile int));
int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(int32_t));
int *p = (int *)current_sandbox_get_ptr_void(p_off, sizeof(int32_t));
ck_pr_store_int(p, x);
}

@ -92,7 +92,7 @@ wasm_read(int32_t filedes, int32_t buf_offset, int32_t nbyte)
/* Non-blocking copy on stdin */
if (filedes == 0) {
char * buffer = worker_thread_get_memory_ptr_void(buf_offset, nbyte);
char * buffer = current_sandbox_get_ptr_void(buf_offset, nbyte);
struct http_request *current_request = &current_sandbox->http_request;
if (current_request->body_length <= 0) return 0;
int bytes_to_read = nbyte > current_request->body_length ? current_request->body_length : nbyte;
@ -102,7 +102,7 @@ wasm_read(int32_t filedes, int32_t buf_offset, int32_t nbyte)
return bytes_to_read;
}
char *buf = worker_thread_get_memory_ptr_void(buf_offset, nbyte);
char *buf = current_sandbox_get_ptr_void(buf_offset, nbyte);
int32_t res = 0;
while (res < nbyte) {
@ -133,7 +133,7 @@ int32_t
wasm_write(int32_t fd, int32_t buf_offset, int32_t buf_size)
{
struct sandbox *s = current_sandbox_get();
char * buffer = worker_thread_get_memory_ptr_void(buf_offset, buf_size);
char * buffer = current_sandbox_get_ptr_void(buf_offset, buf_size);
if (fd == STDERR_FILENO) { write(STDERR_FILENO, buffer, buf_size); }
@ -173,7 +173,7 @@ err:
int32_t
wasm_open(int32_t path_off, int32_t flags, int32_t mode)
{
char *path = worker_thread_get_memory_string(path_off, MODULE_MAX_PATH_LENGTH);
char *path = current_sandbox_get_string(path_off, MODULE_MAX_PATH_LENGTH);
int res = ENOTSUP;
@ -252,7 +252,7 @@ int32_t
wasm_readv(int32_t fd, int32_t iov_offset, int32_t iovcnt)
{
int32_t read = 0;
struct wasm_iovec *iov = worker_thread_get_memory_ptr_void(iov_offset, iovcnt * sizeof(struct wasm_iovec));
struct wasm_iovec *iov = current_sandbox_get_ptr_void(iov_offset, iovcnt * sizeof(struct wasm_iovec));
for (int i = 0; i < iovcnt; i++) { read += wasm_read(fd, iov[i].base_offset, iov[i].len); }
return read;
@ -266,8 +266,7 @@ wasm_writev(int32_t fd, int32_t iov_offset, int32_t iovcnt)
if (fd == STDOUT_FILENO || fd == STDERR_FILENO) {
// both 1 and 2 go to client.
int len = 0;
struct wasm_iovec *iov = worker_thread_get_memory_ptr_void(iov_offset,
iovcnt * sizeof(struct wasm_iovec));
struct wasm_iovec *iov = current_sandbox_get_ptr_void(iov_offset, iovcnt * sizeof(struct wasm_iovec));
for (int i = 0; i < iovcnt; i++) { len += wasm_write(fd, iov[i].base_offset, iov[i].len); }
return len;
@ -277,7 +276,7 @@ wasm_writev(int32_t fd, int32_t iov_offset, int32_t iovcnt)
assert(0);
struct wasm_iovec *iov = worker_thread_get_memory_ptr_void(iov_offset, iovcnt * sizeof(struct wasm_iovec));
struct wasm_iovec *iov = current_sandbox_get_ptr_void(iov_offset, iovcnt * sizeof(struct wasm_iovec));
// If we aren't on MUSL, pass writev to printf if possible
#if defined(__GLIBC__)
@ -285,7 +284,7 @@ wasm_writev(int32_t fd, int32_t iov_offset, int32_t iovcnt)
int sum = 0;
for (int i = 0; i < iovcnt; i++) {
int32_t len = iov[i].len;
void * ptr = worker_thread_get_memory_ptr_void(iov[i].base_offset, len);
void * ptr = current_sandbox_get_ptr_void(iov[i].base_offset, len);
printf("%.*s", len, (char *)ptr);
sum += len;
@ -297,7 +296,7 @@ wasm_writev(int32_t fd, int32_t iov_offset, int32_t iovcnt)
struct iovec vecs[iovcnt];
for (int i = 0; i < iovcnt; i++) {
int32_t len = iov[i].len;
void * ptr = worker_thread_get_memory_ptr_void(iov[i].base_offset, len);
void * ptr = current_sandbox_get_ptr_void(iov[i].base_offset, len);
vecs[i] = (struct iovec){ ptr, len };
}
@ -384,8 +383,7 @@ wasm_get_time(int32_t clock_id, int32_t timespec_off)
assert(0);
}
struct wasm_time_spec *timespec = worker_thread_get_memory_ptr_void(timespec_off,
sizeof(struct wasm_time_spec));
struct wasm_time_spec *timespec = current_sandbox_get_ptr_void(timespec_off, sizeof(struct wasm_time_spec));
struct timespec native_timespec = { 0, 0 };
int res = clock_gettime(real_clock, &native_timespec);

@ -20,7 +20,7 @@ expand_memory(void)
assert(local_sandbox_context_cache.memory->size % WASM_PAGE_SIZE == 0);
/* Return -1 if we've hit the linear memory max */
if (unlikely(buffer_expand(local_sandbox_context_cache.memory, WASM_PAGE_SIZE) == -1)) return -1;
if (unlikely(wasm_linear_memory_expand(local_sandbox_context_cache.memory, WASM_PAGE_SIZE) == -1)) return -1;
#ifdef LOG_SANDBOX_MEMORY_PROFILE
// Cache the runtime of the first N page allocations
@ -35,18 +35,9 @@ expand_memory(void)
}
INLINE char *
get_memory_ptr_for_runtime(uint32_t offset, uint32_t bounds_check)
get_memory_ptr_for_runtime(uint32_t offset, uint32_t size)
{
// Due to how we setup memory for x86, the virtual memory mechanism will catch the error, if bounds <
// WASM_PAGE_SIZE
assert(bounds_check < WASM_PAGE_SIZE
|| (local_sandbox_context_cache.memory->size > bounds_check
&& offset <= local_sandbox_context_cache.memory->size - bounds_check));
char *mem_as_chars = (char *)local_sandbox_context_cache.memory->data;
char *address = &mem_as_chars[offset];
return address;
return (char *)wasm_linear_memory_get_ptr_void(local_sandbox_context_cache.memory, offset, size);
}
/**
@ -60,14 +51,27 @@ instruction_memory_grow(uint32_t count)
{
int rc = local_sandbox_context_cache.memory->size / WASM_PAGE_SIZE;
struct sandbox *sandbox = current_sandbox_get();
assert(sandbox->state == SANDBOX_RUNNING_USER || sandbox->state == SANDBOX_RUNNING_SYS);
assert(local_sandbox_context_cache.memory->size % WASM_PAGE_SIZE == 0);
/* Return -1 if we've hit the linear memory max */
if (unlikely(wasm_linear_memory_expand(local_sandbox_context_cache.memory, WASM_PAGE_SIZE * count) == -1))
return -1;
#ifdef LOG_SANDBOX_MEMORY_PROFILE
// Cache the runtime of the first N page allocations
for (int i = 0; i < count; i++) {
if (unlikely(expand_memory() != 0)) {
rc = -1;
break;
if (likely(sandbox->timestamp_of.page_allocations_size < SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT)) {
sandbox->timestamp_of.page_allocations[sandbox->timestamp_of.page_allocations_size++] =
sandbox->duration_of_state.running
+ (uint32_t)(__getcycles() - sandbox->timestamp_of.last_state_change);
}
}
#endif
return rc;
return local_sandbox_context_cache.memory->size / WASM_PAGE_SIZE;
}
/*

@ -8,8 +8,7 @@
EXPORT void
initialize_region(uint32_t offset, uint32_t region_size, uint8_t region[region_size])
{
assert(local_sandbox_context_cache.memory->size >= region_size);
assert(offset < local_sandbox_context_cache.memory->size - region_size);
assert((size_t)offset + region_size < local_sandbox_context_cache.memory->size);
memcpy(get_memory_ptr_for_runtime(offset, region_size), region, region_size);
}

@ -9,7 +9,7 @@
#include "sandbox_functions.h"
#include "sandbox_set_as_error.h"
#include "sandbox_set_as_initialized.h"
#include "buffer.h"
#include "wasm_linear_memory.h"
/**
* Allocates a WebAssembly sandbox represented by the following layout
@ -22,17 +22,19 @@ sandbox_allocate_linear_memory(struct sandbox *self)
{
assert(self != NULL);
char * error_message = NULL;
uint64_t memory_max = (uint64_t)WASM_PAGE_SIZE * WASM_MEMORY_PAGES_MAX;
char *error_message = NULL;
struct buffer *linear_memory = (struct buffer *)pool_allocate_object(
struct wasm_linear_memory *linear_memory = (struct wasm_linear_memory *)pool_allocate_object(
self->module->linear_memory_pool[worker_thread_idx]);
size_t initial = (size_t)self->module->abi.starting_pages * WASM_PAGE_SIZE;
size_t max = (size_t)WASM_MEMORY_PAGES_MAX * WASM_PAGE_SIZE;
size_t max = (size_t)self->module->abi.max_pages * WASM_PAGE_SIZE;
assert(initial <= (size_t)UINT32_MAX + 1);
assert(max <= (size_t)UINT32_MAX + 1);
if (linear_memory == NULL) {
linear_memory = buffer_allocate(initial, max);
linear_memory = wasm_linear_memory_allocate(initial, max);
if (unlikely(linear_memory == NULL)) return -1;
}

Loading…
Cancel
Save