|
|
|
@ -10,62 +10,113 @@
|
|
|
|
|
#include "types.h" /* PAGE_SIZE */
|
|
|
|
|
#include "wasm_types.h"
|
|
|
|
|
|
|
|
|
|
/* wasm32 linear memory can span the full 32-bit address space: 4GiB.
 * Fully parenthesized so the macro composes safely in larger expressions. */
#define WASM_MEMORY_MAX (((size_t)UINT32_MAX) + 1)

/* Virtual address reservation per linear memory: the full wasm32 range plus a
 * trailing guard page so out-of-bounds accesses fault instead of corrupting
 * adjacent mappings. */
#define WASM_MEMORY_SIZE_TO_ALLOC (WASM_MEMORY_MAX + /* guard page */ PAGE_SIZE)
|
|
|
|
|
|
|
|
|
|
struct wasm_memory {
        size_t   size;     /* Current size in bytes */
        size_t   capacity; /* Size backed by actual pages (high-water mark across reuses) */
        size_t   max;      /* Soft cap in bytes. Defaults to 4GB */
        uint8_t *buffer;   /* mmap'd reservation of WASM_MEMORY_SIZE_TO_ALLOC bytes; NULL when deinitialized */
};
|
|
|
|
|
|
|
|
|
|
static INLINE struct wasm_memory *wasm_memory_alloc(void);
|
|
|
|
|
static INLINE int wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max);
|
|
|
|
|
static INLINE struct wasm_memory *wasm_memory_new(size_t initial, size_t max);
|
|
|
|
|
static INLINE void wasm_memory_deinit(struct wasm_memory *self);
|
|
|
|
|
static INLINE void wasm_memory_free(struct wasm_memory *self);
|
|
|
|
|
static INLINE void wasm_memory_delete(struct wasm_memory *self);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
static INLINE struct wasm_memory *
|
|
|
|
|
wasm_memory_allocate(size_t initial, size_t max)
|
|
|
|
|
wasm_memory_alloc(void)
|
|
|
|
|
{
|
|
|
|
|
return malloc(sizeof(struct wasm_memory));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static INLINE int
|
|
|
|
|
wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max)
|
|
|
|
|
{
|
|
|
|
|
assert(self != NULL);
|
|
|
|
|
|
|
|
|
|
/* We assume WASI modules, which are required to declare and export a linear memory with a non-zero size to
|
|
|
|
|
* allow a standard lib to initialize. Technically, a WebAssembly module that exports pure functions may not use
|
|
|
|
|
* a linear memory */
|
|
|
|
|
assert(initial > 0);
|
|
|
|
|
assert(initial <= (size_t)UINT32_MAX + 1);
|
|
|
|
|
assert(max > 0);
|
|
|
|
|
assert(max <= (size_t)UINT32_MAX + 1);
|
|
|
|
|
|
|
|
|
|
/* Allocate contiguous virtual addresses for struct, full linear memory, and guard page */
|
|
|
|
|
size_t size_to_alloc = sizeof(struct wasm_memory) + WASM_MEMORY_MAX + /* guard page */ PAGE_SIZE;
|
|
|
|
|
void * temp = mmap(NULL, size_to_alloc, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
|
|
|
|
if (temp == MAP_FAILED) {
|
|
|
|
|
fprintf(stderr, "wasm_memory_allocate - allocation failed, (size: %lu) %s\n", size_to_alloc,
|
|
|
|
|
strerror(errno));
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
struct wasm_memory *self = (struct wasm_memory *)temp;
|
|
|
|
|
|
|
|
|
|
/* Set the struct and initial pages to read / write */
|
|
|
|
|
size_t size_to_read_write = sizeof(struct wasm_memory) + initial;
|
|
|
|
|
/* Allocate buffer of contiguous virtual addresses for full wasm32 linear memory and guard page */
|
|
|
|
|
self->buffer = mmap(NULL, WASM_MEMORY_SIZE_TO_ALLOC, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
|
|
|
|
if (self->buffer == MAP_FAILED) return -1;
|
|
|
|
|
|
|
|
|
|
int rc = mprotect(self, size_to_read_write, PROT_READ | PROT_WRITE);
|
|
|
|
|
/* Set the initial bytes to read / write */
|
|
|
|
|
int rc = mprotect(self->buffer, initial, PROT_READ | PROT_WRITE);
|
|
|
|
|
if (rc != 0) {
|
|
|
|
|
perror("wasm_memory_allocate - prot r/w failed");
|
|
|
|
|
munmap(self, size_to_alloc);
|
|
|
|
|
assert(0);
|
|
|
|
|
return NULL;
|
|
|
|
|
munmap(self->buffer, WASM_MEMORY_SIZE_TO_ALLOC);
|
|
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
self->size = initial;
|
|
|
|
|
self->capacity = initial;
|
|
|
|
|
self->max = max;
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
 * Allocates and initializes a wasm_memory in one step.
 * @param initial initial linear memory size in bytes
 * @param max soft cap in bytes
 * @returns initialized wasm_memory, or NULL on allocation/initialization failure
 */
static INLINE struct wasm_memory *
wasm_memory_new(size_t initial, size_t max)
{
        struct wasm_memory *self = wasm_memory_alloc();
        if (self == NULL) return NULL;

        if (wasm_memory_init(self, initial, max) < 0) {
                assert(0);
                wasm_memory_free(self);
                return NULL;
        }

        return self;
}
|
|
|
|
|
|
|
|
|
|
static INLINE void
|
|
|
|
|
wasm_memory_deinit(struct wasm_memory *self)
|
|
|
|
|
{
|
|
|
|
|
assert(self != NULL);
|
|
|
|
|
assert(self->buffer != NULL);
|
|
|
|
|
|
|
|
|
|
munmap(self->buffer, WASM_MEMORY_SIZE_TO_ALLOC);
|
|
|
|
|
self->buffer = NULL;
|
|
|
|
|
self->size = 0;
|
|
|
|
|
self->capacity = 0;
|
|
|
|
|
self->max = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static INLINE void
|
|
|
|
|
wasm_memory_free(struct wasm_memory *self)
|
|
|
|
|
{
|
|
|
|
|
size_t size_to_free = sizeof(struct wasm_memory) + WASM_MEMORY_MAX + /* guard page */ PAGE_SIZE;
|
|
|
|
|
munmap(self, size_to_free);
|
|
|
|
|
assert(self != NULL);
|
|
|
|
|
/* Assume prior deinitialization so we don't leak buffers */
|
|
|
|
|
assert(self->buffer == NULL);
|
|
|
|
|
|
|
|
|
|
free(self);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static INLINE void
|
|
|
|
|
wasm_memory_delete(struct wasm_memory *self)
|
|
|
|
|
{
|
|
|
|
|
assert(self != NULL);
|
|
|
|
|
|
|
|
|
|
wasm_memory_deinit(self);
|
|
|
|
|
wasm_memory_free(self);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static INLINE void
|
|
|
|
|
wasm_memory_wipe(struct wasm_memory *self)
|
|
|
|
|
{
|
|
|
|
|
memset(self->data, 0, self->size);
|
|
|
|
|
memset(self->buffer, 0, self->size);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static INLINE int
|
|
|
|
@ -77,8 +128,13 @@ wasm_memory_expand(struct wasm_memory *self, size_t size_to_expand)
|
|
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* If recycling a wasm_memory from an object pool, a previous execution may have already expanded to or what
|
|
|
|
|
* beyond what we need. The capacity represents the "high water mark" of previous executions. If the desired
|
|
|
|
|
* size is less than this "high water mark," we just need to update size for accounting purposes. Otherwise, we
|
|
|
|
|
* need to actually issue an mprotect syscall. The goal of these optimizations is to reduce mmap and demand
|
|
|
|
|
* paging overhead for repeated instantiations of a WebAssembly module. */
|
|
|
|
|
if (target_size > self->capacity) {
|
|
|
|
|
int rc = mprotect(self, sizeof(struct wasm_memory) + target_size, PROT_READ | PROT_WRITE);
|
|
|
|
|
int rc = mprotect(self->buffer, target_size, PROT_READ | PROT_WRITE);
|
|
|
|
|
if (rc != 0) {
|
|
|
|
|
perror("wasm_memory_expand mprotect");
|
|
|
|
|
return -1;
|
|
|
|
@ -91,6 +147,28 @@ wasm_memory_expand(struct wasm_memory *self, size_t size_to_expand)
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static INLINE void
|
|
|
|
|
wasm_memory_set_size(struct wasm_memory *self, size_t size)
|
|
|
|
|
{
|
|
|
|
|
self->size = size;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static INLINE size_t
|
|
|
|
|
wasm_memory_get_size(struct wasm_memory *self)
|
|
|
|
|
{
|
|
|
|
|
return self->size;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static INLINE void
|
|
|
|
|
wasm_memory_initialize_region(struct wasm_memory *self, uint32_t offset, uint32_t region_size, uint8_t region[])
|
|
|
|
|
{
|
|
|
|
|
assert((size_t)offset + region_size <= self->size);
|
|
|
|
|
memcpy(&self->buffer[offset], region, region_size);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* NOTE: These wasm_memory functions require pointer dereferencing. For this reason, they are not used directly by
 * wasm32 instructions. These functions are intended to be used by the runtime to interact with linear memories. */
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Translates WASM offsets into runtime VM pointers
|
|
|
|
|
* @param offset an offset into the WebAssembly linear memory
|
|
|
|
@ -101,7 +179,7 @@ static INLINE void *
|
|
|
|
|
wasm_memory_get_ptr_void(struct wasm_memory *self, uint32_t offset, uint32_t size)
|
|
|
|
|
{
|
|
|
|
|
assert(offset + size <= self->size);
|
|
|
|
|
return (void *)&self->data[offset];
|
|
|
|
|
return (void *)&self->buffer[offset];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@ -113,7 +191,7 @@ static INLINE char
|
|
|
|
|
wasm_memory_get_char(struct wasm_memory *self, uint32_t offset)
|
|
|
|
|
{
|
|
|
|
|
assert(offset + sizeof(char) <= self->size);
|
|
|
|
|
return *(char *)&self->data[offset];
|
|
|
|
|
return *(char *)&self->buffer[offset];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@ -125,7 +203,7 @@ static INLINE float
|
|
|
|
|
wasm_memory_get_f32(struct wasm_memory *self, uint32_t offset)
|
|
|
|
|
{
|
|
|
|
|
assert(offset + sizeof(float) <= self->size);
|
|
|
|
|
return *(float *)&self->data[offset];
|
|
|
|
|
return *(float *)&self->buffer[offset];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@ -137,7 +215,7 @@ static INLINE double
|
|
|
|
|
wasm_memory_get_f64(struct wasm_memory *self, uint32_t offset)
|
|
|
|
|
{
|
|
|
|
|
assert(offset + sizeof(double) <= self->size);
|
|
|
|
|
return *(double *)&self->data[offset];
|
|
|
|
|
return *(double *)&self->buffer[offset];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@ -149,7 +227,7 @@ static INLINE int8_t
|
|
|
|
|
wasm_memory_get_i8(struct wasm_memory *self, uint32_t offset)
|
|
|
|
|
{
|
|
|
|
|
assert(offset + sizeof(int8_t) <= self->size);
|
|
|
|
|
return *(int8_t *)&self->data[offset];
|
|
|
|
|
return *(int8_t *)&self->buffer[offset];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@ -161,7 +239,7 @@ static INLINE int16_t
|
|
|
|
|
wasm_memory_get_i16(struct wasm_memory *self, uint32_t offset)
|
|
|
|
|
{
|
|
|
|
|
assert(offset + sizeof(int16_t) <= self->size);
|
|
|
|
|
return *(int16_t *)&self->data[offset];
|
|
|
|
|
return *(int16_t *)&self->buffer[offset];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@ -173,7 +251,7 @@ static INLINE int32_t
|
|
|
|
|
wasm_memory_get_i32(struct wasm_memory *self, uint32_t offset)
|
|
|
|
|
{
|
|
|
|
|
assert(offset + sizeof(int32_t) <= self->size);
|
|
|
|
|
return *(int32_t *)&self->data[offset];
|
|
|
|
|
return *(int32_t *)&self->buffer[offset];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@ -185,7 +263,7 @@ static INLINE int64_t
|
|
|
|
|
wasm_memory_get_i64(struct wasm_memory *self, uint32_t offset)
|
|
|
|
|
{
|
|
|
|
|
assert(offset + sizeof(int64_t) <= self->size);
|
|
|
|
|
return *(int64_t *)&self->data[offset];
|
|
|
|
|
return *(int64_t *)&self->buffer[offset];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static INLINE uint32_t
|
|
|
|
@ -206,7 +284,7 @@ wasm_memory_get_string(struct wasm_memory *self, uint32_t offset, uint32_t size)
|
|
|
|
|
assert(offset + (sizeof(char) * size) <= self->size);
|
|
|
|
|
|
|
|
|
|
for (uint32_t i = 0; i < size; i++) {
|
|
|
|
|
if (self->data[offset + i] == '\0') return (char *)&self->data[offset];
|
|
|
|
|
if (self->buffer[offset + i] == '\0') return (char *)&self->buffer[offset];
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
@ -220,7 +298,7 @@ static INLINE void
|
|
|
|
|
wasm_memory_set_f32(struct wasm_memory *self, uint32_t offset, float value)
|
|
|
|
|
{
|
|
|
|
|
assert(offset + sizeof(float) <= self->size);
|
|
|
|
|
*(float *)&self->data[offset] = value;
|
|
|
|
|
*(float *)&self->buffer[offset] = value;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@ -232,7 +310,7 @@ static INLINE void
|
|
|
|
|
wasm_memory_set_f64(struct wasm_memory *self, uint32_t offset, double value)
|
|
|
|
|
{
|
|
|
|
|
assert(offset + sizeof(double) <= self->size);
|
|
|
|
|
*(double *)&self->data[offset] = value;
|
|
|
|
|
*(double *)&self->buffer[offset] = value;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@ -244,7 +322,7 @@ static INLINE void
|
|
|
|
|
wasm_memory_set_i8(struct wasm_memory *self, uint32_t offset, int8_t value)
|
|
|
|
|
{
|
|
|
|
|
assert(offset + sizeof(int8_t) <= self->size);
|
|
|
|
|
*(int8_t *)&self->data[offset] = value;
|
|
|
|
|
*(int8_t *)&self->buffer[offset] = value;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@ -256,7 +334,7 @@ static INLINE void
|
|
|
|
|
wasm_memory_set_i16(struct wasm_memory *self, uint32_t offset, int16_t value)
|
|
|
|
|
{
|
|
|
|
|
assert(offset + sizeof(int16_t) <= self->size);
|
|
|
|
|
*(int16_t *)&self->data[offset] = value;
|
|
|
|
|
*(int16_t *)&self->buffer[offset] = value;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@ -268,7 +346,7 @@ static INLINE void
|
|
|
|
|
wasm_memory_set_i32(struct wasm_memory *self, uint32_t offset, int32_t value)
|
|
|
|
|
{
|
|
|
|
|
assert(offset + sizeof(int32_t) <= self->size);
|
|
|
|
|
*(int32_t *)&self->data[offset] = value;
|
|
|
|
|
*(int32_t *)&self->buffer[offset] = value;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@ -280,24 +358,5 @@ static INLINE void
|
|
|
|
|
wasm_memory_set_i64(struct wasm_memory *self, uint64_t offset, int64_t value)
|
|
|
|
|
{
|
|
|
|
|
assert(offset + sizeof(int64_t) <= self->size);
|
|
|
|
|
*(int64_t *)&self->data[offset] = value;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static INLINE void
|
|
|
|
|
wasm_memory_set_size(struct wasm_memory *self, size_t size)
|
|
|
|
|
{
|
|
|
|
|
self->size = size;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static INLINE size_t
|
|
|
|
|
wasm_memory_get_size(struct wasm_memory *self)
|
|
|
|
|
{
|
|
|
|
|
return self->size;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static INLINE void
|
|
|
|
|
wasm_memory_initialize_region(struct wasm_memory *self, uint32_t offset, uint32_t region_size, uint8_t region[])
|
|
|
|
|
{
|
|
|
|
|
assert((size_t)offset + region_size <= self->size);
|
|
|
|
|
memcpy(&self->data[offset], region, region_size);
|
|
|
|
|
*(int64_t *)&self->buffer[offset] = value;
|
|
|
|
|
}
|
|
|
|
|