Merge pull request #287 from gwsystems/spec-alignment-2
feat: WIP restage of memory allocation features
commit af016f2a40
@@ -1,42 +1,17 @@
#include <assert.h>

#include "types.h"
#include "wasm_module_instance.h"

extern thread_local struct wasm_module_instance current_wasm_module_instance;

INLINE void
add_function_to_table(uint32_t idx, uint32_t type_id, char *pointer)
{
        assert(idx < INDIRECT_TABLE_SIZE);
        assert(local_sandbox_context_cache.module_indirect_table != NULL);

        /* TODO: atomic for multiple concurrent invocations? Issue #97 */
        if (local_sandbox_context_cache.module_indirect_table[idx].type_id == type_id
            && local_sandbox_context_cache.module_indirect_table[idx].func_pointer == pointer)
                return;

        local_sandbox_context_cache.module_indirect_table[idx] = (struct indirect_table_entry){
                .type_id = type_id, .func_pointer = pointer
        };
        wasm_table_set(current_wasm_module_instance.table, idx, type_id, pointer);
}

/* char * is used as a generic pointer to a function pointer */
INLINE char *
get_function_from_table(uint32_t idx, uint32_t type_id)
{
#ifdef LOG_FUNCTION_TABLE
        fprintf(stderr, "get_function_from_table(idx: %u, type_id: %u)\n", idx, type_id);
        fprintf(stderr, "indirect_table_size: %u\n", INDIRECT_TABLE_SIZE);
#endif
        assert(idx < INDIRECT_TABLE_SIZE);

        struct indirect_table_entry f = local_sandbox_context_cache.module_indirect_table[idx];
#ifdef LOG_FUNCTION_TABLE
        fprintf(stderr, "assumed type: %u, type in table: %u\n", type_id, f.type_id);
#endif
        // FIXME: Commented out function type check because of gocr
        // assert(f.type_id == type_id);

        assert(f.func_pointer != NULL);

        return f.func_pointer;
        return wasm_table_get(current_wasm_module_instance.table, idx, type_id);
}
@@ -0,0 +1,59 @@
#pragma once

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include "current_sandbox.h"
#include "debuglog.h"
#include "http.h"
#include "http_total.h"
#include "likely.h"
#include "sandbox_types.h"
#include "scheduler.h"
#include "panic.h"

/**
 * Sends Response Back to Client
 * @return RC. -1 on Failure
 */
static inline int
current_sandbox_send_response()
{
        struct sandbox *sandbox = current_sandbox_get();
        assert(sandbox != NULL);
        struct vec_u8 *response = &sandbox->response;
        assert(response != NULL);

        int rc;

        /* Determine values to template into our HTTP response */
        size_t      response_body_size  = response->length;
        char *      module_content_type = sandbox->module->response_content_type;
        const char *content_type        = strlen(module_content_type) > 0 ? module_content_type : "text/plain";

        /* Capture Timekeeping data for end-to-end latency */
        uint64_t end_time   = __getcycles();
        sandbox->total_time = end_time - sandbox->timestamp_of.request_arrival;

        /* Send HTTP Response Header and Body */
        rc = http_header_200_write(sandbox->client_socket_descriptor, module_content_type, response_body_size);
        if (rc < 0) goto err;

        rc = client_socket_send(sandbox->client_socket_descriptor, (const char *)response->buffer, response_body_size,
                                current_sandbox_sleep);
        if (rc < 0) goto err;

        http_total_increment_2xx();
        rc = 0;

done:
        return rc;
err:
        debuglog("Error sending to client: %s", strerror(errno));
        rc = -1;
        goto done;
}
@@ -0,0 +1,7 @@
#pragma once

#include "wasm_module_instance.h"

extern thread_local struct wasm_module_instance current_wasm_module_instance;

extern void current_wasm_module_instance_memory_writeback(void);
@@ -1,5 +1,9 @@
#pragma once

#include "deque.h"
#include "global_request_scheduler.h"
#include "sandbox_types.h"

DEQUE_PROTOTYPE(sandbox, struct sandbox *)

void global_request_scheduler_deque_initialize();
@@ -1,91 +0,0 @@
#pragma once

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/socket.h>

#include "debuglog.h"
#include "deque.h"
#include "http_total.h"
#include "module.h"
#include "runtime.h"
#include "sandbox_state.h"

struct sandbox_request {
        uint64_t        id;
        struct module * module;
        int             socket_descriptor;
        struct sockaddr socket_address;
        uint64_t        request_arrival_timestamp; /* cycles */
        uint64_t        absolute_deadline;         /* cycles */

        /*
         * Unitless estimate of the instantaneous fraction of system capacity required to run the request
         * Calculated by estimated execution time (cycles) * runtime_admissions_granularity / relative deadline (cycles)
         */
        uint64_t admissions_estimate;
};

DEQUE_PROTOTYPE(sandbox, struct sandbox_request *)

/* Count of the total number of requests we've ever allocated. Never decrements as it is used to generate IDs */
extern _Atomic uint32_t sandbox_request_count;

static inline void
sandbox_request_count_initialize()
{
        atomic_init(&sandbox_request_count, 0);
}

static inline uint32_t
sandbox_request_count_postfix_increment()
{
        return atomic_fetch_add(&sandbox_request_count, 1);
}

static inline void
sandbox_request_log_allocation(struct sandbox_request *sandbox_request)
{
#ifdef LOG_REQUEST_ALLOCATION
        debuglog("Sandbox Request %lu: of %s:%d\n", sandbox_request->id, sandbox_request->module->name,
                 sandbox_request->module->port);
#endif
}

/**
 * Allocates a new Sandbox Request and places it on the Global Deque
 * @param module the module we want to request
 * @param socket_descriptor
 * @param socket_address
 * @param request_arrival_timestamp the timestamp of when we received the request from the network (in cycles)
 * @return the new sandbox request
 */
static inline struct sandbox_request *
sandbox_request_allocate(struct module *module, int socket_descriptor, const struct sockaddr *socket_address,
                         uint64_t request_arrival_timestamp, uint64_t admissions_estimate)
{
        struct sandbox_request *sandbox_request = (struct sandbox_request *)malloc(sizeof(struct sandbox_request));
        assert(sandbox_request);

        /* Sets the ID to the value before the increment */
        sandbox_request->id = sandbox_request_count_postfix_increment();

        sandbox_request->module            = module;
        sandbox_request->socket_descriptor = socket_descriptor;
        memcpy(&sandbox_request->socket_address, socket_address, sizeof(struct sockaddr));
        sandbox_request->request_arrival_timestamp = request_arrival_timestamp;
        sandbox_request->absolute_deadline         = request_arrival_timestamp + module->relative_deadline;

        /*
         * Admissions Control State
         * Assumption: an estimate of 0 should have been interpreted as a rejection
         */
        assert(admissions_estimate != 0);
        sandbox_request->admissions_estimate = admissions_estimate;

        sandbox_request_log_allocation(sandbox_request);

        return sandbox_request;
}
@@ -1,74 +0,0 @@
#pragma once

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include "current_sandbox.h"
#include "http.h"
#include "http_total.h"
#include "likely.h"
#include "sandbox_types.h"
#include "scheduler.h"
#include "panic.h"

/**
 * Sends Response Back to Client
 * @return RC. -1 on Failure
 */
static inline int
sandbox_send_response(struct sandbox *sandbox)
{
        assert(sandbox != NULL);
        /* Assumption: The HTTP Request Buffer immediately precedes the HTTP Response Buffer,
         * meaning that when we prepend, we are overwriting the tail of the HTTP request buffer */
        assert(sandbox->request.base + sandbox->module->max_request_size == sandbox->response.base);

        int rc;

        /* Determine values to template into our HTTP response */
        ssize_t response_body_size = sandbox->response.length;
        char    content_length[20] = { 0 };
        sprintf(content_length, "%zu", response_body_size);
        char *module_content_type = sandbox->module->response_content_type;
        char *content_type        = strlen(module_content_type) > 0 ? module_content_type : "text/plain";

        /* Prepend HTTP Response Headers */
        size_t response_header_size = http_response_200_size(content_type, content_length);
        char * response_header      = sandbox->response.base - response_header_size;
        rc = http_response_200(response_header, content_type, content_length);
        if (rc < 0) goto err;

        /* Capture Timekeeping data for end-to-end latency */
        uint64_t end_time   = __getcycles();
        sandbox->total_time = end_time - sandbox->timestamp_of.request_arrival;

        /* Send HTTP Response */
        int    sent          = 0;
        size_t response_size = response_header_size + response_body_size;
        while (sent < response_size) {
                rc = write(sandbox->client_socket_descriptor, response_header, response_size - sent);
                if (rc < 0) {
                        if (errno == EAGAIN)
                                current_sandbox_sleep();
                        else {
                                perror("write");
                                goto err;
                        }
                }
                sent += rc;
        }

        http_total_increment_2xx();
        rc = 0;

done:
        return rc;
err:
        rc = -1;
        goto done;
}
@@ -0,0 +1,30 @@
#pragma once

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include "arch/context.h"
#include "current_sandbox.h"
#include "ps_list.h"
#include "sandbox_state_history.h"
#include "sandbox_types.h"

/**
 * Transitions a sandbox to the SANDBOX_ALLOCATED state.
 * This is the initial state, so there is no concept of "last state" here
 * @param sandbox
 */
static inline void
sandbox_set_as_allocated(struct sandbox *sandbox)
{
        assert(sandbox);
        assert(sandbox->state == SANDBOX_UNINITIALIZED);
        uint64_t now = __getcycles();

        /* State Change Bookkeeping */
        sandbox->timestamp_of.last_state_change = now;
        sandbox_state_history_init(&sandbox->state_history);
        sandbox_state_history_append(&sandbox->state_history, SANDBOX_ALLOCATED);
        sandbox_state_totals_increment(SANDBOX_ALLOCATED);
}
@@ -0,0 +1,19 @@
#pragma once

#include <stdatomic.h>
#include <stdint.h>

/* Count of the total number of requests we've ever allocated. Never decrements as it is used to generate IDs */
extern _Atomic uint32_t sandbox_total;

static inline void
sandbox_total_initialize()
{
        atomic_init(&sandbox_total, 0);
}

static inline uint32_t
sandbox_total_postfix_increment()
{
        return atomic_fetch_add(&sandbox_total, 1);
}
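The sandbox_total counter above takes over ID generation from the removed sandbox_request_count. A minimal usage sketch follows (illustrative only, not part of this commit; the wrapper function name is hypothetical): initialize the counter once during runtime startup, then treat each post-increment value as the next sandbox ID.

#include "sandbox_total.h"

/* Hypothetical helper illustrating the intended pattern (not in the commit) */
static inline uint64_t
example_next_sandbox_id(void)
{
        /* Post-increment: the first sandbox gets ID 0, and IDs never repeat */
        return (uint64_t)sandbox_total_postfix_increment();
}

/* During runtime bootstrap, before any worker threads run:
 *     sandbox_total_initialize();
 */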
@@ -0,0 +1,113 @@
#pragma once

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

struct vec_u8 {
        size_t   length;
        size_t   capacity;
        uint8_t *buffer;
};

static inline struct vec_u8 *vec_u8_alloc(void);
static inline int            vec_u8_init(struct vec_u8 *self, size_t capacity);
static inline struct vec_u8 *vec_u8_new(size_t capacity);
static inline void           vec_u8_deinit(struct vec_u8 *self);
static inline void           vec_u8_free(struct vec_u8 *self);
static inline void           vec_u8_delete(struct vec_u8 *self);

/**
 * Allocates an uninitialized vec on the heap
 * @returns a pointer to an uninitialized vec on the heap
 */
static inline struct vec_u8 *
vec_u8_alloc(void)
{
        return (struct vec_u8 *)calloc(1, sizeof(struct vec_u8));
}

/**
 * Initializes a vec, allocating a backing buffer for the provided capacity
 * @param self pointer to an uninitialized vec
 * @param capacity
 * @returns 0 on success, -1 on failure
 */
static inline int
vec_u8_init(struct vec_u8 *self, size_t capacity)
{
        if (capacity == 0) {
                self->buffer = NULL;
        } else {
                self->buffer = calloc(capacity, sizeof(uint8_t));
                if (self->buffer == NULL) return -1;
        }

        self->length   = 0;
        self->capacity = capacity;

        return 0;
}

/**
 * Allocate and initialize a vec with a backing buffer
 * @param capacity
 * @returns a pointer to an initialized vec on the heap, ready for use
 */
static inline struct vec_u8 *
vec_u8_new(size_t capacity)
{
        struct vec_u8 *self = vec_u8_alloc();
        if (self == NULL) return self;

        int rc = vec_u8_init(self, capacity);
        if (rc < 0) {
                vec_u8_free(self);
                return NULL;
        }

        return self;
}

/**
 * Deinitialize a vec, clearing out members and releasing the backing buffer
 * @param self
 */
static inline void
vec_u8_deinit(struct vec_u8 *self)
{
        if (self->capacity == 0) {
                assert(self->buffer == NULL);
                assert(self->length == 0);
                return;
        }

        assert(self->buffer != NULL);
        free(self->buffer);
        self->buffer   = NULL;
        self->length   = 0;
        self->capacity = 0;
}

/**
 * Frees a vec struct allocated on the heap
 * Assumes that the vec has already been deinitialized
 */
static inline void
vec_u8_free(struct vec_u8 *self)
{
        assert(self->buffer == NULL);
        assert(self->length == 0);
        assert(self->capacity == 0);
        free(self);
}

/**
 * Deinitializes and frees a vec allocated to the heap
 * @param self
 */
static inline void
vec_u8_delete(struct vec_u8 *self)
{
        vec_u8_deinit(self);
        vec_u8_free(self);
}
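A minimal lifecycle sketch for the vec_u8 header above (illustrative only, not part of this commit; the 1 KiB capacity is an arbitrary example value):

#include <stdio.h>

#include "vec_u8.h"

static void
example_vec_u8_roundtrip(void)
{
        /* Allocate the struct and a 1 KiB backing buffer in one call */
        struct vec_u8 *v = vec_u8_new(1024);
        if (v == NULL) {
                perror("vec_u8_new");
                return;
        }

        /* Callers write into buffer directly and track usage via length */
        v->buffer[0] = 0xFF;
        v->length    = 1;

        /* Deinitializes (freeing the backing buffer), then frees the struct */
        vec_u8_delete(v);
}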
@@ -0,0 +1,363 @@
#pragma once

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#include "likely.h"
#include "types.h" /* PAGE_SIZE */
#include "wasm_types.h"

#define WASM_MEMORY_MAX           ((size_t)UINT32_MAX + 1)
#define WASM_MEMORY_SIZE_TO_ALLOC ((size_t)WASM_MEMORY_MAX + /* guard page */ PAGE_SIZE)

struct wasm_memory {
        size_t   size;     /* Initial Size in bytes */
        size_t   capacity; /* Size backed by actual pages */
        size_t   max;      /* Soft cap in bytes. Defaults to 4GB */
        uint8_t *buffer;
};

static INLINE struct wasm_memory *wasm_memory_alloc(void);
static INLINE int                 wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max);
static INLINE struct wasm_memory *wasm_memory_new(size_t initial, size_t max);
static INLINE void                wasm_memory_deinit(struct wasm_memory *self);
static INLINE void                wasm_memory_free(struct wasm_memory *self);
static INLINE void                wasm_memory_delete(struct wasm_memory *self);


static INLINE struct wasm_memory *
wasm_memory_alloc(void)
{
        return malloc(sizeof(struct wasm_memory));
}

static INLINE int
wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max)
{
        assert(self != NULL);

        /* We assume WASI modules, which are required to declare and export a linear memory with a non-zero size to
         * allow a standard lib to initialize. Technically, a WebAssembly module that exports pure functions may not use
         * a linear memory */
        assert(initial > 0);
        assert(initial <= (size_t)UINT32_MAX + 1);
        assert(max > 0);
        assert(max <= (size_t)UINT32_MAX + 1);

        /* Allocate buffer of contiguous virtual addresses for full wasm32 linear memory and guard page */
        self->buffer = mmap(NULL, WASM_MEMORY_SIZE_TO_ALLOC, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (self->buffer == MAP_FAILED) return -1;

        /* Set the initial bytes to read / write */
        int rc = mprotect(self->buffer, initial, PROT_READ | PROT_WRITE);
        if (rc != 0) {
                munmap(self->buffer, WASM_MEMORY_SIZE_TO_ALLOC);
                return -1;
        }

        self->size     = initial;
        self->capacity = initial;
        self->max      = max;

        return 0;
}

static INLINE struct wasm_memory *
wasm_memory_new(size_t initial, size_t max)
{
        struct wasm_memory *self = wasm_memory_alloc();
        if (self == NULL) return self;

        int rc = wasm_memory_init(self, initial, max);
        if (rc < 0) {
                assert(0);
                wasm_memory_free(self);
                return NULL;
        }

        return self;
}

static INLINE void
wasm_memory_deinit(struct wasm_memory *self)
{
        assert(self != NULL);
        assert(self->buffer != NULL);

        munmap(self->buffer, WASM_MEMORY_SIZE_TO_ALLOC);
        self->buffer   = NULL;
        self->size     = 0;
        self->capacity = 0;
        self->max      = 0;
}

static INLINE void
wasm_memory_free(struct wasm_memory *self)
{
        assert(self != NULL);
        /* Assume prior deinitialization so we don't leak buffers */
        assert(self->buffer == NULL);

        free(self);
}

static INLINE void
wasm_memory_delete(struct wasm_memory *self)
{
        assert(self != NULL);

        wasm_memory_deinit(self);
        wasm_memory_free(self);
}

static INLINE void
wasm_memory_wipe(struct wasm_memory *self)
{
        memset(self->buffer, 0, self->size);
}

static INLINE int
wasm_memory_expand(struct wasm_memory *self, size_t size_to_expand)
{
        size_t target_size = self->size + size_to_expand;
        if (unlikely(target_size > self->max)) {
                fprintf(stderr, "wasm_memory_expand - Out of Memory! %lu out of %lu\n", self->size, self->max);
                return -1;
        }

        /* If recycling a wasm_memory from an object pool, a previous execution may have already expanded to or
         * beyond what we need. The capacity represents the "high water mark" of previous executions. If the desired
         * size is less than this "high water mark," we just need to update size for accounting purposes. Otherwise, we
         * need to actually issue an mprotect syscall. The goal of these optimizations is to reduce mmap and demand
         * paging overhead for repeated instantiations of a WebAssembly module. */
        if (target_size > self->capacity) {
                int rc = mprotect(self->buffer, target_size, PROT_READ | PROT_WRITE);
                if (rc != 0) {
                        perror("wasm_memory_expand mprotect");
                        return -1;
                }

                self->capacity = target_size;
        }

        self->size = target_size;
        return 0;
}

static INLINE void
wasm_memory_set_size(struct wasm_memory *self, size_t size)
{
        self->size = size;
}

static INLINE size_t
wasm_memory_get_size(struct wasm_memory *self)
{
        return self->size;
}

static INLINE void
wasm_memory_initialize_region(struct wasm_memory *self, uint32_t offset, uint32_t region_size, uint8_t region[])
{
        assert((size_t)offset + region_size <= self->size);
        memcpy(&self->buffer[offset], region, region_size);
}

/* NOTE: These wasm_memory functions require pointer dereferencing. For this reason, they are not used directly by
 * wasm32 instructions. These functions are intended to be used by the runtime to interact with linear memories. */

/**
 * Translates WASM offsets into runtime VM pointers
 * @param offset an offset into the WebAssembly linear memory
 * @param bounds_check the size of the thing we are pointing to
 * @return void pointer to something in WebAssembly linear memory
 */
static INLINE void *
wasm_memory_get_ptr_void(struct wasm_memory *self, uint32_t offset, uint32_t size)
{
        assert(offset + size <= self->size);
        return (void *)&self->buffer[offset];
}

/**
 * Get an ASCII character from WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @return char at the offset
 */
static INLINE char
wasm_memory_get_char(struct wasm_memory *self, uint32_t offset)
{
        assert(offset + sizeof(char) <= self->size);
        return *(char *)&self->buffer[offset];
}

/**
 * Get a float from WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @return float at the offset
 */
static INLINE float
wasm_memory_get_f32(struct wasm_memory *self, uint32_t offset)
{
        assert(offset + sizeof(float) <= self->size);
        return *(float *)&self->buffer[offset];
}

/**
 * Get a double from WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @return double at the offset
 */
static INLINE double
wasm_memory_get_f64(struct wasm_memory *self, uint32_t offset)
{
        assert(offset + sizeof(double) <= self->size);
        return *(double *)&self->buffer[offset];
}

/**
 * Get an int8_t from WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @return int8_t at the offset
 */
static INLINE int8_t
wasm_memory_get_i8(struct wasm_memory *self, uint32_t offset)
{
        assert(offset + sizeof(int8_t) <= self->size);
        return *(int8_t *)&self->buffer[offset];
}

/**
 * Get an int16_t from WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @return int16_t at the offset
 */
static INLINE int16_t
wasm_memory_get_i16(struct wasm_memory *self, uint32_t offset)
{
        assert(offset + sizeof(int16_t) <= self->size);
        return *(int16_t *)&self->buffer[offset];
}

/**
 * Get an int32_t from WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @return int32_t at the offset
 */
static INLINE int32_t
wasm_memory_get_i32(struct wasm_memory *self, uint32_t offset)
{
        assert(offset + sizeof(int32_t) <= self->size);
        return *(int32_t *)&self->buffer[offset];
}

/**
 * Get an int64_t from WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @return int64_t at the offset
 */
static INLINE int64_t
wasm_memory_get_i64(struct wasm_memory *self, uint32_t offset)
{
        assert(offset + sizeof(int64_t) <= self->size);
        return *(int64_t *)&self->buffer[offset];
}

static INLINE uint32_t
wasm_memory_get_page_count(struct wasm_memory *self)
{
        return (uint32_t)(self->size / WASM_PAGE_SIZE);
}

/**
 * Get a null-terminated String from WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @param size the maximum expected length in characters
 * @return pointer to the string or NULL if max_length is reached without finding null-terminator
 */
static INLINE char *
wasm_memory_get_string(struct wasm_memory *self, uint32_t offset, uint32_t size)
{
        assert(offset + (sizeof(char) * size) <= self->size);

        if (strnlen((const char *)&self->buffer[offset], size) < size) {
                return (char *)&self->buffer[offset];
        } else {
                return NULL;
        }
}

/**
 * Write a float to WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @param value the float to write
 */
static INLINE void
wasm_memory_set_f32(struct wasm_memory *self, uint32_t offset, float value)
{
        assert(offset + sizeof(float) <= self->size);
        *(float *)&self->buffer[offset] = value;
}

/**
 * Write a double to WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @param value the double to write
 */
static INLINE void
wasm_memory_set_f64(struct wasm_memory *self, uint32_t offset, double value)
{
        assert(offset + sizeof(double) <= self->size);
        *(double *)&self->buffer[offset] = value;
}

/**
 * Write an int8_t to WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @param value the int8_t to write
 */
static INLINE void
wasm_memory_set_i8(struct wasm_memory *self, uint32_t offset, int8_t value)
{
        assert(offset + sizeof(int8_t) <= self->size);
        *(int8_t *)&self->buffer[offset] = value;
}

/**
 * Write an int16_t to WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @param value the int16_t to write
 */
static INLINE void
wasm_memory_set_i16(struct wasm_memory *self, uint32_t offset, int16_t value)
{
        assert(offset + sizeof(int16_t) <= self->size);
        *(int16_t *)&self->buffer[offset] = value;
}

/**
 * Write an int32_t to WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @param value the int32_t to write
 */
static INLINE void
wasm_memory_set_i32(struct wasm_memory *self, uint32_t offset, int32_t value)
{
        assert(offset + sizeof(int32_t) <= self->size);
        *(int32_t *)&self->buffer[offset] = value;
}

/**
 * Write an int64_t to WebAssembly linear memory
 * @param offset an offset into the WebAssembly linear memory
 * @param value the int64_t to write
 */
static INLINE void
wasm_memory_set_i64(struct wasm_memory *self, uint64_t offset, int64_t value)
{
        assert(offset + sizeof(int64_t) <= self->size);
        *(int64_t *)&self->buffer[offset] = value;
}
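A compact usage sketch for the wasm_memory API above (illustrative only, not part of this commit). It assumes WASM_PAGE_SIZE is the 64 KiB WebAssembly page constant from wasm_types.h; the initial and maximum sizes are arbitrary example values.

#include "wasm_memory.h"

static int
example_wasm_memory_usage(void)
{
        /* Reserve the full wasm32 address range, with one page readable/writable and a soft cap of 16 pages */
        struct wasm_memory *mem = wasm_memory_new(WASM_PAGE_SIZE, 16 * WASM_PAGE_SIZE);
        if (mem == NULL) return -1;

        /* Grow by one page; mprotect is only issued when crossing the capacity high-water mark */
        if (wasm_memory_expand(mem, WASM_PAGE_SIZE) < 0) {
                wasm_memory_delete(mem);
                return -1;
        }

        /* Typed accessors bounds-check offsets against the current size */
        wasm_memory_set_i32(mem, 0, 42);
        int32_t value = wasm_memory_get_i32(mem, 0);
        (void)value;

        wasm_memory_delete(mem);
        return 0;
}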
@@ -0,0 +1,12 @@
#pragma once
#include "wasm_memory.h"
#include "wasm_table.h"

/* This structure is the runtime representation of the unique state of a module instance
 * Currently this is not spec-compliant, as it only supports a single table and a single memory and it excludes many
 * entities https://webassembly.github.io/spec/core/exec/runtime.html#module-instances
 */
struct wasm_module_instance {
        struct wasm_memory memory;
        struct wasm_table *table;
};
@@ -0,0 +1,69 @@
#pragma once

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#include "likely.h"
#include "sandbox_types.h"
#include "types.h"

struct wasm_stack {
        size_t   capacity; /* Usable capacity. Excludes size of guard page that we need to free */
        uint8_t *high;     /* The highest address of the stack. Grows down from here */
        uint8_t *low;      /* The address of the lowest usable address. Above guard page */
        uint8_t *buffer;   /* Points to Guard Page */
};

/**
 * Allocates a static sized stack for a sandbox with a guard page underneath
 * Because a stack grows down, this protects against stack overflow
 * TODO: Should this use MAP_GROWSDOWN to enable demand paging for the stack?
 * @param stack the stack that we want to allocate
 * @param capacity the usable stack size in bytes
 * @returns 0 on success, -1 on error
 */
static INLINE int
wasm_stack_allocate(struct wasm_stack *stack, size_t capacity)
{
        assert(stack);

        int rc = 0;

        stack->buffer = (uint8_t *)mmap(NULL, /* guard page */ PAGE_SIZE + capacity, PROT_NONE,
                                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (unlikely(stack->buffer == MAP_FAILED)) {
                perror("sandbox allocate stack");
                goto err_stack_allocation_failed;
        }

        stack->low = (uint8_t *)mmap(stack->buffer + /* guard page */ PAGE_SIZE, capacity, PROT_READ | PROT_WRITE,
                                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        if (unlikely(stack->low == MAP_FAILED)) {
                perror("sandbox set stack read/write");
                goto err_stack_prot_failed;
        }

        stack->capacity = capacity;
        stack->high     = stack->low + capacity;

        rc = 0;
done:
        return rc;
err_stack_prot_failed:
        rc = munmap(stack->buffer, PAGE_SIZE + capacity);
        if (rc == -1) perror("munmap");
err_stack_allocation_failed:
        stack->buffer = NULL;
        rc            = -1;
        goto done;
}

static INLINE void
wasm_stack_free(struct wasm_stack *stack)
{
        assert(stack != NULL);
        assert(stack->buffer != NULL);
        /* The stack start is the bottom of the usable stack, but we allocated a guard page below this */
        int rc        = munmap(stack->buffer, stack->capacity + PAGE_SIZE);
        stack->buffer = NULL;
        if (unlikely(rc == -1)) perror("munmap");
}
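A short sketch of the stack lifecycle defined above (illustrative only, not part of this commit; the 512 KiB capacity is an arbitrary example). The struct is normally embedded in a sandbox, so the example simply uses a local.

#include "wasm_stack.h"

static int
example_wasm_stack_usage(void)
{
        struct wasm_stack stack;

        if (wasm_stack_allocate(&stack, 512 * 1024) < 0) return -1;

        /* stack.high would be handed to the context-switch code as the initial stack
         * pointer; the PROT_NONE guard page below stack.low traps stack overflow */

        wasm_stack_free(&stack);
        return 0;
}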
@@ -0,0 +1,118 @@
#pragma once

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#include "types.h"

/* memory also provides the table access functions */
#define INDIRECT_TABLE_SIZE (1 << 10)

struct wasm_table_entry {
        uint32_t type_id;
        void *   func_pointer;
};

struct wasm_table {
        uint32_t                 length;
        uint32_t                 capacity;
        struct wasm_table_entry *buffer;
};

static INLINE struct wasm_table *wasm_table_alloc(void);
static INLINE int                wasm_table_init(struct wasm_table *self, size_t capacity);
static INLINE struct wasm_table *wasm_table_new(size_t capacity);
static INLINE void               wasm_table_deinit(struct wasm_table *self);
static INLINE void               wasm_table_free(struct wasm_table *self);
static INLINE void               wasm_table_delete(struct wasm_table *self);

static INLINE struct wasm_table *
wasm_table_alloc(void)
{
        return (struct wasm_table *)malloc(sizeof(struct wasm_table));
}

static INLINE int
wasm_table_init(struct wasm_table *self, size_t capacity)
{
        assert(self != NULL);

        if (capacity > 0) {
                self->buffer = calloc(capacity, sizeof(struct wasm_table_entry));
                if (self->buffer == NULL) return -1;
        }

        self->capacity = capacity;
        self->length   = 0;

        return 0;
}

static INLINE struct wasm_table *
wasm_table_new(size_t capacity)
{
        struct wasm_table *self = wasm_table_alloc();
        if (self == NULL) return NULL;

        int rc = wasm_table_init(self, capacity);
        if (rc < 0) {
                wasm_table_free(self);
                return NULL;
        }

        return self;
}

static INLINE void
wasm_table_deinit(struct wasm_table *self)
{
        assert(self != NULL);

        if (self->capacity == 0) {
                assert(self->buffer == NULL);
                assert(self->length == 0);
                return;
        }

        assert(self->buffer != NULL);
        free(self->buffer);
        self->buffer   = NULL;
        self->length   = 0;
        self->capacity = 0;
}

static INLINE void
wasm_table_free(struct wasm_table *self)
{
        assert(self != NULL);
        free(self);
}

static INLINE void *
wasm_table_get(struct wasm_table *self, uint32_t idx, uint32_t type_id)
{
        assert(self != NULL);
        assert(idx < self->capacity);

        struct wasm_table_entry f = self->buffer[idx];
        // FIXME: Commented out function type check because of gocr
        // assert(f.type_id == type_id);

        assert(f.func_pointer != NULL);

        return f.func_pointer;
}

static INLINE void
wasm_table_set(struct wasm_table *self, uint32_t idx, uint32_t type_id, char *pointer)
{
        assert(self != NULL);
        assert(idx < self->capacity);
        assert(pointer != NULL);

        /* TODO: atomic for multiple concurrent invocations? Issue #97 */
        if (self->buffer[idx].type_id == type_id && self->buffer[idx].func_pointer == pointer) return;

        self->buffer[idx] = (struct wasm_table_entry){ .type_id = type_id, .func_pointer = pointer };
}
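A hedged sketch of how a function table is populated and resolved with the API above (illustrative only, not part of this commit; the slot index, type id, and registered function are made up for the example).

#include "wasm_table.h"

static int
example_add(int a, int b)
{
        return a + b;
}

static void
example_wasm_table_usage(void)
{
        struct wasm_table *table = wasm_table_new(INDIRECT_TABLE_SIZE);
        if (table == NULL) return;

        /* Register slot 3 with an example type id of 7; the runtime passes function
         * pointers around as generic char * */
        wasm_table_set(table, 3, 7, (char *)example_add);

        /* An indirect call resolves the slot back to a callable pointer */
        int (*fn)(int, int) = (int (*)(int, int))wasm_table_get(table, 3, 7);
        int sum = fn(1, 2);
        (void)sum;

        wasm_table_deinit(table);
        wasm_table_free(table);
}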
@@ -0,0 +1,28 @@
#include <stdlib.h>
#include <string.h>

#include "current_sandbox.h"
#include "wasm_module_instance.h"
#include "wasm_memory.h"

thread_local struct wasm_module_instance current_wasm_module_instance = {
        .memory =
          (struct wasm_memory){
            .size     = 0,
            .max      = 0,
            .capacity = 0,
            .buffer   = NULL,
          },
        .table = NULL,
};

/**
 * Because we copy the members of a sandbox when it is set to current_sandbox, current_wasm_module_instance acts as a
 * cache. If we change state by doing something like expanding a member, we have to perform writeback on the sandbox
 * member that we copied from.
 */
void
current_wasm_module_instance_memory_writeback(void)
{
        struct sandbox *current_sandbox = current_sandbox_get();
        memcpy(current_sandbox->memory, &current_wasm_module_instance.memory, sizeof(struct wasm_memory));
}
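A hedged sketch of the cache-then-writeback pattern this file describes (illustrative only, not part of this commit). It assumes the scheduler has already copied the running sandbox's wasm_memory bookkeeping into the thread-local current_wasm_module_instance before the sandbox executes.

#include "current_wasm_module_instance.h"
#include "wasm_memory.h"

static int
example_grow_current_instance_by_one_page(void)
{
        /* Mutate the thread-local cached copy of the running sandbox's linear memory... */
        if (wasm_memory_expand(&current_wasm_module_instance.memory, WASM_PAGE_SIZE) < 0) return -1;

        /* ...then push the updated size/capacity back to the owning sandbox */
        current_wasm_module_instance_memory_writeback();
        return 0;
}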
@@ -1,90 +0,0 @@
#include "current_sandbox.h"
#include "panic.h"
#include "runtime.h"
#include "sandbox_types.h"
#include "types.h"

#include <sys/mman.h>

/**
 * @brief Expand the linear memory of the active WebAssembly sandbox by a single page
 *
 * @return int
 */
int
expand_memory(void)
{
        struct sandbox *sandbox = current_sandbox_get();

        assert(sandbox->state == SANDBOX_RUNNING_USER || sandbox->state == SANDBOX_RUNNING_SYS);
        assert(local_sandbox_context_cache.memory.size % WASM_PAGE_SIZE == 0);

        /* Return -1 if we've hit the linear memory max */
        if (unlikely(local_sandbox_context_cache.memory.size + WASM_PAGE_SIZE
                     >= local_sandbox_context_cache.memory.max)) {
                debuglog("expand_memory - Out of Memory!. %u out of %lu\n", local_sandbox_context_cache.memory.size,
                         local_sandbox_context_cache.memory.max);
                return -1;
        }

        // Remap the relevant wasm page to readable
        char *mem_as_chars = local_sandbox_context_cache.memory.start;
        char *page_address = &mem_as_chars[local_sandbox_context_cache.memory.size];
        void *map_result   = mmap(page_address, WASM_PAGE_SIZE, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        if (map_result == MAP_FAILED) {
                debuglog("Mapping of new memory failed");
                return -1;
        }

        local_sandbox_context_cache.memory.size += WASM_PAGE_SIZE;

#ifdef LOG_SANDBOX_MEMORY_PROFILE
        // Cache the runtime of the first N page allocations
        if (likely(sandbox->timestamp_of.page_allocations_size < SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT)) {
                sandbox->timestamp_of.page_allocations[sandbox->timestamp_of.page_allocations_size++] =
                  sandbox->duration_of_state.running
                  + (uint32_t)(__getcycles() - sandbox->timestamp_of.last_state_change);
        }
#endif

        // local_sandbox_context_cache is "forked state", so update authoritative member
        sandbox->memory.size = local_sandbox_context_cache.memory.size;
        return 0;
}

INLINE char *
get_memory_ptr_for_runtime(uint32_t offset, uint32_t bounds_check)
{
        // Due to how we setup memory for x86, the virtual memory mechanism will catch the error, if bounds <
        // WASM_PAGE_SIZE
        assert(bounds_check < WASM_PAGE_SIZE
               || (local_sandbox_context_cache.memory.size > bounds_check
                   && offset <= local_sandbox_context_cache.memory.size - bounds_check));

        char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
        char *address      = &mem_as_chars[offset];

        return address;
}

/**
 * @brief Stub that implements the WebAssembly memory.grow instruction
 *
 * @param count number of pages to grow the WebAssembly linear memory by
 * @return The previous size of the linear memory in pages or -1 if enough memory cannot be allocated
 */
int32_t
instruction_memory_grow(uint32_t count)
{
        int rc = local_sandbox_context_cache.memory.size / WASM_PAGE_SIZE;

        for (int i = 0; i < count; i++) {
                if (unlikely(expand_memory() != 0)) {
                        rc = -1;
                        break;
                }
        }

        return rc;
}
@@ -1,22 +0,0 @@
#include <assert.h>
#include <string.h>

#include "runtime.h"
#include "types.h"

/* Region initialization helper function */
EXPORT void
initialize_region(uint32_t offset, uint32_t data_count, char *data)
{
        assert(local_sandbox_context_cache.memory.size >= data_count);
        assert(offset < local_sandbox_context_cache.memory.size - data_count);

        memcpy(get_memory_ptr_for_runtime(offset, data_count), data, data_count);
}

/* If we are using runtime globals, we need to populate them */
WEAK void
populate_globals()
{
        assert(0); /* FIXME: is this used in WASM as dynamic modules? Issue #105. */
}
@@ -1,3 +0,0 @@
#include "sandbox_request.h"

_Atomic uint32_t sandbox_request_count = 0;