Merge pull request #287 from gwsystems/spec-alignment-2

feat: WIP restage of memory allocation features
master
Sean McBride 3 years ago committed by GitHub
commit af016f2a40
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -80,7 +80,6 @@
"sandbox_set_as_runnable.h": "c",
"sandbox_set_as_complete.h": "c",
"deque.h": "c",
"sandbox_request.h": "c",
"sandbox_send_response.h": "c",
"sandbox_setup_arguments.h": "c",
"worker_thread.h": "c",
@ -101,7 +100,9 @@
"scheduler.h": "c",
"sandbox_set_as_returned.h": "c",
"software_interrupt_counts.h": "c",
"sandbox_set_as_running_sys.h": "c"
"sandbox_set_as_running_sys.h": "c",
"wasm_module_instance.h": "c",
"wasm_table.h": "c"
},
"files.exclude": {
"**/.git": true,

@ -55,7 +55,7 @@ BINARY_NAME=sledgert
# CFLAGS += -DLOG_LOCK_OVERHEAD
# CFLAGS += -DLOG_MODULE_LOADING
# CFLAGS += -DLOG_PREEMPTION
# CFLAGS += -DLOG_REQUEST_ALLOCATION
# CFLAGS += -DLOG_SANDBOX_ALLOCATION
# Stores and logs extended signal information for each worker
# CFLAGS += -DLOG_SOFTWARE_INTERRUPT_COUNTS
@ -73,10 +73,9 @@ BINARY_NAME=sledgert
# To log, run `call http_total_log()` while in GDB
# CFLAGS += -DLOG_TOTAL_REQS_RESPS
# This flag logs the total number of sandboxes in the various states
# This flag tracks the total number of sandboxes in the various states
# It is useful to debug if sandboxes are "getting caught" in a particular state
# To log, run `call runtime_log_sandbox_states()` while in GDB
# CFLAGS += -DLOG_SANDBOX_COUNT
# CFLAGS += -DSANDBOX_STATE_TOTALS
# This flag enables a per-worker atomic count of the sandboxes in the local runqueue, kept in thread local storage
# Useful to debug if sandboxes are "getting caught" or "leaking" while in a local runqueue
@ -105,8 +104,6 @@ INCLUDES += -Iinclude/ -Ithirdparty/dist/include/
CFILES += src/*.c
CFILES += src/arch/${ARCH}/*.c
CFILES += src/libc/*.c
CFILES += src/memory/common.c
CFILES += src/memory/64bit_nix.c
CFILES += thirdparty/dist/lib/http_parser.o
# Configuring Jasmine

@ -1,78 +1,60 @@
#include <assert.h>
#include "types.h"
#include "current_wasm_module_instance.h"
uint32_t
INLINE uint32_t
instruction_memory_size()
{
return local_sandbox_context_cache.memory.size / WASM_PAGE_SIZE;
return (uint32_t)(current_wasm_module_instance.memory.size / WASM_PAGE_SIZE);
}
// All of these are pretty generic
// These functions are equivalent to those in wasm_memory.h, but they minimize pointer dereferencing
/**
 * Reads a float from linear memory
 * @param offset byte offset into the current instance's linear memory
 * @return the float stored at offset
 */
INLINE float
get_f32(uint32_t offset)
{
	assert(current_wasm_module_instance.memory.buffer != NULL);
	assert(offset + sizeof(float) <= current_wasm_module_instance.memory.size);
	return *(float *)&current_wasm_module_instance.memory.buffer[offset];
}
/**
 * Reads a double from linear memory
 * @param offset byte offset into the current instance's linear memory
 * @return the double stored at offset
 */
INLINE double
get_f64(uint32_t offset)
{
	assert(current_wasm_module_instance.memory.buffer != NULL);
	assert(offset + sizeof(double) <= current_wasm_module_instance.memory.size);
	return *(double *)&current_wasm_module_instance.memory.buffer[offset];
}
/**
 * Reads an int8_t from linear memory
 * @param offset byte offset into the current instance's linear memory
 * @return the int8_t stored at offset
 */
INLINE int8_t
get_i8(uint32_t offset)
{
	assert(current_wasm_module_instance.memory.buffer != NULL);
	assert(offset + sizeof(int8_t) <= current_wasm_module_instance.memory.size);
	return *(int8_t *)&current_wasm_module_instance.memory.buffer[offset];
}
/**
 * Reads an int16_t from linear memory
 * @param offset byte offset into the current instance's linear memory
 * @return the int16_t stored at offset
 */
INLINE int16_t
get_i16(uint32_t offset)
{
	assert(current_wasm_module_instance.memory.buffer != NULL);
	assert(offset + sizeof(int16_t) <= current_wasm_module_instance.memory.size);
	return *(int16_t *)&current_wasm_module_instance.memory.buffer[offset];
}
/**
 * Reads an int32_t from linear memory
 * @param offset byte offset into the current instance's linear memory
 * @return the int32_t stored at offset
 */
INLINE int32_t
get_i32(uint32_t offset)
{
	assert(current_wasm_module_instance.memory.buffer != NULL);
	assert(offset + sizeof(int32_t) <= current_wasm_module_instance.memory.size);
	return *(int32_t *)&current_wasm_module_instance.memory.buffer[offset];
}
/**
 * Reads an int64_t from linear memory
 * @param offset byte offset into the current instance's linear memory
 * @return the int64_t stored at offset
 */
INLINE int64_t
get_i64(uint32_t offset)
{
	assert(current_wasm_module_instance.memory.buffer != NULL);
	assert(offset + sizeof(int64_t) <= current_wasm_module_instance.memory.size);
	return *(int64_t *)&current_wasm_module_instance.memory.buffer[offset];
}
INLINE int32_t
@ -89,79 +71,101 @@ get_global_i64(uint32_t offset)
// Now setting routines
INLINE void
set_f32(uint32_t offset, float v)
set_f32(uint32_t offset, float value)
{
assert(offset + sizeof(float) <= local_sandbox_context_cache.memory.size);
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
void *address = &mem_as_chars[offset];
*(float *)address = v;
assert(current_wasm_module_instance.memory.buffer != NULL);
assert(offset + sizeof(float) <= current_wasm_module_instance.memory.size);
*(float *)&current_wasm_module_instance.memory.buffer[offset] = value;
}
INLINE void
set_f64(uint32_t offset, double v)
set_f64(uint32_t offset, double value)
{
assert(offset + sizeof(double) <= local_sandbox_context_cache.memory.size);
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
void *address = &mem_as_chars[offset];
*(double *)address = v;
assert(current_wasm_module_instance.memory.buffer != NULL);
assert(current_wasm_module_instance.memory.buffer != NULL);
assert(offset + sizeof(double) <= current_wasm_module_instance.memory.size);
*(double *)&current_wasm_module_instance.memory.buffer[offset] = value;
}
INLINE void
set_i8(uint32_t offset, int8_t v)
set_i8(uint32_t offset, int8_t value)
{
assert(offset + sizeof(int8_t) <= local_sandbox_context_cache.memory.size);
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
void *address = &mem_as_chars[offset];
*(int8_t *)address = v;
assert(current_wasm_module_instance.memory.buffer != NULL);
assert(offset + sizeof(int8_t) <= current_wasm_module_instance.memory.size);
*(int8_t *)&current_wasm_module_instance.memory.buffer[offset] = value;
}
INLINE void
set_i16(uint32_t offset, int16_t v)
set_i16(uint32_t offset, int16_t value)
{
assert(offset + sizeof(int16_t) <= local_sandbox_context_cache.memory.size);
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
void *address = &mem_as_chars[offset];
assert(current_wasm_module_instance.memory.buffer != NULL);
*(int16_t *)address = v;
assert(offset + sizeof(int16_t) <= current_wasm_module_instance.memory.size);
*(int16_t *)&current_wasm_module_instance.memory.buffer[offset] = value;
}
INLINE void
set_i32(uint32_t offset, int32_t v)
set_i32(uint32_t offset, int32_t value)
{
assert(offset + sizeof(int32_t) <= local_sandbox_context_cache.memory.size);
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
void *address = &mem_as_chars[offset];
*(int32_t *)address = v;
assert(current_wasm_module_instance.memory.buffer != NULL);
assert(offset + sizeof(int32_t) <= current_wasm_module_instance.memory.size);
*(int32_t *)&current_wasm_module_instance.memory.buffer[offset] = value;
}
INLINE void
set_i64(uint32_t offset, int64_t v)
set_i64(uint32_t offset, int64_t value)
{
assert(offset + sizeof(int64_t) <= local_sandbox_context_cache.memory.size);
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
void *address = &mem_as_chars[offset];
assert(current_wasm_module_instance.memory.buffer != NULL);
assert(offset + sizeof(int64_t) <= current_wasm_module_instance.memory.size);
*(int64_t *)&current_wasm_module_instance.memory.buffer[offset] = value;
}
*(int64_t *)address = v;
/* In this ABI, globals are backed by linear memory, so a global write delegates to set_i32 */
INLINE void
set_global_i32(uint32_t offset, int32_t value)
{
	set_i32(offset, value);
}
/* In this ABI, globals are backed by linear memory, so a global write delegates to set_i64 */
INLINE void
set_global_i64(uint32_t offset, int64_t value)
{
	set_i64(offset, value);
}
/**
 * @brief Stub that implements the WebAssembly memory.grow instruction
 *
 * @param count number of pages to grow the WebAssembly linear memory by
 * @return The previous size of the linear memory in pages or -1 if enough memory cannot be allocated
 */
INLINE int32_t
instruction_memory_grow(uint32_t count)
{
	/* NOTE(review): old_page_count is computed but never used, yet the doc comment above says the
	 * previous size in pages is returned. Confirm whether wasm_memory_expand returns the previous
	 * size (making rc correct) or whether this should `return old_page_count;` instead. */
	int old_page_count = current_wasm_module_instance.memory.size / WASM_PAGE_SIZE;
	/* Return -1 if we've hit the linear memory max */
	int rc = wasm_memory_expand(&current_wasm_module_instance.memory, WASM_PAGE_SIZE * count);
	if (unlikely(rc == -1)) return -1;
	/* We updated "forked state" in current_wasm_module_instance.memory. We need to write this back to persist */
	current_wasm_module_instance_memory_writeback();
#ifdef LOG_SANDBOX_MEMORY_PROFILE
	// Cache the runtime of the first N page allocations
	/* NOTE(review): `sandbox` is not declared in this scope; this block presumably predates the move
	 * off the sandbox struct. Verify it compiles with LOG_SANDBOX_MEMORY_PROFILE defined. */
	for (int i = 0; i < count; i++) {
		if (likely(sandbox->timestamp_of.page_allocations_size < SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT)) {
			sandbox->timestamp_of.page_allocations[sandbox->timestamp_of.page_allocations_size++] =
				sandbox->duration_of_state.running
				+ (uint32_t)(__getcycles() - sandbox->timestamp_of.last_state_change);
		}
	}
#endif
	return rc;
}
INLINE void
set_global_i64(uint32_t offset, int64_t v)
initialize_region(uint32_t offset, uint32_t region_size, uint8_t region[region_size])
{
set_i64(offset, v);
wasm_memory_initialize_region(&current_wasm_module_instance.memory, offset, region_size, region);
}

@ -1,42 +1,17 @@
#include <assert.h>
#include "types.h"
#include "wasm_module_instance.h"
extern thread_local struct wasm_module_instance current_wasm_module_instance;
/**
 * Registers a function pointer and its type id in the current instance's indirect table
 * @param idx table slot index
 * @param type_id type identifier used for call_indirect type checking
 * @param pointer generic pointer to the function to store
 */
INLINE void
add_function_to_table(uint32_t idx, uint32_t type_id, char *pointer)
{
	/* TODO: atomic for multiple concurrent invocations? Issue #97 */
	wasm_table_set(current_wasm_module_instance.table, idx, type_id, pointer);
}
/* char * is used as a generic pointer to a function pointer */
/**
 * Looks up a function pointer in the current instance's indirect table
 * @param idx table slot index
 * @param type_id expected type identifier (checking is delegated to wasm_table_get)
 * @return generic pointer to the function stored at idx
 */
INLINE char *
get_function_from_table(uint32_t idx, uint32_t type_id)
{
	return wasm_table_get(current_wasm_module_instance.table, idx, type_id);
}

@ -2,20 +2,10 @@
#include <assert.h>
#include <dlfcn.h>
#include <stdint.h>
/* Wasm initialization functions generated by the compiler */
#define AWSM_ABI_INITIALIZE_GLOBALS "populate_globals"
#define AWSM_ABI_INITIALIZE_MEMORY "populate_memory"
#define AWSM_ABI_INITIALIZE_TABLE "populate_table"
#define AWSM_ABI_INITIALIZE_LIBC "wasmf___init_libc"
#define AWSM_ABI_ENTRYPOINT "wasmf_main"
/* functions in the module to lookup and call per sandbox. */
typedef int32_t (*awsm_abi_entrypoint_fn_t)(int32_t a, int32_t b);
typedef void (*awsm_abi_init_globals_fn_t)(void);
typedef void (*awsm_abi_init_mem_fn_t)(void);
typedef void (*awsm_abi_init_tbl_fn_t)(void);
typedef void (*awsm_abi_init_libc_fn_t)(int32_t, int32_t);
#include "debuglog.h"
#include "wasm_types.h"
struct awsm_abi {
void * handle;
@ -24,6 +14,8 @@ struct awsm_abi {
awsm_abi_init_tbl_fn_t initialize_tables;
awsm_abi_init_libc_fn_t initialize_libc;
awsm_abi_entrypoint_fn_t entrypoint;
uint32_t starting_pages;
uint32_t max_pages;
};
/* Initializes the ABI object using the *.so file at path */
@ -76,6 +68,27 @@ awsm_abi_init(struct awsm_abi *abi, char *path)
goto dl_error;
}
abi->starting_pages = *(uint32_t *)dlsym(abi->handle, AWSM_ABI_STARTING_PAGES);
if (abi->starting_pages == 0) {
fprintf(stderr, "Failed to resolve symbol %s in %s with error: %s\n", AWSM_ABI_STARTING_PAGES, path,
dlerror());
goto dl_error;
}
abi->max_pages = *(uint32_t *)dlsym(abi->handle, AWSM_ABI_MAX_PAGES);
if (abi->max_pages == 0) {
/* This seems to not always be present. I assume this is only there if the source module explicitly
* specified this */
abi->max_pages = WASM_MEMORY_PAGES_MAX;
debuglog("max_pages symbols not defined. Defaulting to MAX defined by spec.\n");
// TODO: We need to prove that this actually can get generated by awsm
// fprintf(stderr, "Failed to resolve symbol %s in %s with error: %s\n", AWSM_ABI_MAX_PAGES, path,
// dlerror());
// goto dl_error;
}
done:
return rc;
dl_error:

@ -3,6 +3,7 @@
#include <assert.h>
#include <arpa/inet.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
@ -32,53 +33,55 @@ client_socket_close(int client_socket, struct sockaddr *client_address)
}
}
typedef void (*void_cb)(void);
/**
* Rejects request due to admission control or error
* Writes buffer to the client socket
* @param client_socket - the client we are rejecting
* @param status_code - either 503 or 400
* @param buffer - buffer to write to socket
* @param on_eagain - cb to execute when client socket returns EAGAIN. If NULL, error out
* @returns 0 on success, -1 on error.
*/
static inline int
client_socket_send(int client_socket, int status_code)
client_socket_send(int client_socket, const char *buffer, size_t buffer_len, void_cb on_eagain)
{
const char *response;
int rc;
switch (status_code) {
case 503:
response = HTTP_RESPONSE_503_SERVICE_UNAVAILABLE;
http_total_increment_5XX();
break;
case 413:
response = HTTP_RESPONSE_413_PAYLOAD_TOO_LARGE;
http_total_increment_4XX();
break;
case 400:
response = HTTP_RESPONSE_400_BAD_REQUEST;
http_total_increment_4XX();
break;
default:
panic("%d is not a valid status code\n", status_code);
}
int rc;
size_t total_sent = 0;
size_t to_send = strlen(response);
size_t cursor = 0;
while (total_sent < to_send) {
ssize_t sent = write(client_socket, &response[total_sent], to_send - total_sent);
while (cursor < buffer_len) {
ssize_t sent = write(client_socket, &buffer[cursor], buffer_len - cursor);
if (sent < 0) {
if (errno == EAGAIN) { debuglog("Unexpectedly blocking on write of %s\n", response); }
debuglog("Error with %s\n", strerror(errno));
goto send_err;
if (errno == EAGAIN) {
if (on_eagain == NULL) {
rc = -1;
goto done;
}
on_eagain();
} else {
debuglog("Error sending to client: %s", strerror(errno));
rc = -1;
goto done;
}
}
total_sent += sent;
assert(sent > 0);
cursor += (size_t)sent;
};
rc = 0;
done:
return rc;
send_err:
debuglog("Error sending to client: %s", strerror(errno));
rc = -1;
goto done;
}
/**
 * Writes a buffer to the client socket in a single best-effort attempt,
 * failing rather than waiting if the socket would block (EAGAIN)
 * @param client_socket - the client we are writing to
 * @param buffer - buffer to write to socket
 * @param buffer_len - length of buffer in bytes
 * @returns 0 on success, -1 on error
 */
static inline int
client_socket_send_oneshot(int client_socket, const char *buffer, size_t buffer_len)
{
	return client_socket_send(client_socket, buffer, buffer_len, NULL);
}

@ -3,12 +3,11 @@
#include <threads.h>
#include "sandbox_types.h"
#include "current_wasm_module_instance.h"
/* current sandbox that is active.. */
extern thread_local struct sandbox *worker_thread_current_sandbox;
extern thread_local struct sandbox_context_cache local_sandbox_context_cache;
void current_sandbox_start(void);
/**
@ -30,21 +29,21 @@ current_sandbox_set(struct sandbox *sandbox)
{
/* Unpack hierarchy to avoid pointer chasing */
if (sandbox == NULL) {
local_sandbox_context_cache = (struct sandbox_context_cache){
.memory = {
.start = NULL,
.size = 0,
.max = 0,
},
.module_indirect_table = NULL,
current_wasm_module_instance = (struct wasm_module_instance){
.memory =
(struct wasm_memory){
.size = 0,
.capacity = 0,
.max = 0,
.buffer = NULL,
},
.table = NULL,
};
worker_thread_current_sandbox = NULL;
runtime_worker_threads_deadline[worker_thread_idx] = UINT64_MAX;
} else {
local_sandbox_context_cache = (struct sandbox_context_cache){
.memory = sandbox->memory,
.module_indirect_table = sandbox->module->indirect_table,
};
memcpy(&current_wasm_module_instance.memory, sandbox->memory, sizeof(struct wasm_memory));
current_wasm_module_instance.table = sandbox->module->indirect_table,
worker_thread_current_sandbox = sandbox;
runtime_worker_threads_deadline[worker_thread_idx] = sandbox->absolute_deadline;
}
@ -52,3 +51,23 @@ current_sandbox_set(struct sandbox *sandbox)
extern void current_sandbox_sleep();
/* Resolves a guest offset in the current instance's linear memory to a host pointer,
 * bounds-checking bounds_check bytes via wasm_memory_get_ptr_void */
static inline void *
current_sandbox_get_ptr_void(uint32_t offset, uint32_t bounds_check)
{
	/* Linear memory must have been initialized by current_sandbox_set */
	assert(current_wasm_module_instance.memory.capacity > 0);
	return wasm_memory_get_ptr_void(&current_wasm_module_instance.memory, offset, bounds_check);
}
/* Reads a single char at a guest offset in the current instance's linear memory */
static inline char
current_sandbox_get_char(uint32_t offset)
{
	/* Linear memory must have been initialized by current_sandbox_set */
	assert(current_wasm_module_instance.memory.capacity > 0);
	return wasm_memory_get_char(&current_wasm_module_instance.memory, offset);
}
/* Resolves a guest offset to a host string of at most size bytes via wasm_memory_get_string */
static inline char *
current_sandbox_get_string(uint32_t offset, uint32_t size)
{
	/* Consistency with the sibling accessors: linear memory must have been initialized */
	assert(current_wasm_module_instance.memory.capacity > 0);
	return wasm_memory_get_string(&current_wasm_module_instance.memory, offset, size);
}

@ -0,0 +1,59 @@
#pragma once
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "current_sandbox.h"
#include "http.h"
#include "http_total.h"
#include "likely.h"
#include "sandbox_types.h"
#include "scheduler.h"
#include "panic.h"
/**
 * Sends the HTTP response header and body back to the client
 * @return RC. -1 on Failure
 */
static inline int
current_sandbox_send_response()
{
	struct sandbox *sandbox = current_sandbox_get();
	assert(sandbox != NULL);

	struct vec_u8 *response = &sandbox->response;
	assert(response != NULL);

	int rc;

	/* Determine values to template into our HTTP response */
	size_t response_body_size   = response->length;
	char * module_content_type  = sandbox->module->response_content_type;
	/* Fall back to text/plain when the module does not declare a content type */
	const char *content_type = strlen(module_content_type) > 0 ? module_content_type : "text/plain";

	/* Capture Timekeeping data for end-to-end latency */
	uint64_t end_time   = __getcycles();
	sandbox->total_time = end_time - sandbox->timestamp_of.request_arrival;

	/* Send HTTP Response Header and Body.
	 * Fix: pass the resolved content_type (with fallback) rather than module_content_type,
	 * which previously left the "text/plain" fallback computed but unused. */
	rc = http_header_200_write(sandbox->client_socket_descriptor, content_type, response_body_size);
	if (rc < 0) goto err;

	rc = client_socket_send(sandbox->client_socket_descriptor, (const char *)response->buffer, response_body_size,
	                        current_sandbox_sleep);
	if (rc < 0) goto err;

	http_total_increment_2xx();
	rc = 0;

done:
	return rc;
err:
	debuglog("Error sending to client: %s", strerror(errno));
	rc = -1;
	goto done;
}

@ -0,0 +1,7 @@
#pragma once
#include "wasm_module_instance.h"
extern thread_local struct wasm_module_instance current_wasm_module_instance;
extern void current_wasm_module_instance_memory_writeback(void);

@ -2,12 +2,12 @@
#include <stdint.h>
#include "sandbox_request.h"
#include "sandbox_types.h"
/* Returns pointer back if successful, null otherwise */
typedef struct sandbox_request *(*global_request_scheduler_add_fn_t)(void *);
typedef int (*global_request_scheduler_remove_fn_t)(struct sandbox_request **);
typedef int (*global_request_scheduler_remove_if_earlier_fn_t)(struct sandbox_request **, uint64_t);
typedef struct sandbox *(*global_request_scheduler_add_fn_t)(void *);
typedef int (*global_request_scheduler_remove_fn_t)(struct sandbox **);
typedef int (*global_request_scheduler_remove_if_earlier_fn_t)(struct sandbox **, uint64_t);
typedef uint64_t (*global_request_scheduler_peek_fn_t)(void);
struct global_request_scheduler_config {
@ -18,8 +18,8 @@ struct global_request_scheduler_config {
};
void global_request_scheduler_initialize(struct global_request_scheduler_config *config);
struct sandbox_request *global_request_scheduler_add(struct sandbox_request *);
int global_request_scheduler_remove(struct sandbox_request **);
int global_request_scheduler_remove_if_earlier(struct sandbox_request **, uint64_t targed_deadline);
uint64_t global_request_scheduler_peek(void);
void global_request_scheduler_initialize(struct global_request_scheduler_config *config);
struct sandbox *global_request_scheduler_add(struct sandbox *);
int global_request_scheduler_remove(struct sandbox **);
int global_request_scheduler_remove_if_earlier(struct sandbox **, uint64_t targed_deadline);
uint64_t global_request_scheduler_peek(void);

@ -1,5 +1,9 @@
#pragma once
#include "deque.h"
#include "global_request_scheduler.h"
#include "sandbox_types.h"
DEQUE_PROTOTYPE(sandbox, struct sandbox *)
void global_request_scheduler_deque_initialize();

@ -2,6 +2,9 @@
#include <string.h>
#include "http_total.h"
#include "panic.h"
#define HTTP_MAX_HEADER_COUNT 16
#define HTTP_MAX_HEADER_LENGTH 32
#define HTTP_MAX_HEADER_VALUE_LENGTH 64
@ -30,46 +33,55 @@
"Server: SLEdge\r\n" \
"Connection: close\r\n" \
"Content-Type: %s\r\n" \
"Content-Length: %s\r\n" \
"Content-Length: %lu\r\n" \
"\r\n"
/* The sum of format specifier characters in the template above */
#define HTTP_RESPONSE_200_TEMPLATE_FORMAT_SPECIFIER_LENGTH 4
#define HTTP_RESPONSE_200_TEMPLATE_FORMAT_SPECIFIER_LENGTH 5
/**
* Calculates the number of bytes of the HTTP response containing the passed header values
* @return total size in bytes
*/
static inline size_t
http_response_200_size(char *content_type, char *content_length)
static inline int
http_header_200_write(int fd, const char *content_type, size_t content_length)
{
size_t size = 0;
size += strlen(HTTP_RESPONSE_200_TEMPLATE) - HTTP_RESPONSE_200_TEMPLATE_FORMAT_SPECIFIER_LENGTH;
size += strlen(content_type);
size += strlen(content_length);
return size;
return dprintf(fd, HTTP_RESPONSE_200_TEMPLATE, content_type, content_length);
}
/**
* Writes the HTTP response header to the destination. This is assumed to have been sized
* using the value returned by http_response_200_size. We have to use an intermediate buffer
* in order to truncate off the null terminator
* @return 0 on success, -1 otherwise
*/
static inline int
http_response_200(char *destination, char *content_type, char *content_length)
static inline const char *
http_header_build(int status_code)
{
size_t response_size = http_response_200_size(content_type, content_length);
char buffer[response_size + 1];
int rc = 0;
rc = sprintf(buffer, HTTP_RESPONSE_200_TEMPLATE, content_type, content_length);
if (rc <= 0) goto err;
memmove(destination, buffer, response_size);
rc = 0;
const char *response;
int rc;
switch (status_code) {
case 503:
response = HTTP_RESPONSE_503_SERVICE_UNAVAILABLE;
http_total_increment_5XX();
break;
case 413:
response = HTTP_RESPONSE_413_PAYLOAD_TOO_LARGE;
http_total_increment_4XX();
break;
case 400:
response = HTTP_RESPONSE_400_BAD_REQUEST;
http_total_increment_4XX();
break;
default:
panic("%d is not a valid status code\n", status_code);
}
done:
return rc;
err:
rc = -1;
goto done;
return response;
}
/**
 * Length in bytes of the canned response for a supported status code
 * @param status_code 503, 413, or 400 (panics on anything else)
 * @return length of the corresponding static response buffer
 */
static inline int
http_header_len(int status_code)
{
	if (status_code == 503) return strlen(HTTP_RESPONSE_503_SERVICE_UNAVAILABLE);
	if (status_code == 413) return strlen(HTTP_RESPONSE_413_PAYLOAD_TOO_LARGE);
	if (status_code == 400) return strlen(HTTP_RESPONSE_400_BAD_REQUEST);

	panic("%d is not a valid status code\n", status_code);
}

@ -53,8 +53,8 @@ struct module {
/* Handle and ABI Symbols for *.so file */
struct awsm_abi abi;
_Atomic uint32_t reference_count; /* ref count how many instances exist here. */
struct indirect_table_entry indirect_table[INDIRECT_TABLE_SIZE];
_Atomic uint32_t reference_count; /* ref count how many instances exist here. */
struct wasm_table *indirect_table;
};
/*************************

@ -26,7 +26,6 @@
#define RUNTIME_HTTP_RESPONSE_SIZE_MAX 100000000 /* 100 MB */
#define RUNTIME_LOG_FILE "sledge.log"
#define RUNTIME_MAX_EPOLL_EVENTS 128
#define RUNTIME_MAX_SANDBOX_REQUEST_COUNT (1 << 19)
#define RUNTIME_MAX_WORKER_COUNT 32 /* Static buffer size for per-worker globals */
#define RUNTIME_READ_WRITE_VECTOR_LENGTH 16
#define RUNTIME_RELATIVE_DEADLINE_US_MAX 3600000000 /* One Hour. Fits in uint32_t */

@ -6,17 +6,18 @@
#include "client_socket.h"
#include "panic.h"
#include "sandbox_request.h"
#include "sandbox_types.h"
/***************************
* Public API *
**************************/
struct sandbox *sandbox_allocate(struct sandbox_request *sandbox_request);
struct sandbox *sandbox_new(struct module *module, int socket_descriptor, const struct sockaddr *socket_address,
uint64_t request_arrival_timestamp, uint64_t admissions_estimate);
int sandbox_prepare_execution_environment(struct sandbox *sandbox);
void sandbox_free(struct sandbox *sandbox);
void sandbox_main(struct sandbox *sandbox);
void sandbox_switch_to(struct sandbox *next_sandbox);
static inline void
sandbox_close_http(struct sandbox *sandbox)
{
@ -35,9 +36,20 @@ sandbox_close_http(struct sandbox *sandbox)
/**
 * Frees the sandbox's linear memory via the wasm_memory allocator
 * @param sandbox the sandbox whose linear memory is released
 */
static inline void
sandbox_free_linear_memory(struct sandbox *sandbox)
{
	wasm_memory_delete(sandbox->memory);
	sandbox->memory = NULL; /* Defend against use-after-free / double-free */
}
/**
 * Deinitializes the sandbox's HTTP request and response buffers
 * (the previous comment, "Free Linear Memory, leaving stack in place", was stale)
 * @param sandbox
 */
static inline void
sandbox_free_http_buffers(struct sandbox *sandbox)
{
	assert(sandbox);
	vec_u8_deinit(&sandbox->request);
	vec_u8_deinit(&sandbox->response);
}
/**

@ -36,7 +36,7 @@ sandbox_perf_log_print_entry(struct sandbox *sandbox)
* becomes more intelligent, then peak linear memory size needs to be tracked
* seperately from current linear memory size.
*/
fprintf(sandbox_perf_log, "%lu,%s,%d,%s,%lu,%lu,%lu,,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%u,%u\n",
fprintf(sandbox_perf_log, "%lu,%s,%d,%s,%lu,%lu,%lu,,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu,%u,%lu\n",
sandbox->id, sandbox->module->name, sandbox->module->port, sandbox_state_stringify(sandbox->state),
sandbox->module->relative_deadline, sandbox->total_time, queued_duration,
sandbox->duration_of_state[SANDBOX_UNINITIALIZED], sandbox->duration_of_state[SANDBOX_ALLOCATED],
@ -45,7 +45,7 @@ sandbox_perf_log_print_entry(struct sandbox *sandbox)
sandbox->duration_of_state[SANDBOX_RUNNING_SYS], sandbox->duration_of_state[SANDBOX_RUNNING_USER],
sandbox->duration_of_state[SANDBOX_ASLEEP], sandbox->duration_of_state[SANDBOX_RETURNED],
sandbox->duration_of_state[SANDBOX_COMPLETE], sandbox->duration_of_state[SANDBOX_ERROR],
runtime_processor_speed_MHz, sandbox->memory.size);
runtime_processor_speed_MHz, sandbox->memory->size);
}
static inline void

@ -24,11 +24,13 @@ static inline int
sandbox_receive_request(struct sandbox *sandbox)
{
assert(sandbox != NULL);
assert(sandbox->module->max_request_size > 0);
assert(sandbox->request.length == 0);
int rc = 0;
struct vec_u8 *request = &sandbox->request;
assert(request->length == 0);
assert(request->capacity > 0);
while (!sandbox->http_request.message_end) {
/* Read from the Socket */
@ -36,16 +38,18 @@ sandbox_receive_request(struct sandbox *sandbox)
http_parser * parser = &sandbox->http_parser;
const http_parser_settings *settings = http_parser_settings_get();
if (sandbox->module->max_request_size <= sandbox->request.length) {
size_t request_length = request->length;
size_t request_capacity = request->capacity;
if (request_length >= request_capacity) {
debuglog("Sandbox %lu: Ran out of Request Buffer before message end\n", sandbox->id);
goto err_nobufs;
}
ssize_t bytes_received = recv(sandbox->client_socket_descriptor,
&sandbox->request.base[sandbox->request.length],
sandbox->module->max_request_size - sandbox->request.length, 0);
ssize_t bytes_received = recv(sandbox->client_socket_descriptor, &request->buffer[request_length],
request_capacity - request_length, 0);
if (bytes_received == -1) {
if (bytes_received < 0) {
if (errno == EAGAIN) {
current_sandbox_sleep();
continue;
@ -70,24 +74,26 @@ sandbox_receive_request(struct sandbox *sandbox)
goto err;
}
assert(bytes_received > 0);
#ifdef LOG_HTTP_PARSER
debuglog("Sandbox: %lu http_parser_execute(%p, %p, %p, %zu\n)", sandbox->id, parser, settings,
&sandbox->request.base[sandbox->request.length], bytes_received);
#endif
size_t bytes_parsed = http_parser_execute(parser, settings,
&sandbox->request.base[sandbox->request.length],
bytes_received);
(const char *)&request->buffer[request_length],
(size_t)bytes_received);
if (bytes_parsed != bytes_received) {
if (bytes_parsed != (size_t)bytes_received) {
debuglog("Error: %s, Description: %s\n",
http_errno_name((enum http_errno)sandbox->http_parser.http_errno),
http_errno_description((enum http_errno)sandbox->http_parser.http_errno));
debuglog("Length Parsed %zu, Length Read %zu\n", bytes_parsed, bytes_received);
debuglog("Length Parsed %zu, Length Read %zu\n", bytes_parsed, (size_t)bytes_received);
debuglog("Error parsing socket %d\n", sandbox->client_socket_descriptor);
goto err;
}
sandbox->request.length += bytes_parsed;
request->length += bytes_parsed;
}
rc = 0;

@ -1,91 +0,0 @@
#pragma once
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/socket.h>
#include "debuglog.h"
#include "deque.h"
#include "http_total.h"
#include "module.h"
#include "runtime.h"
#include "sandbox_state.h"
struct sandbox_request {
uint64_t id;
struct module * module;
int socket_descriptor;
struct sockaddr socket_address;
uint64_t request_arrival_timestamp; /* cycles */
uint64_t absolute_deadline; /* cycles */
/*
* Unitless estimate of the instantaneous fraction of system capacity required to run the request
* Calculated by estimated execution time (cycles) * runtime_admissions_granularity / relative deadline (cycles)
*/
uint64_t admissions_estimate;
};
DEQUE_PROTOTYPE(sandbox, struct sandbox_request *)
/* Count of the total number of requests we've ever allocated. Never decrements as it is used to generate IDs */
extern _Atomic uint32_t sandbox_request_count;
/* Zeroes the global request counter; call once before any requests are allocated */
static inline void
sandbox_request_count_initialize()
{
	atomic_init(&sandbox_request_count, 0);
}
/* Atomically bumps the global request counter and yields the pre-increment value,
 * which callers use as a unique request ID */
static inline uint32_t
sandbox_request_count_postfix_increment()
{
	uint32_t previous_count = atomic_fetch_add(&sandbox_request_count, 1);
	return previous_count;
}
/* Logs the allocation of a sandbox request; compiles to a no-op unless
 * LOG_REQUEST_ALLOCATION is defined */
static inline void
sandbox_request_log_allocation(struct sandbox_request *sandbox_request)
{
#ifdef LOG_REQUEST_ALLOCATION
	debuglog("Sandbox Request %lu: of %s:%d\n", sandbox_request->id, sandbox_request->module->name,
	         sandbox_request->module->port);
#endif
}
/**
 * Allocates and initializes a new Sandbox Request
 * NOTE(review): despite the original comment, this function does not place the request on the
 * global deque; the caller is responsible for scheduling it. Ownership of the returned struct
 * transfers to the caller.
 * @param module the module we want to request
 * @param socket_descriptor
 * @param socket_address
 * @param request_arrival_timestamp the timestamp of when we receives the request from the network (in cycles)
 * @return the new sandbox request
 */
static inline struct sandbox_request *
sandbox_request_allocate(struct module *module, int socket_descriptor, const struct sockaddr *socket_address,
                         uint64_t request_arrival_timestamp, uint64_t admissions_estimate)
{
	struct sandbox_request *sandbox_request = (struct sandbox_request *)malloc(sizeof(struct sandbox_request));
	/* NOTE(review): assert compiles out under NDEBUG, leaving a NULL dereference on OOM —
	 * consider an explicit out-of-memory check */
	assert(sandbox_request);

	/* Sets the ID to the value before the increment */
	sandbox_request->id = sandbox_request_count_postfix_increment();

	sandbox_request->module            = module;
	sandbox_request->socket_descriptor = socket_descriptor;
	memcpy(&sandbox_request->socket_address, socket_address, sizeof(struct sockaddr));
	sandbox_request->request_arrival_timestamp = request_arrival_timestamp;
	sandbox_request->absolute_deadline         = request_arrival_timestamp + module->relative_deadline;

	/*
	 * Admissions Control State
	 * Assumption: an estimate of 0 should have been interpreted as a rejection
	 */
	assert(admissions_estimate != 0);
	sandbox_request->admissions_estimate = admissions_estimate;

	sandbox_request_log_allocation(sandbox_request);

	return sandbox_request;
}

@ -1,74 +0,0 @@
#pragma once
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "current_sandbox.h"
#include "http.h"
#include "http_total.h"
#include "likely.h"
#include "sandbox_types.h"
#include "scheduler.h"
#include "panic.h"
/**
 * Sends the HTTP response (prepended headers + body) back to the client.
 *
 * Fixes over the prior version:
 * - partial writes now advance the send pointer (previously every retry resent from the
 *   start of the buffer, corrupting the response on short writes)
 * - an EAGAIN no longer falls through to `sent += rc` with a negative rc
 * @return 0 on success, -1 on failure
 */
static inline int
sandbox_send_response(struct sandbox *sandbox)
{
	assert(sandbox != NULL);

	/* Assumption: The HTTP Request Buffer immediately precedes the HTTP Response Buffer,
	 * meaning that when we prepend, we are overwriting the tail of the HTTP request buffer */
	assert(sandbox->request.base + sandbox->module->max_request_size == sandbox->response.base);

	int rc;

	/* Determine values to template into our HTTP response */
	size_t response_body_size = sandbox->response.length;
	char   content_length[20] = { 0 };
	sprintf(content_length, "%zu", response_body_size);
	char *module_content_type = sandbox->module->response_content_type;
	char *content_type        = strlen(module_content_type) > 0 ? module_content_type : "text/plain";

	/* Prepend HTTP Response Headers, overwriting the tail of the request buffer */
	size_t response_header_size = http_response_200_size(content_type, content_length);
	char * response_header      = sandbox->response.base - response_header_size;
	rc                          = http_response_200(response_header, content_type, content_length);
	if (rc < 0) goto err;

	/* Capture Timekeeping data for end-to-end latency */
	uint64_t end_time   = __getcycles();
	sandbox->total_time = end_time - sandbox->timestamp_of.request_arrival;

	/* Send HTTP Response, tolerating partial writes and EAGAIN */
	size_t sent          = 0;
	size_t response_size = response_header_size + response_body_size;
	while (sent < response_size) {
		ssize_t sent_this_call = write(sandbox->client_socket_descriptor, response_header + sent,
		                               response_size - sent);
		if (sent_this_call < 0) {
			if (errno == EAGAIN) {
				/* Socket not ready: yield and retry without touching the byte count */
				current_sandbox_sleep();
				continue;
			}
			perror("write");
			goto err;
		}
		sent += (size_t)sent_this_call;
	}

	http_total_increment_2xx();
	rc = 0;

done:
	return rc;
err:
	rc = -1;
	goto done;
}

@ -0,0 +1,30 @@
#pragma once
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include "arch/context.h"
#include "current_sandbox.h"
#include "ps_list.h"
#include "sandbox_state_history.h"
#include "sandbox_types.h"
/**
 * Transitions a sandbox to the SANDBOX_ALLOCATED state.
 * This is the initial state, so there is no concept of "last state" here.
 * @param sandbox an uninitialized sandbox
 */
static inline void
sandbox_set_as_allocated(struct sandbox *sandbox)
{
	assert(sandbox);
	assert(sandbox->state == SANDBOX_UNINITIALIZED);
	uint64_t now = __getcycles();

	/* Every other sandbox_set_as_* transition assigns sandbox->state; without this assignment
	 * the sandbox would still report SANDBOX_UNINITIALIZED after "transitioning" */
	sandbox->state = SANDBOX_ALLOCATED;

	/* State Change Bookkeeping */
	sandbox->timestamp_of.last_state_change = now;
	sandbox_state_history_init(&sandbox->state_history);
	sandbox_state_history_append(&sandbox->state_history, SANDBOX_ALLOCATED);
	sandbox_state_totals_increment(SANDBOX_ALLOCATED);
}

@ -38,9 +38,9 @@ sandbox_set_as_asleep(struct sandbox *sandbox, sandbox_state_t last_state)
/* State Change Bookkeeping */
sandbox->duration_of_state[last_state] += (now - sandbox->timestamp_of.last_state_change);
sandbox->timestamp_of.last_state_change = now;
sandbox_state_history_append(sandbox, SANDBOX_ASLEEP);
runtime_sandbox_total_increment(SANDBOX_ASLEEP);
runtime_sandbox_total_decrement(last_state);
sandbox_state_history_append(&sandbox->state_history, SANDBOX_ASLEEP);
sandbox_state_totals_increment(SANDBOX_ASLEEP);
sandbox_state_totals_decrement(last_state);
}
static inline void

@ -41,9 +41,9 @@ sandbox_set_as_complete(struct sandbox *sandbox, sandbox_state_t last_state)
/* State Change Bookkeeping */
sandbox->duration_of_state[last_state] += (now - sandbox->timestamp_of.last_state_change);
sandbox->timestamp_of.last_state_change = now;
sandbox_state_history_append(sandbox, SANDBOX_COMPLETE);
runtime_sandbox_total_increment(SANDBOX_COMPLETE);
runtime_sandbox_total_decrement(last_state);
sandbox_state_history_append(&sandbox->state_history, SANDBOX_COMPLETE);
sandbox_state_totals_increment(SANDBOX_COMPLETE);
sandbox_state_totals_decrement(last_state);
/* Admissions Control Post Processing */
admissions_info_update(&sandbox->module->admissions_info, sandbox->duration_of_state[SANDBOX_RUNNING_USER]

@ -33,12 +33,12 @@ sandbox_set_as_error(struct sandbox *sandbox, sandbox_state_t last_state)
uint64_t now = __getcycles();
switch (last_state) {
case SANDBOX_UNINITIALIZED:
/* Technically, this is a degenerate sandbox that we generate by hand */
case SANDBOX_ALLOCATED:
break;
case SANDBOX_RUNNING_SYS: {
local_runqueue_delete(sandbox);
sandbox_free_linear_memory(sandbox);
sandbox_free_http_buffers(sandbox);
break;
}
default: {
@ -50,9 +50,9 @@ sandbox_set_as_error(struct sandbox *sandbox, sandbox_state_t last_state)
/* State Change Bookkeeping */
uint64_t duration_of_last_state = now - sandbox->timestamp_of.last_state_change;
sandbox->duration_of_state[last_state] += duration_of_last_state;
sandbox_state_history_append(sandbox, SANDBOX_ERROR);
runtime_sandbox_total_increment(SANDBOX_ERROR);
runtime_sandbox_total_decrement(last_state);
sandbox_state_history_append(&sandbox->state_history, SANDBOX_ERROR);
sandbox_state_totals_increment(SANDBOX_ERROR);
sandbox_state_totals_decrement(last_state);
/* Admissions Control Post Processing */
admissions_control_subtract(sandbox->admissions_estimate);

@ -7,51 +7,36 @@
#include "arch/context.h"
#include "current_sandbox.h"
#include "ps_list.h"
#include "sandbox_request.h"
#include "sandbox_state_history.h"
#include "sandbox_types.h"
/**
* Transitions a sandbox to the SANDBOX_INITIALIZED state.
* The sandbox was already zeroed out during allocation
* @param sandbox an uninitialized sandbox
* @param sandbox_request the request we are initializing the sandbox from
* @param allocation_timestamp timestamp of allocation
* @param sandbox
* @param last_state
*/
static inline void
sandbox_set_as_initialized(struct sandbox *sandbox, struct sandbox_request *sandbox_request,
uint64_t allocation_timestamp)
sandbox_set_as_initialized(struct sandbox *sandbox, sandbox_state_t last_state)
{
assert(sandbox);
assert(sandbox->state == SANDBOX_ALLOCATED);
assert(sandbox_request != NULL);
assert(allocation_timestamp > 0);
sandbox->state = SANDBOX_INITIALIZED;
uint64_t now = __getcycles();
/* Copy State from Sandbox Request */
sandbox->id = sandbox_request->id;
sandbox->absolute_deadline = sandbox_request->absolute_deadline;
sandbox->admissions_estimate = sandbox_request->admissions_estimate;
sandbox->client_socket_descriptor = sandbox_request->socket_descriptor;
sandbox->timestamp_of.request_arrival = sandbox_request->request_arrival_timestamp;
/* Copy the socket descriptor and address of the client invocation */
memcpy(&sandbox->client_address, &sandbox_request->socket_address, sizeof(struct sockaddr));
/* Initialize the sandbox's context, stack, and instruction pointer */
/* stack.start points to the bottom of the usable stack, so add stack_size to get to top */
arch_context_init(&sandbox->ctxt, (reg_t)current_sandbox_start,
(reg_t)sandbox->stack.start + sandbox->stack.size);
/* Initialize Parsec control structures */
ps_list_init_d(sandbox);
switch (last_state) {
case SANDBOX_ALLOCATED: {
break;
}
default: {
panic("Sandbox %lu | Illegal transition from %s to Preempted\n", sandbox->id,
sandbox_state_stringify(last_state));
}
}
/* State Change Bookkeeping */
sandbox->duration_of_state[SANDBOX_ALLOCATED] = now - allocation_timestamp;
sandbox->timestamp_of.allocation = allocation_timestamp;
sandbox->timestamp_of.last_state_change = allocation_timestamp;
sandbox_state_history_append(sandbox, SANDBOX_INITIALIZED);
runtime_sandbox_total_increment(SANDBOX_INITIALIZED);
sandbox->duration_of_state[last_state] += (now - sandbox->timestamp_of.last_state_change);
sandbox->timestamp_of.last_state_change = now;
sandbox_state_history_append(&sandbox->state_history, SANDBOX_INITIALIZED);
sandbox_state_totals_increment(SANDBOX_INITIALIZED);
sandbox_state_totals_decrement(last_state);
}

@ -25,8 +25,8 @@ sandbox_set_as_interrupted(struct sandbox *sandbox, sandbox_state_t last_state)
sandbox->duration_of_state[last_state] += (now - sandbox->timestamp_of.last_state_change);
sandbox->timestamp_of.last_state_change = now;
/* We do not append SANDBOX_INTERRUPTED to the sandbox_state_history because it would quickly fill the buffer */
runtime_sandbox_total_increment(SANDBOX_INTERRUPTED);
runtime_sandbox_total_decrement(last_state);
sandbox_state_totals_increment(SANDBOX_INTERRUPTED);
sandbox_state_totals_decrement(last_state);
}
static inline void
@ -53,8 +53,8 @@ sandbox_interrupt_return(struct sandbox *sandbox, sandbox_state_t interrupted_st
sandbox->duration_of_state[SANDBOX_INTERRUPTED] += (now - sandbox->timestamp_of.last_state_change);
sandbox->timestamp_of.last_state_change = now;
/* We do not append SANDBOX_INTERRUPTED to the sandbox_state_history because it would quickly fill the buffer */
runtime_sandbox_total_increment(interrupted_state);
runtime_sandbox_total_decrement(SANDBOX_INTERRUPTED);
sandbox_state_totals_increment(interrupted_state);
sandbox_state_totals_decrement(SANDBOX_INTERRUPTED);
barrier();
/* WARNING: Code after this assignment may be preemptable */

@ -38,9 +38,9 @@ sandbox_set_as_preempted(struct sandbox *sandbox, sandbox_state_t last_state)
/* State Change Bookkeeping */
sandbox->duration_of_state[last_state] += (now - sandbox->timestamp_of.last_state_change);
sandbox->timestamp_of.last_state_change = now;
sandbox_state_history_append(sandbox, SANDBOX_PREEMPTED);
runtime_sandbox_total_increment(SANDBOX_PREEMPTED);
runtime_sandbox_total_decrement(last_state);
sandbox_state_history_append(&sandbox->state_history, SANDBOX_PREEMPTED);
sandbox_state_totals_increment(SANDBOX_PREEMPTED);
sandbox_state_totals_decrement(last_state);
}
static inline void

@ -33,6 +33,7 @@ sandbox_set_as_returned(struct sandbox *sandbox, sandbox_state_t last_state)
sandbox->total_time = now - sandbox->timestamp_of.request_arrival;
local_runqueue_delete(sandbox);
sandbox_free_linear_memory(sandbox);
sandbox_free_http_buffers(sandbox);
break;
}
default: {
@ -44,7 +45,7 @@ sandbox_set_as_returned(struct sandbox *sandbox, sandbox_state_t last_state)
/* State Change Bookkeeping */
sandbox->duration_of_state[last_state] += (now - sandbox->timestamp_of.last_state_change);
sandbox->timestamp_of.last_state_change = now;
sandbox_state_history_append(sandbox, SANDBOX_RETURNED);
runtime_sandbox_total_increment(SANDBOX_RETURNED);
runtime_sandbox_total_decrement(last_state);
sandbox_state_history_append(&sandbox->state_history, SANDBOX_RETURNED);
sandbox_state_totals_increment(SANDBOX_RETURNED);
sandbox_state_totals_decrement(last_state);
}

@ -45,9 +45,9 @@ sandbox_set_as_runnable(struct sandbox *sandbox, sandbox_state_t last_state)
/* State Change Bookkeeping */
sandbox->duration_of_state[last_state] += (now - sandbox->timestamp_of.last_state_change);
sandbox->timestamp_of.last_state_change = now;
sandbox_state_history_append(sandbox, SANDBOX_RUNNABLE);
runtime_sandbox_total_increment(SANDBOX_RUNNABLE);
runtime_sandbox_total_decrement(last_state);
sandbox_state_history_append(&sandbox->state_history, SANDBOX_RUNNABLE);
sandbox_state_totals_increment(SANDBOX_RUNNABLE);
sandbox_state_totals_decrement(last_state);
}

@ -40,9 +40,9 @@ sandbox_set_as_running_sys(struct sandbox *sandbox, sandbox_state_t last_state)
/* State Change Bookkeeping */
sandbox->duration_of_state[last_state] += (now - sandbox->timestamp_of.last_state_change);
sandbox->timestamp_of.last_state_change = now;
sandbox_state_history_append(sandbox, SANDBOX_RUNNING_SYS);
runtime_sandbox_total_increment(SANDBOX_RUNNING_SYS);
runtime_sandbox_total_decrement(last_state);
sandbox_state_history_append(&sandbox->state_history, SANDBOX_RUNNING_SYS);
sandbox_state_totals_increment(SANDBOX_RUNNING_SYS);
sandbox_state_totals_decrement(last_state);
}
static inline void

@ -36,9 +36,9 @@ sandbox_set_as_running_user(struct sandbox *sandbox, sandbox_state_t last_state)
/* State Change Bookkeeping */
sandbox->duration_of_state[last_state] += (now - sandbox->timestamp_of.last_state_change);
sandbox->timestamp_of.last_state_change = now;
sandbox_state_history_append(sandbox, SANDBOX_RUNNING_USER);
runtime_sandbox_total_increment(SANDBOX_RUNNING_USER);
runtime_sandbox_total_decrement(last_state);
sandbox_state_history_append(&sandbox->state_history, SANDBOX_RUNNING_USER);
sandbox_state_totals_increment(SANDBOX_RUNNING_USER);
sandbox_state_totals_decrement(last_state);
barrier();
sandbox->state = SANDBOX_RUNNING_USER;

@ -7,6 +7,8 @@
#include "sandbox_types.h"
extern void stub_init(int32_t offset);
/**
* Takes the arguments from the sandbox struct and writes them into the WebAssembly linear memory
*/
@ -15,12 +17,18 @@ sandbox_setup_arguments(struct sandbox *sandbox)
{
assert(sandbox != NULL);
int32_t argument_count = 0;
/* whatever gregor has, to be able to pass arguments to a module! */
sandbox->arguments_offset = local_sandbox_context_cache.memory.size;
assert(local_sandbox_context_cache.memory.start == sandbox->memory.start);
expand_memory();
int32_t string_off = sandbox->arguments_offset;
/* Copy arguments into linear memory. It seems like malloc would clobber this, but I think this goes away in
* WASI, so not worth fixing*/
sandbox->arguments_offset = wasm_memory_get_size(sandbox->memory);
/* Assumption: we can fit the arguments in a single wasm page */
int rc = wasm_memory_expand(sandbox->memory, WASM_PAGE_SIZE);
assert(rc == 0);
/* We have to update our cache here */
memcpy(&current_wasm_module_instance.memory, sandbox->memory, sizeof(struct wasm_memory));
stub_init(string_off);
stub_init(sandbox->arguments_offset);
}

@ -32,31 +32,31 @@ sandbox_state_stringify(sandbox_state_t state)
return sandbox_state_labels[state];
}
#ifdef LOG_SANDBOX_COUNT
extern _Atomic uint32_t sandbox_state_count[SANDBOX_STATE_COUNT];
#ifdef SANDBOX_STATE_TOTALS
extern _Atomic uint32_t sandbox_state_totals[SANDBOX_STATE_COUNT];
#endif
static inline void
sandbox_count_initialize()
sandbox_state_totals_initialize()
{
#ifdef LOG_SANDBOX_COUNT
for (int i = 0; i < SANDBOX_STATE_COUNT; i++) atomic_init(&sandbox_state_count[i], 0);
#ifdef SANDBOX_STATE_TOTALS
for (int i = 0; i < SANDBOX_STATE_COUNT; i++) atomic_init(&sandbox_state_totals[i], 0);
#endif
}
static inline void
runtime_sandbox_total_increment(sandbox_state_t state)
sandbox_state_totals_increment(sandbox_state_t state)
{
#ifdef LOG_SANDBOX_COUNT
atomic_fetch_add(&sandbox_state_count[state], 1);
#ifdef SANDBOX_STATE_TOTALS
atomic_fetch_add(&sandbox_state_totals[state], 1);
#endif
}
static inline void
runtime_sandbox_total_decrement(sandbox_state_t state)
sandbox_state_totals_decrement(sandbox_state_t state)
{
#ifdef LOG_SANDBOX_COUNT
if (atomic_load(&sandbox_state_count[state]) == 0) panic("Underflow of %s\n", sandbox_state_stringify(state));
atomic_fetch_sub(&sandbox_state_count[state], 1);
#ifdef SANDBOX_STATE_TOTALS
if (atomic_load(&sandbox_state_totals[state]) == 0) panic("Underflow of %s\n", sandbox_state_stringify(state));
atomic_fetch_sub(&sandbox_state_totals[state], 1);
#endif
}

@ -3,12 +3,30 @@
#include "sandbox_state.h"
#include "sandbox_types.h"
#ifdef LOG_STATE_CHANGES
#define SANDBOX_STATE_HISTORY_CAPACITY 100
#else
#define SANDBOX_STATE_HISTORY_CAPACITY 0
#endif
struct sandbox_state_history {
uint16_t size;
sandbox_state_t buffer[SANDBOX_STATE_HISTORY_CAPACITY];
};
/**
 * Zeroes a sandbox's state-history log.
 * @param self the history to initialize
 */
static inline void
sandbox_state_history_init(struct sandbox_state_history *self)
{
#ifdef LOG_STATE_CHANGES
	/* The buffer member is a fixed-size array already counted by sizeof(struct
	 * sandbox_state_history); the previous computation added the buffer's size a second time
	 * and memset past the end of the struct (out-of-bounds write) */
	memset(self, 0, sizeof(struct sandbox_state_history));
#endif
}
static inline void
sandbox_state_history_append(struct sandbox *sandbox, sandbox_state_t state)
sandbox_state_history_append(struct sandbox_state_history *self, sandbox_state_t state)
{
#ifdef LOG_STATE_CHANGES
if (likely(sandbox->state_history_count < SANDBOX_STATE_HISTORY_CAPACITY)) {
sandbox->state_history[sandbox->state_history_count++] = state;
}
if (likely(self->size < SANDBOX_STATE_HISTORY_CAPACITY)) { self->buffer[self->size++] = state; }
#endif
}

@ -0,0 +1,19 @@
#pragma once
#include <stdatomic.h>
#include <stdint.h>
/* Count of the total number of requests we've ever allocated. Never decrements as it is used to generate IDs */
extern _Atomic uint32_t sandbox_total;
/**
 * Initializes the global sandbox counter to zero.
 * Must run before any concurrent use: atomic_init itself is not atomic.
 */
static inline void
sandbox_total_initialize()
{
	atomic_init(&sandbox_total, 0);
}
/**
 * Atomically increments the global sandbox counter.
 * @returns the value BEFORE the increment (postfix semantics), used as a unique sandbox ID
 */
static inline uint32_t
sandbox_total_postfix_increment()
{
	return atomic_fetch_add(&sandbox_total, 1);
}

@ -12,25 +12,20 @@
#include "module.h"
#include "ps_list.h"
#include "sandbox_state.h"
#include "sandbox_state_history.h"
#include "vec_u8.h"
#include "wasm_memory.h"
#include "wasm_types.h"
#include "wasm_stack.h"
#ifdef LOG_SANDBOX_MEMORY_PROFILE
#define SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT 1024
#endif
#ifdef LOG_STATE_CHANGES
#define SANDBOX_STATE_HISTORY_CAPACITY 100
#endif
/*********************
* Structs and Types *
********************/
struct sandbox_stack {
void * start; /* points to the bottom of the usable stack */
uint32_t size;
};
struct sandbox_timestamps {
uint64_t last_state_change; /* Used for bookkeeping of actual execution time */
uint64_t request_arrival; /* Timestamp when request is received */
@ -43,61 +38,28 @@ struct sandbox_timestamps {
#endif
};
/*
* Static In-memory buffers are used for HTTP requests read in via STDIN and HTTP
* responses written back out via STDOUT. These are allocated in pages immediately
* adjacent to the sandbox struct in the following layout. The capacity of these
* buffers are configured in the module spec and stored in sandbox->module.max_request_size
* and sandbox->module.max_response_size.
*
* Because the sandbox struct, the request header, and the response header are sized
* in pages, we must store the base pointer to the buffer. The length is increased
* and should not exceed the respective module max size.
*
* ---------------------------------------------------
* | Sandbox | Request | Response |
* ---------------------------------------------------
*
* After the sandbox writes its response, a header is written at a negative offset
* overwriting the tail end of the request buffer. This assumes that the request
* data is no longer needed because the sandbox has run to completion
*
* ---------------------------------------------------
* | Sandbox | Garbage | HDR | Response |
* ---------------------------------------------------
*/
struct sandbox_buffer {
char * base;
size_t length;
};
struct sandbox {
uint64_t id;
sandbox_state_t state;
#ifdef LOG_STATE_CHANGES
sandbox_state_t state_history[SANDBOX_STATE_HISTORY_CAPACITY];
uint16_t state_history_count;
#endif
uint64_t id;
sandbox_state_t state;
struct sandbox_state_history state_history;
struct ps_list list; /* used by ps_list's default name-based MACROS for the scheduling runqueue */
/* HTTP State */
struct sockaddr client_address; /* client requesting connection! */
int client_socket_descriptor;
http_parser http_parser;
struct http_request http_request;
ssize_t http_request_length; /* TODO: Get rid of me */
struct sandbox_buffer request;
struct sandbox_buffer response;
struct sockaddr client_address; /* client requesting connection! */
int client_socket_descriptor;
http_parser http_parser;
struct http_request http_request;
struct vec_u8 request;
struct vec_u8 response;
/* WebAssembly Module State */
struct module *module; /* the module this is an instance of */
/* WebAssembly Instance State */
struct arch_context ctxt;
struct sandbox_stack stack;
struct wasm_memory memory;
struct arch_context ctxt;
struct wasm_stack stack;
struct wasm_memory *memory;
/* Scheduling and Temporal State */
struct sandbox_timestamps timestamp_of;

@ -13,7 +13,6 @@
#include "local_runqueue_minheap.h"
#include "local_runqueue_list.h"
#include "panic.h"
#include "sandbox_request.h"
#include "sandbox_functions.h"
#include "sandbox_types.h"
#include "sandbox_set_as_preempted.h"
@ -74,67 +73,49 @@ static inline struct sandbox *
scheduler_edf_get_next()
{
/* Get the deadline of the sandbox at the head of the local request queue */
struct sandbox * local = local_runqueue_get_next();
uint64_t local_deadline = local == NULL ? UINT64_MAX : local->absolute_deadline;
struct sandbox_request *request = NULL;
struct sandbox *local = local_runqueue_get_next();
uint64_t local_deadline = local == NULL ? UINT64_MAX : local->absolute_deadline;
struct sandbox *global = NULL;
uint64_t global_deadline = global_request_scheduler_peek();
/* Try to pull and allocate from the global queue if earlier
* This will be placed at the head of the local runqueue */
if (global_deadline < local_deadline) {
if (global_request_scheduler_remove_if_earlier(&request, local_deadline) == 0) {
assert(request != NULL);
assert(request->absolute_deadline < local_deadline);
struct sandbox *global = sandbox_allocate(request);
if (!global) goto err_allocate;
if (global_request_scheduler_remove_if_earlier(&global, local_deadline) == 0) {
assert(global != NULL);
assert(global->absolute_deadline < local_deadline);
sandbox_prepare_execution_environment(global);
assert(global->state == SANDBOX_INITIALIZED);
sandbox_set_as_runnable(global, SANDBOX_INITIALIZED);
}
}
/* Return what is at the head of the local runqueue or NULL if empty */
done:
/* Return what is at the head of the local runqueue or NULL if empty */
return local_runqueue_get_next();
err_allocate:
client_socket_send(request->socket_descriptor, 503);
client_socket_close(request->socket_descriptor, &request->socket_address);
free(request);
goto done;
}
static inline struct sandbox *
scheduler_fifo_get_next()
{
struct sandbox *sandbox = local_runqueue_get_next();
struct sandbox *local = local_runqueue_get_next();
struct sandbox_request *sandbox_request = NULL;
struct sandbox *global = NULL;
if (sandbox == NULL) {
if (local == NULL) {
/* If the local runqueue is empty, pull from global request scheduler */
if (global_request_scheduler_remove(&sandbox_request) < 0) goto err;
if (global_request_scheduler_remove(&global) < 0) goto done;
sandbox = sandbox_allocate(sandbox_request);
if (!sandbox) goto err_allocate;
sandbox_set_as_runnable(sandbox, SANDBOX_INITIALIZED);
} else if (sandbox == current_sandbox_get()) {
sandbox_prepare_execution_environment(global);
sandbox_set_as_runnable(global, SANDBOX_INITIALIZED);
} else if (local == current_sandbox_get()) {
/* Execute Round Robin Scheduling Logic if the head is the current sandbox */
local_runqueue_list_rotate();
sandbox = local_runqueue_get_next();
}
done:
return sandbox;
err_allocate:
client_socket_send(sandbox_request->socket_descriptor, 503);
client_socket_close(sandbox_request->socket_descriptor, &sandbox->client_address);
free(sandbox_request);
err:
sandbox = NULL;
goto done;
return local_runqueue_get_next();
}
static inline struct sandbox *

@ -63,7 +63,8 @@ scheduler_execute_epoll_loop(void)
case SANDBOX_ERROR:
panic("Expected to have closed socket");
default:
client_socket_send(sandbox->client_socket_descriptor, 503);
client_socket_send_oneshot(sandbox->client_socket_descriptor,
http_header_build(503), http_header_len(503));
sandbox_close_http(sandbox);
sandbox_set_as_error(sandbox, sandbox->state);
}

@ -4,8 +4,6 @@
#include <stdio.h>
#include <threads.h>
#include "wasm_types.h"
/* For this family of macros, do NOT pass zero as the pow2 */
#define round_to_pow2(x, pow2) (((unsigned long)(x)) & (~((pow2)-1)))
#define round_up_to_pow2(x, pow2) (round_to_pow2(((unsigned long)(x)) + (pow2)-1, (pow2)))
@ -20,18 +18,10 @@
#define PAGE_SIZE (unsigned long)(1 << 12)
#define WEAK __attribute__((weak))
/* memory also provides the table access functions */
#define INDIRECT_TABLE_SIZE (1 << 10)
struct indirect_table_entry {
uint32_t type_id;
void * func_pointer;
};
/* Cache of Frequently Accessed Members used to avoid pointer chasing */
struct sandbox_context_cache {
struct wasm_memory memory;
struct indirect_table_entry *module_indirect_table;
};
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
extern thread_local struct sandbox_context_cache local_sandbox_context_cache;
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#endif

@ -0,0 +1,113 @@
#pragma once
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
struct vec_u8 {
	size_t   length;   /* bytes currently in use */
	size_t   capacity; /* bytes allocated in buffer; NULL buffer iff 0 */
	uint8_t *buffer;   /* heap-backed storage */
};

static inline struct vec_u8 *vec_u8_alloc(void);
static inline int            vec_u8_init(struct vec_u8 *self, size_t capacity);
static inline struct vec_u8 *vec_u8_new(size_t capacity);
static inline void           vec_u8_deinit(struct vec_u8 *self);
static inline void           vec_u8_free(struct vec_u8 *self);
static inline void           vec_u8_delete(struct vec_u8 *self);

/**
 * Allocates a zeroed vec on the heap (this header uses assert(), so it must include <assert.h>
 * rather than relying on its consumers' include order)
 * @returns a pointer to a zeroed vec, or NULL on allocation failure
 */
static inline struct vec_u8 *
vec_u8_alloc(void)
{
	/* No cast: implicit conversion from void * is idiomatic C; calloc leaves the vec in a
	 * valid empty state (length 0, capacity 0, buffer NULL) */
	return calloc(1, sizeof(struct vec_u8));
}

/**
 * Initializes a vec, allocating a zeroed backing buffer for the provided capacity
 * @param self pointer to an uninitialized vec
 * @param capacity in bytes; 0 yields a valid bufferless vec
 * @returns 0 on success, -1 on allocation failure
 */
static inline int
vec_u8_init(struct vec_u8 *self, size_t capacity)
{
	if (capacity == 0) {
		self->buffer = NULL;
	} else {
		self->buffer = calloc(capacity, sizeof(uint8_t));
		if (self->buffer == NULL) return -1;
	}

	self->length   = 0;
	self->capacity = capacity;

	return 0;
}

/**
 * Allocates and initializes a vec with a backing buffer
 * @param capacity in bytes
 * @returns a ready-to-use vec on the heap, or NULL on allocation failure
 */
static inline struct vec_u8 *
vec_u8_new(size_t capacity)
{
	struct vec_u8 *self = vec_u8_alloc();
	if (self == NULL) return NULL;

	if (vec_u8_init(self, capacity) < 0) {
		/* self came from calloc, so it satisfies vec_u8_free's "deinitialized" asserts */
		vec_u8_free(self);
		return NULL;
	}

	return self;
}

/**
 * Deinitializes a vec, releasing the backing buffer and zeroing members
 * @param self an initialized vec
 */
static inline void
vec_u8_deinit(struct vec_u8 *self)
{
	if (self->capacity == 0) {
		assert(self->buffer == NULL);
		assert(self->length == 0);
		return;
	}

	assert(self->buffer != NULL);
	free(self->buffer);
	self->buffer   = NULL;
	self->length   = 0;
	self->capacity = 0;
}

/**
 * Frees a vec struct allocated on the heap.
 * Asserts that the vec has already been deinitialized so the buffer is not leaked.
 */
static inline void
vec_u8_free(struct vec_u8 *self)
{
	assert(self->buffer == NULL);
	assert(self->length == 0);
	assert(self->capacity == 0);

	free(self);
}

/**
 * Deinitializes and frees a heap-allocated vec
 * @param self
 */
static inline void
vec_u8_delete(struct vec_u8 *self)
{
	vec_u8_deinit(self);
	vec_u8_free(self);
}

@ -0,0 +1,363 @@
#pragma once
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include "types.h" /* PAGE_SIZE */
#include "wasm_types.h"
#define WASM_MEMORY_MAX (size_t) UINT32_MAX + 1
#define WASM_MEMORY_SIZE_TO_ALLOC ((size_t)WASM_MEMORY_MAX + /* guard page */ PAGE_SIZE)
struct wasm_memory {
	size_t size; /* Current accessible size in bytes (starts at the initial size; wasm_memory_expand grows it) */
	size_t capacity; /* High-water mark of bytes actually made readable/writable via mprotect */
	size_t max; /* Soft cap in bytes. Defaults to 4GB */
	uint8_t *buffer; /* mmap'ed full wasm32 range plus guard page; NULL when deinitialized */
};
static INLINE struct wasm_memory *wasm_memory_alloc(void);
static INLINE int wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max);
static INLINE struct wasm_memory *wasm_memory_new(size_t initial, size_t max);
static INLINE void wasm_memory_deinit(struct wasm_memory *self);
static INLINE void wasm_memory_free(struct wasm_memory *self);
static INLINE void wasm_memory_delete(struct wasm_memory *self);
/* Allocates an uninitialized wasm_memory struct; returns NULL on failure.
 * Members are garbage until wasm_memory_init runs. */
static INLINE struct wasm_memory *
wasm_memory_alloc(void)
{
	return malloc(sizeof(struct wasm_memory));
}
/**
 * Initializes a wasm linear memory: reserves the full wasm32 virtual range plus guard page and
 * makes the initial bytes accessible.
 * @param self an allocated wasm_memory
 * @param initial initial accessible size in bytes (nonzero, <= max)
 * @param max soft cap in bytes
 * @returns 0 on success; -1 on mmap/mprotect failure, leaving self->buffer NULL so that
 *          wasm_memory_free's asserts hold on the cleanup path
 */
static INLINE int
wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max)
{
	assert(self != NULL);

	/* We assume WASI modules, which are required to declare and export a linear memory with a non-zero size to
	 * allow a standard lib to initialize. Technically, a WebAssembly module that exports pure functions may not use
	 * a linear memory */
	assert(initial > 0);
	assert(initial <= (size_t)UINT32_MAX + 1);
	assert(max > 0);
	assert(max <= (size_t)UINT32_MAX + 1);
	/* An initial size above the soft cap would be immediately inconsistent with wasm_memory_expand */
	assert(initial <= max);

	/* Allocate buffer of contiguous virtual addresses for full wasm32 linear memory and guard page */
	self->buffer = mmap(NULL, WASM_MEMORY_SIZE_TO_ALLOC, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (self->buffer == MAP_FAILED) {
		/* Previously buffer was left as MAP_FAILED here, tripping wasm_memory_free's
		 * assert(buffer == NULL) on the error path */
		self->buffer = NULL;
		return -1;
	}

	/* Set the initial bytes to read / write */
	int rc = mprotect(self->buffer, initial, PROT_READ | PROT_WRITE);
	if (rc != 0) {
		munmap(self->buffer, WASM_MEMORY_SIZE_TO_ALLOC);
		/* Previously buffer was left dangling after munmap */
		self->buffer = NULL;
		return -1;
	}

	self->size     = initial;
	self->capacity = initial;
	self->max      = max;

	return 0;
}
/**
 * Allocates and initializes a wasm linear memory.
 * @param initial initial accessible size in bytes
 * @param max soft cap in bytes
 * @returns a ready-to-use wasm_memory, or NULL on failure
 */
static INLINE struct wasm_memory *
wasm_memory_new(size_t initial, size_t max)
{
	struct wasm_memory *self = wasm_memory_alloc();
	if (self == NULL) return self;

	/* Previously this path hit assert(0), turning a recoverable mmap failure into a debug-build
	 * abort, and then called wasm_memory_free whose asserts read the uninitialized buffer member.
	 * Freeing the struct directly keeps the NULL-return error contract. */
	if (wasm_memory_init(self, initial, max) < 0) {
		free(self);
		return NULL;
	}

	return self;
}
/* Releases the mmap'ed range (linear memory plus guard page) and zeroes members,
 * leaving the struct in the state wasm_memory_free asserts on */
static INLINE void
wasm_memory_deinit(struct wasm_memory *self)
{
	assert(self != NULL);
	assert(self->buffer != NULL);
	munmap(self->buffer, WASM_MEMORY_SIZE_TO_ALLOC);
	self->buffer = NULL;
	self->size = 0;
	self->capacity = 0;
	self->max = 0;
}
/* Frees the wasm_memory struct itself; requires a prior wasm_memory_deinit */
static INLINE void
wasm_memory_free(struct wasm_memory *self)
{
	assert(self != NULL);
	/* Assume prior deinitialization so we don't leak buffers */
	assert(self->buffer == NULL);
	free(self);
}
/* Convenience: deinit then free, for heap-allocated instances (the counterpart to wasm_memory_new) */
static INLINE void
wasm_memory_delete(struct wasm_memory *self)
{
	assert(self != NULL);
	wasm_memory_deinit(self);
	wasm_memory_free(self);
}
/* Zeroes the currently accessible region (the first self->size bytes of the buffer) */
static INLINE void
wasm_memory_wipe(struct wasm_memory *self)
{
	memset(self->buffer, 0, self->size);
}
/**
 * Expands the accessible region of the linear memory by size_to_expand bytes.
 * @param self an initialized wasm_memory
 * @param size_to_expand bytes to grow by
 * @returns 0 on success; -1 if the expansion would exceed the soft cap or mprotect fails
 */
static INLINE int
wasm_memory_expand(struct wasm_memory *self, size_t size_to_expand)
{
	/* Compare against the remaining headroom rather than computing size + size_to_expand
	 * first: the raw sum could wrap size_t and slip past the cap check */
	if (unlikely(size_to_expand > self->max - self->size)) {
		fprintf(stderr, "wasm_memory_expand - Out of Memory!. %zu out of %zu\n", self->size, self->max);
		return -1;
	}

	size_t target_size = self->size + size_to_expand;

	/* If recycling a wasm_memory from an object pool, a previous execution may have already expanded to or
	 * beyond what we need. The capacity represents the "high water mark" of previous executions. If the desired
	 * size is less than this "high water mark," we just need to update size for accounting purposes. Otherwise, we
	 * need to actually issue an mprotect syscall. The goal of these optimizations is to reduce mmap and demand
	 * paging overhead for repeated instantiations of a WebAssembly module. */
	if (target_size > self->capacity) {
		int rc = mprotect(self->buffer, target_size, PROT_READ | PROT_WRITE);
		if (rc != 0) {
			perror("wasm_memory_expand mprotect");
			return -1;
		}
		self->capacity = target_size;
	}

	self->size = target_size;
	return 0;
}
/* Overwrites the accounting size only; does NOT change page protections, so the caller must
 * ensure the range is already accessible */
static INLINE void
wasm_memory_set_size(struct wasm_memory *self, size_t size)
{
	self->size = size;
}
/* Returns the currently accessible size of the linear memory in bytes */
static INLINE size_t
wasm_memory_get_size(struct wasm_memory *self)
{
	return self->size;
}
/* Copies region_size bytes into linear memory at offset; the (size_t) widening keeps the
 * bounds check from wrapping in 32-bit arithmetic */
static INLINE void
wasm_memory_initialize_region(struct wasm_memory *self, uint32_t offset, uint32_t region_size, uint8_t region[])
{
	assert((size_t)offset + region_size <= self->size);
	memcpy(&self->buffer[offset], region, region_size);
}
/* NOTE: These wasm_memory functions require pointer dereferencing. For this reason, they are not directly by wasm32
* instructions. These functions are intended to be used by the runtime to interacts with linear memories. */
/**
 * Translates WASM offsets into runtime VM pointers
 * @param offset an offset into the WebAssembly linear memory
 * @param size the size in bytes of the thing we are pointing to
 * @return void pointer to something in WebAssembly linear memory
 */
static INLINE void *
wasm_memory_get_ptr_void(struct wasm_memory *self, uint32_t offset, uint32_t size)
{
	/* Widen before adding: offset + size in uint32_t arithmetic could wrap and defeat the
	 * bounds check (the sibling accessors get this for free via sizeof's size_t operand) */
	assert((size_t)offset + size <= self->size);
	return (void *)&self->buffer[offset];
}
/**
 * Get an ASCII character from WebAssembly linear memory
 * @param self the wasm_memory
 * @param offset an offset into the WebAssembly linear memory
 * @return char at the offset
 */
static INLINE char
wasm_memory_get_char(struct wasm_memory *self, uint32_t offset)
{
	assert(offset + sizeof(char) <= self->size);
	char *location = (char *)&self->buffer[offset];
	return *location;
}
/**
 * Get a float from WebAssembly linear memory
 * @param self the wasm_memory
 * @param offset an offset into the WebAssembly linear memory
 * @return float at the offset
 */
static INLINE float
wasm_memory_get_f32(struct wasm_memory *self, uint32_t offset)
{
	assert(offset + sizeof(float) <= self->size);
	/* memcpy instead of a casted dereference: wasm offsets carry no alignment guarantee and a
	 * float* load through a uint8_t buffer violates strict aliasing */
	float value;
	memcpy(&value, &self->buffer[offset], sizeof(value));
	return value;
}
/**
 * Get a double from WebAssembly linear memory
 * @param self the wasm_memory
 * @param offset an offset into the WebAssembly linear memory
 * @return double at the offset
 */
static INLINE double
wasm_memory_get_f64(struct wasm_memory *self, uint32_t offset)
{
	assert(offset + sizeof(double) <= self->size);
	/* memcpy avoids unaligned loads and strict-aliasing violations */
	double value;
	memcpy(&value, &self->buffer[offset], sizeof(value));
	return value;
}
/**
 * Get an int8_t from WebAssembly linear memory
 * @param self the wasm_memory
 * @param offset an offset into the WebAssembly linear memory
 * @return int8_t at the offset
 */
static INLINE int8_t
wasm_memory_get_i8(struct wasm_memory *self, uint32_t offset)
{
	assert(offset + sizeof(int8_t) <= self->size);
	/* Single-byte access: no alignment concern, and int8_t is a character type */
	int8_t *location = (int8_t *)&self->buffer[offset];
	return *location;
}
/**
 * Get an int16_t from WebAssembly linear memory
 * @param self the wasm_memory
 * @param offset an offset into the WebAssembly linear memory
 * @return int16_t at the offset
 */
static INLINE int16_t
wasm_memory_get_i16(struct wasm_memory *self, uint32_t offset)
{
	assert(offset + sizeof(int16_t) <= self->size);
	/* memcpy avoids unaligned loads and strict-aliasing violations */
	int16_t value;
	memcpy(&value, &self->buffer[offset], sizeof(value));
	return value;
}
/**
 * Get an int32_t from WebAssembly linear memory
 * @param self the wasm_memory
 * @param offset an offset into the WebAssembly linear memory
 * @return int32_t at the offset
 */
static INLINE int32_t
wasm_memory_get_i32(struct wasm_memory *self, uint32_t offset)
{
	assert(offset + sizeof(int32_t) <= self->size);
	/* memcpy avoids unaligned loads and strict-aliasing violations */
	int32_t value;
	memcpy(&value, &self->buffer[offset], sizeof(value));
	return value;
}
/**
 * Get an int64_t from WebAssembly linear memory
 * @param self the wasm_memory
 * @param offset an offset into the WebAssembly linear memory
 * @return int64_t at the offset
 */
static INLINE int64_t
wasm_memory_get_i64(struct wasm_memory *self, uint32_t offset)
{
	assert(offset + sizeof(int64_t) <= self->size);
	/* memcpy avoids unaligned loads and strict-aliasing violations */
	int64_t value;
	memcpy(&value, &self->buffer[offset], sizeof(value));
	return value;
}
/**
 * @param self the wasm_memory
 * @returns the current size expressed in whole WASM pages
 */
static INLINE uint32_t
wasm_memory_get_page_count(struct wasm_memory *self)
{
	size_t page_count = self->size / WASM_PAGE_SIZE;
	return (uint32_t)page_count;
}
/**
 * Get a null-terminated String from WebAssembly linear memory
 * @param self the wasm_memory
 * @param offset an offset into the WebAssembly linear memory
 * @param size the maximum expected length in characters
 * @return pointer to the string or NULL if size is reached without finding a null-terminator
 */
static INLINE char *
wasm_memory_get_string(struct wasm_memory *self, uint32_t offset, uint32_t size)
{
	/* Widen to size_t so offset + size cannot wrap around uint32_t and defeat the bounds check */
	assert((size_t)offset + (sizeof(char) * size) <= self->size);

	char *start = (char *)&self->buffer[offset];
	/* Only hand the buffer back if a NUL terminator exists within the first size characters */
	if (strnlen(start, size) < size) return start;
	return NULL;
}
/**
 * Write a float to WebAssembly linear memory
 * @param self the wasm_memory
 * @param offset an offset into the WebAssembly linear memory
 * @param value the float to write
 */
static INLINE void
wasm_memory_set_f32(struct wasm_memory *self, uint32_t offset, float value)
{
	assert(offset + sizeof(float) <= self->size);
	/* memcpy avoids unaligned stores and strict-aliasing violations */
	memcpy(&self->buffer[offset], &value, sizeof(value));
}
/**
 * Write a double to WebAssembly linear memory
 * @param self the wasm_memory
 * @param offset an offset into the WebAssembly linear memory
 * @param value the double to write
 */
static INLINE void
wasm_memory_set_f64(struct wasm_memory *self, uint32_t offset, double value)
{
	assert(offset + sizeof(double) <= self->size);
	/* memcpy avoids unaligned stores and strict-aliasing violations */
	memcpy(&self->buffer[offset], &value, sizeof(value));
}
/**
 * Write an int8_t to WebAssembly linear memory
 * @param self the wasm_memory
 * @param offset an offset into the WebAssembly linear memory
 * @param value the int8_t to write
 */
static INLINE void
wasm_memory_set_i8(struct wasm_memory *self, uint32_t offset, int8_t value)
{
	assert(offset + sizeof(int8_t) <= self->size);
	/* Single-byte access: no alignment concern, and int8_t is a character type */
	int8_t *location = (int8_t *)&self->buffer[offset];
	*location        = value;
}
/**
 * Write an int16_t to WebAssembly linear memory
 * @param self the wasm_memory
 * @param offset an offset into the WebAssembly linear memory
 * @param value the int16_t to write
 */
static INLINE void
wasm_memory_set_i16(struct wasm_memory *self, uint32_t offset, int16_t value)
{
	assert(offset + sizeof(int16_t) <= self->size);
	/* memcpy avoids unaligned stores and strict-aliasing violations */
	memcpy(&self->buffer[offset], &value, sizeof(value));
}
/**
 * Write an int32_t to WebAssembly linear memory
 * @param self the wasm_memory
 * @param offset an offset into the WebAssembly linear memory
 * @param value the int32_t to write
 */
static INLINE void
wasm_memory_set_i32(struct wasm_memory *self, uint32_t offset, int32_t value)
{
	assert(offset + sizeof(int32_t) <= self->size);
	/* memcpy avoids unaligned stores and strict-aliasing violations */
	memcpy(&self->buffer[offset], &value, sizeof(value));
}
/**
 * Write an int64_t to WebAssembly linear memory
 * @param self the wasm_memory
 * @param offset an offset into the WebAssembly linear memory
 *        NOTE(review): this setter takes a uint64_t offset while every sibling takes uint32_t — confirm intended
 * @param value the int64_t to write
 */
static INLINE void
wasm_memory_set_i64(struct wasm_memory *self, uint64_t offset, int64_t value)
{
	assert(offset + sizeof(int64_t) <= self->size);
	/* memcpy avoids unaligned stores and strict-aliasing violations */
	memcpy(&self->buffer[offset], &value, sizeof(value));
}

@ -0,0 +1,12 @@
#pragma once

#include "wasm_memory.h"
#include "wasm_table.h"

/* This structure is the runtime representation of the unique state of a module instance
 * Currently this is not spec-compliant, as it only supports a single table and a single memory and it excludes many
 * entities https://webassembly.github.io/spec/core/exec/runtime.html#module-instances
 */
struct wasm_module_instance {
	struct wasm_memory memory; /* the instance's single linear memory, embedded by value */
	struct wasm_table *table;  /* the instance's single indirect-call table; presumably owned by the module — verify */
};

@ -0,0 +1,69 @@
#pragma once
#include <stdint.h>
#include <stdlib.h>
#include <sys/mman.h>
#include "sandbox_types.h"
#include "types.h"
/* A downward-growing sandbox execution stack backed by mmap, with a PROT_NONE guard page below it */
struct wasm_stack {
	size_t capacity; /* Usable capacity. Excludes size of guard page that we need to free */
	uint8_t *high; /* The highest address of the stack. Grows down from here */
	uint8_t *low; /* The address of the lowest usable address. Above guard page */
	uint8_t *buffer; /* Points to Guard Page */
};
/**
 * Allocates a static sized stack for a sandbox with a guard page underneath
 * Because a stack grows down, this protects against stack overflow
 * TODO: Should this use MAP_GROWSDOWN to enable demand paging for the stack?
 * @param stack the wasm_stack to initialize (buffer is set to NULL on failure)
 * @param capacity usable stack size in bytes, excluding the guard page
 * @returns 0 on success, -1 on error
 */
static INLINE int
wasm_stack_allocate(struct wasm_stack *stack, size_t capacity)
{
	assert(stack);

	int rc = 0;

	/* Reserve guard page + stack as one inaccessible mapping */
	stack->buffer = (uint8_t *)mmap(NULL, /* guard page */ PAGE_SIZE + capacity, PROT_NONE,
	                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (unlikely(stack->buffer == MAP_FAILED)) {
		perror("sandbox allocate stack");
		goto err_stack_allocation_failed;
	}

	/* Remap everything above the guard page read/write; MAP_FIXED overlays the reservation in place */
	stack->low = (uint8_t *)mmap(stack->buffer + /* guard page */ PAGE_SIZE, capacity, PROT_READ | PROT_WRITE,
	                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (unlikely(stack->low == MAP_FAILED)) {
		perror("sandbox set stack read/write");
		goto err_stack_prot_failed;
	}

	stack->capacity = capacity;
	stack->high     = stack->low + capacity;

	rc = 0;
done:
	return rc;
err_stack_prot_failed:
	/* Unmap the whole reservation, guard page included */
	rc = munmap(stack->buffer, PAGE_SIZE + capacity);
	if (rc == -1) perror("munmap");
err_stack_allocation_failed:
	stack->buffer = NULL;
	rc            = -1;
	goto done;
}
/**
 * Releases a stack previously created by wasm_stack_allocate, including its guard page
 * @param stack the wasm_stack to free; buffer is cleared to NULL afterwards
 */
static INLINE void
wasm_stack_free(struct wasm_stack *stack)
{
	assert(stack != NULL);
	assert(stack->buffer != NULL);

	/* buffer points at the guard page, one PAGE_SIZE below the usable stack, so unmap from there */
	size_t total_size = stack->capacity + PAGE_SIZE;
	int    rc         = munmap(stack->buffer, total_size);
	stack->buffer     = NULL;
	if (unlikely(rc == -1)) perror("munmap");
}

@ -0,0 +1,118 @@
#pragma once
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include "types.h"
/* memory also provides the table access functions */
#define INDIRECT_TABLE_SIZE (1 << 10)

/* One slot of the indirect-call table: a function pointer plus the type_id of the wasm function type it implements */
struct wasm_table_entry {
	uint32_t type_id;
	void * func_pointer;
};

/* A bounded table of function pointers used to dispatch WebAssembly indirect calls */
struct wasm_table {
	uint32_t length;   /* number of populated entries */
	uint32_t capacity; /* number of allocated entries in buffer */
	struct wasm_table_entry *buffer;
};
/* Forward declarations for the wasm_table lifecycle API defined below */
static INLINE struct wasm_table *wasm_table_alloc(void);
static INLINE int wasm_table_init(struct wasm_table *self, size_t capacity);
static INLINE struct wasm_table *wasm_table_new(size_t capacity);
static INLINE void wasm_table_deinit(struct wasm_table *self);
static INLINE void wasm_table_free(struct wasm_table *self);
static INLINE void wasm_table_delete(struct wasm_table *self); /* NOTE(review): declared but not defined in this header — confirm intended */
/**
 * Heap-allocates an uninitialized wasm_table struct
 * @returns pointer to the struct, or NULL on allocation failure
 */
static INLINE struct wasm_table *
wasm_table_alloc(void)
{
	/* No cast: malloc returns void *, and sizeof *self stays correct if the type changes */
	struct wasm_table *self = malloc(sizeof(*self));
	return self;
}
/**
 * Initializes a wasm_table, allocating a zeroed buffer of capacity entries
 * @param self the table to initialize
 * @param capacity number of entries; 0 yields an empty table with a NULL buffer
 * @returns 0 on success, -1 on allocation failure (self is left empty and safe to deinit)
 */
static INLINE int
wasm_table_init(struct wasm_table *self, size_t capacity)
{
	assert(self != NULL);
	/* capacity is stored in a uint32_t field; reject values that would silently truncate */
	assert(capacity <= UINT32_MAX);

	/* Always leave the table in a well-defined state: previously buffer was left
	 * indeterminate when capacity == 0, which made later frees/asserts read garbage */
	self->buffer   = NULL;
	self->length   = 0;
	self->capacity = 0;

	if (capacity > 0) {
		self->buffer = calloc(capacity, sizeof(struct wasm_table_entry));
		if (self->buffer == NULL) return -1;
	}

	self->capacity = capacity;
	return 0;
}
/**
 * Allocates and initializes a wasm_table in one step
 * @param capacity number of entries
 * @returns initialized table, or NULL if allocation or initialization failed
 */
static INLINE struct wasm_table *
wasm_table_new(size_t capacity)
{
	struct wasm_table *self = wasm_table_alloc();
	if (self == NULL) return NULL;

	if (wasm_table_init(self, capacity) < 0) {
		/* init failed before the buffer existed, so only the struct itself needs freeing */
		wasm_table_free(self);
		return NULL;
	}

	return self;
}
/**
 * Releases the entry buffer of a wasm_table and resets it to the empty state
 * Does not free the struct itself; see wasm_table_free
 * @param self the table to deinitialize
 */
static INLINE void
wasm_table_deinit(struct wasm_table *self)
{
	assert(self != NULL);

	/* BUGFIX: the condition was inverted (capacity > 0). That leaked the buffer of every
	 * non-empty table (and tripped assert(buffer == NULL)), while empty tables fell through
	 * and called free() on an indeterminate pointer. An empty table has nothing to release. */
	if (self->capacity == 0) {
		assert(self->buffer == NULL);
		assert(self->length == 0);
		return;
	}

	assert(self->buffer != NULL);
	free(self->buffer);
	self->buffer   = NULL;
	self->length   = 0;
	self->capacity = 0;
}
/**
 * Frees the wasm_table struct itself
 * NOTE: does not release the entry buffer; call wasm_table_deinit first for non-empty tables
 * @param self the table to free
 */
static INLINE void
wasm_table_free(struct wasm_table *self)
{
	assert(self != NULL);

	free(self);
}
static INLINE void *
wasm_table_get(struct wasm_table *self, uint32_t idx, uint32_t type_id)
{
assert(self != NULL);
assert(idx < self->capacity);
struct wasm_table_entry f = self->buffer[idx];
// FIXME: Commented out function type check because of gocr
// assert(f.type_id == type_id);
assert(f.func_pointer != NULL);
return f.func_pointer;
}
/**
 * Stores a function pointer and its type id at a table index
 * @param self the table
 * @param idx index of the entry; must be within capacity
 * @param type_id wasm function type of the pointer being stored
 * @param pointer non-NULL function pointer to store
 */
static INLINE void
wasm_table_set(struct wasm_table *self, uint32_t idx, uint32_t type_id, char *pointer)
{
	assert(self != NULL);
	assert(idx < self->capacity);
	assert(pointer != NULL);

	/* TODO: atomic for multiple concurrent invocations? Issue #97 */
	struct wasm_table_entry *slot = &self->buffer[idx];
	if (slot->type_id == type_id && slot->func_pointer == pointer) return;

	slot->type_id      = type_id;
	slot->func_pointer = pointer;
}

@ -2,16 +2,24 @@
#include <stdint.h>
/* FIXME: per-module configuration? Issue #101 */
#define WASM_PAGE_SIZE (1024 * 64) /* 64KB */
#define WASM_MEMORY_PAGES_INITIAL (1 << 8) /* 256 Pages ~16MB */
#define WASM_MEMORY_PAGES_MAX (1 << 15) /* 32,768 Pages ~4GB */
#define WASM_MEMORY_PAGES_MAX (1 << 16) /* 32,768 Pages ~4GB */
#define WASM_STACK_SIZE (1 << 19) /* 512KB */
#define WASM_STACK_SIZE (1 << 19) /* 512KB */
/* Wasm initialization functions generated by the compiler */
#define AWSM_ABI_INITIALIZE_GLOBALS "populate_globals"
#define AWSM_ABI_INITIALIZE_MEMORY "populate_memory"
#define AWSM_ABI_INITIALIZE_TABLE "populate_table"
#define AWSM_ABI_INITIALIZE_LIBC "wasmf___init_libc"
#define AWSM_ABI_ENTRYPOINT "wasmf_main"
/* bytes, not wasm pages */
struct wasm_memory {
void * start;
uint32_t size;
uint64_t max;
};
#define AWSM_ABI_STARTING_PAGES "starting_pages"
#define AWSM_ABI_MAX_PAGES "max_pages"
/* functions in the module to lookup and call per sandbox. */
typedef void (*awsm_abi_init_globals_fn_t)(void);
typedef void (*awsm_abi_init_mem_fn_t)(void);
typedef void (*awsm_abi_init_tbl_fn_t)(void);
typedef void (*awsm_abi_init_libc_fn_t)(int32_t, int32_t);
typedef int32_t (*awsm_abi_entrypoint_fn_t)(int32_t a, int32_t b);

@ -10,43 +10,3 @@ extern thread_local int worker_thread_epoll_file_descriptor;
extern thread_local int worker_thread_idx;
void *worker_thread_main(void *return_code);
/**
* Translates WASM offsets into runtime VM pointers
* @param offset an offset into the WebAssembly linear memory
* @param bounds_check the size of the thing we are pointing to
* @return void pointer to something in WebAssembly linear memory
*/
static inline void *
worker_thread_get_memory_ptr_void(uint32_t offset, uint32_t bounds_check)
{
return (void *)get_memory_ptr_for_runtime(offset, bounds_check);
}
/**
* Get a single-byte extended ASCII character from WebAssembly linear memory
* @param offset an offset into the WebAssembly linear memory
* @return char at the offset
*/
static inline char
worker_thread_get_memory_character(uint32_t offset)
{
return get_memory_ptr_for_runtime(offset, 1)[0];
}
/**
* Get a null-terminated String from WebAssembly linear memory
* @param offset an offset into the WebAssembly linear memory
* @param max_length the maximum expected length in characters
* @return pointer to the string or NULL if max_length is reached without finding null-terminator
*/
static inline char *
worker_thread_get_memory_string(uint32_t offset, uint32_t max_length)
{
for (uint32_t i = 0; i < max_length; i++) {
if (worker_thread_get_memory_character(offset + i) == '\0') {
return (char *)worker_thread_get_memory_ptr_void(offset, 1);
}
}
return NULL;
}

@ -1,9 +1,9 @@
#include <threads.h>
#include "current_sandbox.h"
#include "current_sandbox_send_response.h"
#include "sandbox_functions.h"
#include "sandbox_receive_request.h"
#include "sandbox_send_response.h"
#include "sandbox_set_as_asleep.h"
#include "sandbox_set_as_error.h"
#include "sandbox_set_as_returned.h"
@ -16,15 +16,6 @@
thread_local struct sandbox *worker_thread_current_sandbox = NULL;
thread_local struct sandbox_context_cache local_sandbox_context_cache = {
.memory = {
.start = NULL,
.size = 0,
.max = 0,
},
.module_indirect_table = NULL,
};
/**
* @brief Switches from an executing sandbox to the worker thread base context
*
@ -96,10 +87,12 @@ current_sandbox_init()
rc = sandbox_receive_request(sandbox);
if (rc == -2) {
/* Request size exceeded Buffer, send 413 Payload Too Large */
client_socket_send(sandbox->client_socket_descriptor, 413);
client_socket_send(sandbox->client_socket_descriptor, http_header_build(413), http_header_len(413),
current_sandbox_sleep);
goto err;
} else if (rc == -1) {
client_socket_send(sandbox->client_socket_descriptor, 400);
client_socket_send(sandbox->client_socket_descriptor, http_header_build(400), http_header_len(400),
current_sandbox_sleep);
goto err;
}
@ -133,7 +126,7 @@ current_sandbox_fini()
sandbox->timestamp_of.completion = __getcycles();
/* Retrieve the result, construct the HTTP response, and send to client */
if (sandbox_send_response(sandbox) < 0) {
if (current_sandbox_send_response() < 0) {
error_message = "Unable to build and send client response\n";
goto err;
};

@ -0,0 +1,28 @@
#include <stdlib.h>
#include <string.h>

#include "current_sandbox.h"
#include "wasm_memory.h"
#include "wasm_module_instance.h"
/* Per-worker cache of the running sandbox's module-instance state (linear memory + indirect table).
 * Zero/NULL until a sandbox's members are copied in; see current_wasm_module_instance_memory_writeback. */
thread_local struct wasm_module_instance current_wasm_module_instance = {
	.memory =
	  (struct wasm_memory){
	    .size = 0,
	    .max = 0,
	    .capacity = 0,
	    .buffer = NULL,
	  },
	.table = NULL,
};
/**
 * Because we copy the members of a sandbox when it is set to current_sandbox, current_wasm_module_instance acts as a
 * cache. If we change state by doing something like expanding a member, we have to perform writeback on the sandbox
 * member that we copied from.
 */
void
current_wasm_module_instance_memory_writeback(void)
{
	struct sandbox *current_sandbox = current_sandbox_get();
	/* Copy the cached (possibly expanded) wasm_memory back onto the sandbox's memory record.
	 * NOTE(review): assumes sandbox->memory is a struct wasm_memory * pointing at valid storage — confirm */
	memcpy(current_sandbox->memory, &current_wasm_module_instance.memory, sizeof(struct wasm_memory));
}

@ -4,6 +4,7 @@
#include "arch/getcycles.h"
#include "worker_thread.h"
#include "current_sandbox.h"
extern int32_t inner_syscall_handler(int32_t n, int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f);
@ -36,7 +37,7 @@ env_a_ctz_64(uint64_t x)
INLINE void
env_a_and_64(int32_t p_off, uint64_t v)
{
uint64_t *p = worker_thread_get_memory_ptr_void(p_off, sizeof(uint64_t));
uint64_t *p = current_sandbox_get_ptr_void(p_off, sizeof(uint64_t));
ck_pr_and_64(p, v);
}
@ -44,7 +45,7 @@ INLINE void
env_a_or_64(int32_t p_off, int64_t v)
{
assert(sizeof(int64_t) == sizeof(uint64_t));
uint64_t *p = worker_thread_get_memory_ptr_void(p_off, sizeof(int64_t));
uint64_t *p = current_sandbox_get_ptr_void(p_off, sizeof(int64_t));
ck_pr_or_64(p, v);
}
@ -52,7 +53,7 @@ int32_t
env_a_cas(int32_t p_off, int32_t t, int32_t s)
{
assert(sizeof(int32_t) == sizeof(volatile int));
int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(int32_t));
int *p = current_sandbox_get_ptr_void(p_off, sizeof(int32_t));
return ck_pr_cas_int(p, t, s);
}
@ -61,7 +62,7 @@ void
env_a_or(int32_t p_off, int32_t v)
{
assert(sizeof(int32_t) == sizeof(volatile int));
int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(int32_t));
int *p = current_sandbox_get_ptr_void(p_off, sizeof(int32_t));
ck_pr_or_int(p, v);
}
@ -69,7 +70,7 @@ int32_t
env_a_swap(int32_t x_off, int32_t v)
{
assert(sizeof(int32_t) == sizeof(volatile int));
int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(int32_t));
int *x = current_sandbox_get_ptr_void(x_off, sizeof(int32_t));
int p;
do {
@ -84,7 +85,7 @@ int32_t
env_a_fetch_add(int32_t x_off, int32_t v)
{
assert(sizeof(int32_t) == sizeof(volatile int));
int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(int32_t));
int *x = current_sandbox_get_ptr_void(x_off, sizeof(int32_t));
return ck_pr_faa_int(x, v);
}
@ -92,7 +93,7 @@ void
env_a_inc(int32_t x_off)
{
assert(sizeof(int32_t) == sizeof(volatile int));
int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(int32_t));
int *x = current_sandbox_get_ptr_void(x_off, sizeof(int32_t));
ck_pr_inc_int(x);
}
@ -100,7 +101,7 @@ void
env_a_dec(int32_t x_off)
{
assert(sizeof(int32_t) == sizeof(volatile int));
int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(int32_t));
int *x = (int *)current_sandbox_get_ptr_void(x_off, sizeof(int32_t));
ck_pr_dec_int(x);
}
@ -108,7 +109,7 @@ void
env_a_store(int32_t p_off, int32_t x)
{
assert(sizeof(int32_t) == sizeof(volatile int));
int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(int32_t));
int *p = (int *)current_sandbox_get_ptr_void(p_off, sizeof(int32_t));
ck_pr_store_int(p, x);
}

@ -4,14 +4,14 @@
#include "panic.h"
/* Default uninitialized implementations of the polymorphic interface */
noreturn static struct sandbox_request *
noreturn static struct sandbox *
uninitialized_add(void *arg)
{
panic("Global Request Scheduler Add was called before initialization\n");
}
noreturn static int
uninitialized_remove(struct sandbox_request **arg)
uninitialized_remove(struct sandbox **arg)
{
panic("Global Request Scheduler Remove was called before initialization\n");
}
@ -41,44 +41,44 @@ global_request_scheduler_initialize(struct global_request_scheduler_config *conf
/**
* Adds a sandbox request to the request scheduler
* @param sandbox_request
* Adds a sandbox to the request scheduler
* @param sandbox
*/
struct sandbox_request *
global_request_scheduler_add(struct sandbox_request *sandbox_request)
struct sandbox *
global_request_scheduler_add(struct sandbox *sandbox)
{
assert(sandbox_request != NULL);
return global_request_scheduler.add_fn(sandbox_request);
assert(sandbox != NULL);
return global_request_scheduler.add_fn(sandbox);
}
/**
* Removes a sandbox request according to the scheduling policy of the variant
* Removes a sandbox according to the scheduling policy of the variant
* @param removed_sandbox where to write the adddress of the removed sandbox
* @returns 0 if successfully returned a sandbox request, -ENOENT if empty, -EAGAIN if atomic operation unsuccessful
* @returns 0 if successfully returned a sandbox, -ENOENT if empty, -EAGAIN if atomic operation unsuccessful
*/
int
global_request_scheduler_remove(struct sandbox_request **removed_sandbox)
global_request_scheduler_remove(struct sandbox **removed_sandbox)
{
assert(removed_sandbox != NULL);
return global_request_scheduler.remove_fn(removed_sandbox);
}
/**
* Removes a sandbox request according to the scheduling policy of the variant
* Removes a sandbox according to the scheduling policy of the variant
* @param removed_sandbox where to write the adddress of the removed sandbox
* @param target_deadline the deadline that must be validated before dequeuing
* @returns 0 if successfully returned a sandbox request, -ENOENT if empty or if no element meets target_deadline,
* @returns 0 if successfully returned a sandbox, -ENOENT if empty or if no element meets target_deadline,
* -EAGAIN if atomic operation unsuccessful
*/
int
global_request_scheduler_remove_if_earlier(struct sandbox_request **removed_sandbox, uint64_t target_deadline)
global_request_scheduler_remove_if_earlier(struct sandbox **removed_sandbox, uint64_t target_deadline)
{
assert(removed_sandbox != NULL);
return global_request_scheduler.remove_if_earlier_fn(removed_sandbox, target_deadline);
}
/**
* Peeks at the priority of the highest priority sandbox request
* Peeks at the priority of the highest priority sandbox
* @returns highest priority
*/
uint64_t

@ -1,46 +1,48 @@
#include "global_request_scheduler.h"
#include "global_request_scheduler_deque.h"
#include "runtime.h"
#define GLOBAL_REQUEST_SCHEDULER_DEQUE_CAPACITY (1 << 19)
static struct deque_sandbox *global_request_scheduler_deque;
/* TODO: Should this be used??? */
static pthread_mutex_t global_request_scheduler_deque_mutex = PTHREAD_MUTEX_INITIALIZER;
/**
* Pushes a sandbox request to the global deque
* @param sandbox_request
* Pushes a sandbox to the global deque
* @param sandbox_raw
* @returns pointer to request if added. NULL otherwise
*/
static struct sandbox_request *
global_request_scheduler_deque_add(void *sandbox_request_raw)
static struct sandbox *
global_request_scheduler_deque_add(void *sandbox_raw)
{
struct sandbox_request *sandbox_request = (struct sandbox_request *)sandbox_request_raw;
int return_code = 1;
struct sandbox *sandbox = (struct sandbox *)sandbox_raw;
int return_code = 1;
return_code = deque_push_sandbox(global_request_scheduler_deque, &sandbox_request);
return_code = deque_push_sandbox(global_request_scheduler_deque, &sandbox);
if (return_code != 0) return NULL;
return sandbox_request_raw;
return sandbox_raw;
}
/**
* Stealing from the dequeue is a lock-free, cross-core "pop", which removes the element from the end opposite to
* "pop". Because the producer and consumer (the core stealine the sandbox request) modify different ends,
* "pop". Because the producer and consumer (the core stealine the sandbox) modify different ends,
* no locks are required, and coordination is achieved by instead retrying on inconsistent indices.
*
* Relevant Read: https://www.dre.vanderbilt.edu/~schmidt/PDF/work-stealing-dequeue.pdf
*
* @returns 0 if successfully returned a sandbox request, -ENOENT if empty, -EAGAIN if atomic instruction unsuccessful
* @returns 0 if successfully returned a sandbox, -ENOENT if empty, -EAGAIN if atomic instruction unsuccessful
*/
static int
global_request_scheduler_deque_remove(struct sandbox_request **removed_sandbox_request)
global_request_scheduler_deque_remove(struct sandbox **removed_sandbox)
{
return deque_steal_sandbox(global_request_scheduler_deque, removed_sandbox_request);
return deque_steal_sandbox(global_request_scheduler_deque, removed_sandbox);
}
static int
global_request_scheduler_deque_remove_if_earlier(struct sandbox_request **removed_sandbox_request,
uint64_t target_deadline)
global_request_scheduler_deque_remove_if_earlier(struct sandbox **removed_sandbox, uint64_t target_deadline)
{
panic("Deque variant does not support this call\n");
return -1;
@ -53,7 +55,7 @@ global_request_scheduler_deque_initialize()
global_request_scheduler_deque = (struct deque_sandbox *)malloc(sizeof(struct deque_sandbox));
assert(global_request_scheduler_deque);
/* Note: Below is a Macro */
deque_init_sandbox(global_request_scheduler_deque, RUNTIME_MAX_SANDBOX_REQUEST_COUNT);
deque_init_sandbox(global_request_scheduler_deque, GLOBAL_REQUEST_SCHEDULER_DEQUE_CAPACITY);
/* Register Function Pointers for Abstract Scheduling API */
struct global_request_scheduler_config config = {

@ -10,43 +10,42 @@
static struct priority_queue *global_request_scheduler_minheap;
/**
* Pushes a sandbox request to the global deque
* @param sandbox_request
* @returns pointer to request if added. NULL otherwise
* Pushes a sandbox to the global deque
* @param sandbox
* @returns pointer to request if added. Panics runtime otherwise
*/
static struct sandbox_request *
global_request_scheduler_minheap_add(void *sandbox_request)
static struct sandbox *
global_request_scheduler_minheap_add(void *sandbox_raw)
{
assert(sandbox_request);
assert(sandbox_raw);
assert(global_request_scheduler_minheap);
if (unlikely(!listener_thread_is_running())) panic("%s is only callable by the listener thread\n", __func__);
int return_code = priority_queue_enqueue(global_request_scheduler_minheap, sandbox_request);
int return_code = priority_queue_enqueue(global_request_scheduler_minheap, sandbox_raw);
/* TODO: Propagate -1 to caller. Issue #91 */
if (return_code == -ENOSPC) panic("Request Queue is full\n");
return sandbox_request;
return (struct sandbox *)sandbox_raw;
}
/**
* @param pointer to the pointer that we want to set to the address of the removed sandbox request
* @param pointer to the pointer that we want to set to the address of the removed sandbox
* @returns 0 if successful, -ENOENT if empty
*/
int
global_request_scheduler_minheap_remove(struct sandbox_request **removed_sandbox_request)
global_request_scheduler_minheap_remove(struct sandbox **removed_sandbox)
{
return priority_queue_dequeue(global_request_scheduler_minheap, (void **)removed_sandbox_request);
return priority_queue_dequeue(global_request_scheduler_minheap, (void **)removed_sandbox);
}
/**
* @param removed_sandbox_request pointer to set to removed sandbox request
* @param removed_sandbox pointer to set to removed sandbox
* @param target_deadline the deadline that the request must be earlier than to dequeue
* @returns 0 if successful, -ENOENT if empty or if request isn't earlier than target_deadline
*/
int
global_request_scheduler_minheap_remove_if_earlier(struct sandbox_request **removed_sandbox_request,
uint64_t target_deadline)
global_request_scheduler_minheap_remove_if_earlier(struct sandbox **removed_sandbox, uint64_t target_deadline)
{
return priority_queue_dequeue_if_earlier(global_request_scheduler_minheap, (void **)removed_sandbox_request,
return priority_queue_dequeue_if_earlier(global_request_scheduler_minheap, (void **)removed_sandbox,
target_deadline);
}
@ -63,10 +62,10 @@ global_request_scheduler_minheap_peek(void)
}
uint64_t
sandbox_request_get_priority_fn(void *element)
sandbox_get_priority_fn(void *element)
{
struct sandbox_request *sandbox_request = (struct sandbox_request *)element;
return sandbox_request->absolute_deadline;
struct sandbox *sandbox = (struct sandbox *)element;
return sandbox->absolute_deadline;
};
@ -76,7 +75,7 @@ sandbox_request_get_priority_fn(void *element)
void
global_request_scheduler_minheap_initialize()
{
global_request_scheduler_minheap = priority_queue_initialize(4096, true, sandbox_request_get_priority_fn);
global_request_scheduler_minheap = priority_queue_initialize(4096, true, sandbox_get_priority_fn);
struct global_request_scheduler_config config = {
.add_fn = global_request_scheduler_minheap_add,

@ -14,6 +14,7 @@
#include "scheduler.h"
#include "sandbox_functions.h"
#include "worker_thread.h"
#include "wasm_module_instance.h"
// What should we tell the child program its UID and GID are?
#define UID 0xFF
@ -39,11 +40,12 @@
void
stub_init(int32_t offset)
{
struct sandbox *current_sandbox = current_sandbox_get();
// What program name will we put in the auxiliary vectors
char *program_name = current_sandbox_get()->module->name;
char *program_name = current_sandbox->module->name;
// Copy the program name into WASM accessible memory
int32_t program_name_offset = offset;
strcpy(get_memory_ptr_for_runtime(offset, sizeof(program_name)), program_name);
strcpy(wasm_memory_get_ptr_void(current_sandbox->memory, offset, sizeof(program_name)), program_name);
offset += sizeof(program_name);
// The construction of this is:
@ -69,7 +71,8 @@ stub_init(int32_t offset)
0,
};
int32_t env_vec_offset = offset;
memcpy(get_memory_ptr_for_runtime(env_vec_offset, sizeof(env_vec)), env_vec, sizeof(env_vec));
memcpy(wasm_memory_get_ptr_void(current_sandbox->memory, env_vec_offset, sizeof(env_vec)), env_vec,
sizeof(env_vec));
module_initialize_libc(current_sandbox_get()->module, env_vec_offset, program_name_offset);
}
@ -92,7 +95,7 @@ wasm_read(int32_t filedes, int32_t buf_offset, int32_t nbyte)
/* Non-blocking copy on stdin */
if (filedes == 0) {
char * buffer = worker_thread_get_memory_ptr_void(buf_offset, nbyte);
char * buffer = current_sandbox_get_ptr_void(buf_offset, nbyte);
struct http_request *current_request = &current_sandbox->http_request;
if (current_request->body_length <= 0) return 0;
int bytes_to_read = nbyte > current_request->body_length ? current_request->body_length : nbyte;
@ -102,7 +105,7 @@ wasm_read(int32_t filedes, int32_t buf_offset, int32_t nbyte)
return bytes_to_read;
}
char *buf = worker_thread_get_memory_ptr_void(buf_offset, nbyte);
char *buf = current_sandbox_get_ptr_void(buf_offset, nbyte);
int32_t res = 0;
while (res < nbyte) {
@ -132,18 +135,19 @@ err:
int32_t
wasm_write(int32_t fd, int32_t buf_offset, int32_t buf_size)
{
struct sandbox *s = current_sandbox_get();
char * buffer = worker_thread_get_memory_ptr_void(buf_offset, buf_size);
struct sandbox *s = current_sandbox_get();
char * buffer = current_sandbox_get_ptr_void(buf_offset, buf_size);
struct vec_u8 * response = &s->response;
if (fd == STDERR_FILENO) { write(STDERR_FILENO, buffer, buf_size); }
if (fd == STDOUT_FILENO) {
int buffer_remaining = s->module->max_response_size - s->response.length;
int buffer_remaining = response->capacity - response->length;
int to_write = buffer_remaining > buf_size ? buf_size : buffer_remaining;
if (to_write == 0) return 0;
memcpy(&s->response.base[s->response.length], buffer, to_write);
s->response.length += to_write;
memcpy(&response->buffer[response->length], buffer, to_write);
response->length += to_write;
return to_write;
}
@ -173,7 +177,7 @@ err:
int32_t
wasm_open(int32_t path_off, int32_t flags, int32_t mode)
{
char *path = worker_thread_get_memory_string(path_off, MODULE_MAX_PATH_LENGTH);
char *path = current_sandbox_get_string(path_off, MODULE_MAX_PATH_LENGTH);
int res = ENOTSUP;
@ -219,8 +223,8 @@ wasm_mmap(int32_t addr, int32_t len, int32_t prot, int32_t flags, int32_t fd, in
assert(len % WASM_PAGE_SIZE == 0);
int32_t result = local_sandbox_context_cache.memory.size;
for (int i = 0; i < len / WASM_PAGE_SIZE; i++) { expand_memory(); }
int32_t result = wasm_memory_get_size(&current_wasm_module_instance.memory);
if (wasm_memory_expand(&current_wasm_module_instance.memory, len) == -1) { result = (uint32_t)-1; }
return result;
}
@ -252,7 +256,7 @@ int32_t
wasm_readv(int32_t fd, int32_t iov_offset, int32_t iovcnt)
{
int32_t read = 0;
struct wasm_iovec *iov = worker_thread_get_memory_ptr_void(iov_offset, iovcnt * sizeof(struct wasm_iovec));
struct wasm_iovec *iov = current_sandbox_get_ptr_void(iov_offset, iovcnt * sizeof(struct wasm_iovec));
for (int i = 0; i < iovcnt; i++) { read += wasm_read(fd, iov[i].base_offset, iov[i].len); }
return read;
@ -266,8 +270,7 @@ wasm_writev(int32_t fd, int32_t iov_offset, int32_t iovcnt)
if (fd == STDOUT_FILENO || fd == STDERR_FILENO) {
// both 1 and 2 go to client.
int len = 0;
struct wasm_iovec *iov = worker_thread_get_memory_ptr_void(iov_offset,
iovcnt * sizeof(struct wasm_iovec));
struct wasm_iovec *iov = current_sandbox_get_ptr_void(iov_offset, iovcnt * sizeof(struct wasm_iovec));
for (int i = 0; i < iovcnt; i++) { len += wasm_write(fd, iov[i].base_offset, iov[i].len); }
return len;
@ -277,7 +280,7 @@ wasm_writev(int32_t fd, int32_t iov_offset, int32_t iovcnt)
assert(0);
struct wasm_iovec *iov = worker_thread_get_memory_ptr_void(iov_offset, iovcnt * sizeof(struct wasm_iovec));
struct wasm_iovec *iov = current_sandbox_get_ptr_void(iov_offset, iovcnt * sizeof(struct wasm_iovec));
// If we aren't on MUSL, pass writev to printf if possible
#if defined(__GLIBC__)
@ -285,7 +288,7 @@ wasm_writev(int32_t fd, int32_t iov_offset, int32_t iovcnt)
int sum = 0;
for (int i = 0; i < iovcnt; i++) {
int32_t len = iov[i].len;
void * ptr = worker_thread_get_memory_ptr_void(iov[i].base_offset, len);
void * ptr = current_sandbox_get_ptr_void(iov[i].base_offset, len);
printf("%.*s", len, (char *)ptr);
sum += len;
@ -297,7 +300,7 @@ wasm_writev(int32_t fd, int32_t iov_offset, int32_t iovcnt)
struct iovec vecs[iovcnt];
for (int i = 0; i < iovcnt; i++) {
int32_t len = iov[i].len;
void * ptr = worker_thread_get_memory_ptr_void(iov[i].base_offset, len);
void * ptr = current_sandbox_get_ptr_void(iov[i].base_offset, len);
vecs[i] = (struct iovec){ ptr, len };
}
@ -318,25 +321,20 @@ wasm_mremap(int32_t offset, int32_t old_size, int32_t new_size, int32_t flags)
if (new_size <= old_size) return offset;
// If at end of linear memory, just expand and return same address
if (offset + old_size == local_sandbox_context_cache.memory.size) {
int32_t amount_to_expand = new_size - old_size;
int32_t pages_to_allocate = amount_to_expand / WASM_PAGE_SIZE;
if (amount_to_expand % WASM_PAGE_SIZE > 0) pages_to_allocate++;
for (int i = 0; i < pages_to_allocate; i++) expand_memory();
if (offset + old_size == current_wasm_module_instance.memory.size) {
int32_t amount_to_expand = new_size - old_size;
wasm_memory_expand(&current_wasm_module_instance.memory, amount_to_expand);
return offset;
}
// Otherwise allocate at end of address space and copy
int32_t pages_to_allocate = new_size / WASM_PAGE_SIZE;
if (new_size % WASM_PAGE_SIZE > 0) pages_to_allocate++;
int32_t new_offset = local_sandbox_context_cache.memory.size;
for (int i = 0; i < pages_to_allocate; i++) expand_memory();
int32_t new_offset = current_wasm_module_instance.memory.size;
wasm_memory_expand(&current_wasm_module_instance.memory, new_size);
// Get pointer of old offset and pointer of new offset
char *linear_mem = local_sandbox_context_cache.memory.start;
char *src = &linear_mem[offset];
char *dest = &linear_mem[new_offset];
uint8_t *linear_mem = current_wasm_module_instance.memory.buffer;
uint8_t *src = &linear_mem[offset];
uint8_t *dest = &linear_mem[new_offset];
// Copy Values. We can use memcpy because we don't overlap
memcpy((void *)dest, (void *)src, old_size);
@ -384,8 +382,7 @@ wasm_get_time(int32_t clock_id, int32_t timespec_off)
assert(0);
}
struct wasm_time_spec *timespec = worker_thread_get_memory_ptr_void(timespec_off,
sizeof(struct wasm_time_spec));
struct wasm_time_spec *timespec = current_sandbox_get_ptr_void(timespec_off, sizeof(struct wasm_time_spec));
struct timespec native_timespec = { 0, 0 };
int res = clock_gettime(real_clock, &native_timespec);

@ -7,6 +7,7 @@
#include "generic_thread.h"
#include "listener_thread.h"
#include "runtime.h"
#include "sandbox_functions.h"
/*
* Descriptor of the epoll instance used to monitor the socket descriptors of registered
@ -169,21 +170,27 @@ listener_thread_main(void *dummy)
*/
uint64_t work_admitted = admissions_control_decide(module->admissions_info.estimate);
if (work_admitted == 0) {
client_socket_send(client_socket, 503);
client_socket_send_oneshot(client_socket, http_header_build(503),
http_header_len(503));
if (unlikely(close(client_socket) < 0))
debuglog("Error closing client socket - %s", strerror(errno));
continue;
}
/* Allocate a Sandbox Request */
struct sandbox_request *sandbox_request =
sandbox_request_allocate(module, client_socket,
(const struct sockaddr *)&client_address,
request_arrival_timestamp, work_admitted);
/* Allocate a Sandbox */
struct sandbox *sandbox = sandbox_new(module, client_socket,
(const struct sockaddr *)&client_address,
request_arrival_timestamp, work_admitted);
if (unlikely(sandbox == NULL)) {
client_socket_send_oneshot(sandbox->client_socket_descriptor,
http_header_build(503), http_header_len(503));
client_socket_close(sandbox->client_socket_descriptor,
&sandbox->client_address);
}
/* Add to the Global Sandbox Request Scheduler */
global_request_scheduler_add(sandbox_request);
global_request_scheduler_add(sandbox);
} /* while true */
} /* for loop */

@ -297,7 +297,7 @@ log_compiletime_config()
pretty_print_key_disabled("Log Preemption");
#endif
#ifdef LOG_REQUEST_ALLOCATION
#ifdef LOG_SANDBOX_ALLOCATION
pretty_print_key_enabled("Log Request Allocation");
#else
pretty_print_key_disabled("Log Request Allocation");
@ -321,10 +321,10 @@ log_compiletime_config()
pretty_print_key_disabled("Log Total Reqs/Resps");
#endif
#ifdef LOG_SANDBOX_COUNT
pretty_print_key_enabled("Log Sandbox Count");
#ifdef SANDBOX_STATE_TOTALS
pretty_print_key_enabled("Log Sandbox State Count");
#else
pretty_print_key_disabled("Log Sandbox Count");
pretty_print_key_disabled("Log Sandbox State Count");
#endif
#ifdef LOG_LOCAL_RUNQUEUE

@ -1,90 +0,0 @@
#include "current_sandbox.h"
#include "panic.h"
#include "runtime.h"
#include "sandbox_types.h"
#include "types.h"
#include <sys/mman.h>
/**
* @brief Expand the linear memory of the active WebAssembly sandbox by a single page
*
* @return int
*/
int
expand_memory(void)
{
struct sandbox *sandbox = current_sandbox_get();
assert(sandbox->state == SANDBOX_RUNNING_USER || sandbox->state == SANDBOX_RUNNING_SYS);
assert(local_sandbox_context_cache.memory.size % WASM_PAGE_SIZE == 0);
/* Return -1 if we've hit the linear memory max */
if (unlikely(local_sandbox_context_cache.memory.size + WASM_PAGE_SIZE
>= local_sandbox_context_cache.memory.max)) {
debuglog("expand_memory - Out of Memory!. %u out of %lu\n", local_sandbox_context_cache.memory.size,
local_sandbox_context_cache.memory.max);
return -1;
}
// Remap the relevant wasm page to readable
char *mem_as_chars = local_sandbox_context_cache.memory.start;
char *page_address = &mem_as_chars[local_sandbox_context_cache.memory.size];
void *map_result = mmap(page_address, WASM_PAGE_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (map_result == MAP_FAILED) {
debuglog("Mapping of new memory failed");
return -1;
}
local_sandbox_context_cache.memory.size += WASM_PAGE_SIZE;
#ifdef LOG_SANDBOX_MEMORY_PROFILE
// Cache the runtime of the first N page allocations
if (likely(sandbox->timestamp_of.page_allocations_size < SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT)) {
sandbox->timestamp_of.page_allocations[sandbox->timestamp_of.page_allocations_size++] =
sandbox->duration_of_state.running
+ (uint32_t)(__getcycles() - sandbox->timestamp_of.last_state_change);
}
#endif
// local_sandbox_context_cache is "forked state", so update authoritative member
sandbox->memory.size = local_sandbox_context_cache.memory.size;
return 0;
}
INLINE char *
get_memory_ptr_for_runtime(uint32_t offset, uint32_t bounds_check)
{
// Due to how we setup memory for x86, the virtual memory mechanism will catch the error, if bounds <
// WASM_PAGE_SIZE
assert(bounds_check < WASM_PAGE_SIZE
|| (local_sandbox_context_cache.memory.size > bounds_check
&& offset <= local_sandbox_context_cache.memory.size - bounds_check));
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
char *address = &mem_as_chars[offset];
return address;
}
/**
* @brief Stub that implements the WebAssembly memory.grow instruction
*
* @param count number of pages to grow the WebAssembly linear memory by
* @return The previous size of the linear memory in pages or -1 if enough memory cannot be allocated
*/
int32_t
instruction_memory_grow(uint32_t count)
{
int rc = local_sandbox_context_cache.memory.size / WASM_PAGE_SIZE;
for (int i = 0; i < count; i++) {
if (unlikely(expand_memory() != 0)) {
rc = -1;
break;
}
}
return rc;
}

@ -1,22 +0,0 @@
#include <assert.h>
#include <string.h>
#include "runtime.h"
#include "types.h"
/* Region initialization helper function */
EXPORT void
initialize_region(uint32_t offset, uint32_t data_count, char *data)
{
assert(local_sandbox_context_cache.memory.size >= data_count);
assert(offset < local_sandbox_context_cache.memory.size - data_count);
memcpy(get_memory_ptr_for_runtime(offset, data_count), data, data_count);
}
/* If we are using runtime globals, we need to populate them */
WEAK void
populate_globals()
{
assert(0); /* FIXME: is this used in WASM as dynamic modules? Issue #105. */
}

@ -16,6 +16,7 @@
#include "panic.h"
#include "runtime.h"
#include "scheduler.h"
#include "wasm_table.h"
const int JSON_MAX_ELEMENT_COUNT = 16;
const int JSON_MAX_ELEMENT_SIZE = 1024;
@ -182,6 +183,11 @@ module_new(char *name, char *path, uint32_t stack_size, uint32_t max_memory, uin
admissions_info_initialize(&module->admissions_info, admissions_percentile, expected_execution,
module->relative_deadline);
/* WebAssembly Indirect Table */
/* TODO: Should this be part of the module or per-sandbox? */
/* TODO: How should this table be sized? */
module->indirect_table = wasm_table_new(INDIRECT_TABLE_SIZE);
/* Request Response Buffer */
if (request_size == 0) request_size = MODULE_DEFAULT_REQUEST_RESPONSE_SIZE;
if (response_size == 0) response_size = MODULE_DEFAULT_REQUEST_RESPONSE_SIZE;
@ -189,22 +195,22 @@ module_new(char *name, char *path, uint32_t stack_size, uint32_t max_memory, uin
module->max_response_size = round_up_to_page(response_size);
/* Table initialization calls a function that runs within the sandbox. Rather than setting the current sandbox,
* we partially fake this out by only setting the module_indirect_table and then clearing after table
* we partially fake this out by only setting the table and then clearing after table
* initialization is complete.
*
* assumption: This approach depends on module_new only being invoked at program start before preemption is
* enabled. We are check that local_sandbox_context_cache.module_indirect_table is NULL to gain confidence that
* enabled. We are check that current_wasm_module_instance.table is NULL to gain confidence that
* we are not invoking this in a way that clobbers a current module.
*
* If we want to be able to do this later, we can possibly defer module_initialize_table until the first
* invocation. Alternatively, we can maintain the module_indirect_table per sandbox and call initialize
* invocation. Alternatively, we can maintain the table per sandbox and call initialize
* on each sandbox if this "assumption" is too restrictive and we're ready to pay a per-sandbox performance hit.
*/
assert(local_sandbox_context_cache.module_indirect_table == NULL);
local_sandbox_context_cache.module_indirect_table = module->indirect_table;
assert(current_wasm_module_instance.table == NULL);
current_wasm_module_instance.table = module->indirect_table;
module_initialize_table(module);
local_sandbox_context_cache.module_indirect_table = NULL;
current_wasm_module_instance.table = NULL;
/* Start listening for requests */
rc = module_listen(module);

@ -20,7 +20,7 @@
#include "listener_thread.h"
#include "module.h"
#include "runtime.h"
#include "sandbox_request.h"
#include "sandbox_total.h"
#include "scheduler.h"
#include "software_interrupt.h"
@ -101,8 +101,8 @@ runtime_initialize(void)
memset(runtime_worker_threads_deadline, UINT8_MAX, runtime_worker_threads_count * sizeof(uint64_t));
http_total_init();
sandbox_request_count_initialize();
sandbox_count_initialize();
sandbox_total_initialize();
sandbox_state_totals_initialize();
/* Setup Scheduler */
scheduler_initialize();

@ -5,87 +5,48 @@
#include "current_sandbox.h"
#include "debuglog.h"
#include "panic.h"
#include "runtime.h"
#include "sandbox_functions.h"
#include "sandbox_set_as_error.h"
#include "sandbox_set_as_initialized.h"
#include "sandbox_set_as_allocated.h"
#include "sandbox_total.h"
#include "wasm_memory.h"
#include "wasm_stack.h"
_Atomic uint32_t sandbox_total = 0;
static inline void
sandbox_log_allocation(struct sandbox *sandbox)
{
#ifdef LOG_SANDBOX_ALLOCATION
debuglog("Sandbox %lu: of %s:%d\n", sandbox->id, sandbox->module->name, sandbox->module->port);
#endif
}
/**
* Allocates a WebAssembly sandbox represented by the following layout
* struct sandbox | HTTP Req Buffer | HTTP Resp Buffer | 4GB of Wasm Linear Memory | Guard Page
* @param module the module that we want to run
* @returns the resulting sandbox or NULL if mmap failed
* Allocates a WebAssembly linear memory for a sandbox based on the starting_pages and max_pages globals present in
* the associated *.so module
* @param sandbox sandbox that we want to allocate a linear memory for
* @returns 0 on success, -1 on error
*/
static inline struct sandbox *
sandbox_allocate_memory(struct module *module)
static inline int
sandbox_allocate_linear_memory(struct sandbox *sandbox)
{
assert(module != NULL);
char * error_message = NULL;
unsigned long memory_size = WASM_PAGE_SIZE * WASM_MEMORY_PAGES_INITIAL; /* The initial pages */
uint64_t memory_max = (uint64_t)WASM_PAGE_SIZE * WASM_MEMORY_PAGES_MAX;
struct sandbox *sandbox = NULL;
unsigned long page_aligned_sandbox_size = round_up_to_page(sizeof(struct sandbox));
unsigned long size_to_alloc = page_aligned_sandbox_size + module->max_request_size + module->max_request_size
+ memory_max + /* guard page */ PAGE_SIZE;
unsigned long size_to_read_write = page_aligned_sandbox_size + module->max_request_size
+ module->max_request_size + memory_size;
/*
* Control information should be page-aligned
*/
assert(round_up_to_page(size_to_alloc) == size_to_alloc);
/* At an address of the system's choosing, allocate the memory, marking it as inaccessible */
errno = 0;
void *addr = mmap(NULL, size_to_alloc, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
error_message = "sandbox_allocate_memory - memory allocation failed";
goto alloc_failed;
}
assert(addr != NULL);
/* Set the struct sandbox, HTTP Req/Resp buffer, and the initial Wasm Pages as read/write */
errno = 0;
void *addr_rw = mmap(addr, size_to_read_write, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-1, 0);
if (addr_rw == MAP_FAILED) {
error_message = "set to r/w";
goto set_rw_failed;
}
sandbox = (struct sandbox *)addr_rw;
/* Populate Sandbox members */
sandbox->state = SANDBOX_UNINITIALIZED;
sandbox->module = module;
module_acquire(module);
assert(sandbox != NULL);
sandbox->request.base = (char *)addr + page_aligned_sandbox_size;
sandbox->request.length = 0;
char *error_message = NULL;
sandbox->response.base = (char *)addr + page_aligned_sandbox_size + module->max_request_size;
sandbox->request.length = 0;
size_t initial = (size_t)sandbox->module->abi.starting_pages * WASM_PAGE_SIZE;
size_t max = (size_t)sandbox->module->abi.max_pages * WASM_PAGE_SIZE;
sandbox->memory.start = (char *)addr + page_aligned_sandbox_size + module->max_request_size
+ module->max_request_size;
sandbox->memory.size = memory_size;
sandbox->memory.max = memory_max;
assert(initial <= (size_t)UINT32_MAX + 1);
assert(max <= (size_t)UINT32_MAX + 1);
memset(&sandbox->duration_of_state, 0, SANDBOX_STATE_COUNT * sizeof(uint64_t));
sandbox->memory = wasm_memory_new(initial, max);
if (unlikely(sandbox->memory == NULL)) return -1;
done:
return sandbox;
set_rw_failed:
sandbox = NULL;
errno = 0;
int rc = munmap(addr, size_to_alloc);
if (rc == -1) perror("Failed to munmap after fail to set r/w");
alloc_failed:
err:
perror(error_message);
goto done;
return 0;
}
static inline int
@ -94,58 +55,71 @@ sandbox_allocate_stack(struct sandbox *sandbox)
assert(sandbox);
assert(sandbox->module);
int rc = 0;
return wasm_stack_allocate(&sandbox->stack, sandbox->module->stack_size);
}
char *addr = mmap(NULL, sandbox->module->stack_size + /* guard page */ PAGE_SIZE, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (unlikely(addr == MAP_FAILED)) {
perror("sandbox allocate stack");
goto err_stack_allocation_failed;
}
static inline void
sandbox_free_stack(struct sandbox *sandbox)
{
assert(sandbox);
/* Set the struct sandbox, HTTP Req/Resp buffer, and the initial Wasm Pages as read/write */
char *addr_rw = mmap(addr + /* guard page */ PAGE_SIZE, sandbox->module->stack_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (unlikely(addr_rw == MAP_FAILED)) {
perror("sandbox set stack read/write");
goto err_stack_allocation_failed;
return wasm_stack_free(&sandbox->stack);
}
/**
* Allocate http request and response buffers for a sandbox
* @param sandbox sandbox that we want to allocate HTTP buffers for
* @returns 0 on success, -1 on error
*/
static inline int
sandbox_allocate_http_buffers(struct sandbox *self)
{
int rc;
rc = vec_u8_init(&self->request, self->module->max_request_size);
if (rc < 0) return -1;
rc = vec_u8_init(&self->response, self->module->max_response_size);
if (rc < 0) {
vec_u8_deinit(&self->request);
return -1;
}
sandbox->stack.start = addr_rw;
sandbox->stack.size = sandbox->module->stack_size;
return 0;
}
rc = 0;
done:
return rc;
err_stack_prot_failed:
rc = munmap(addr, sandbox->stack.size + PAGE_SIZE);
if (rc == -1) perror("munmap");
err_stack_allocation_failed:
sandbox->stack.start = NULL;
sandbox->stack.size = 0;
goto done;
static inline struct sandbox *
sandbox_allocate(void)
{
struct sandbox *sandbox = NULL;
size_t page_aligned_sandbox_size = round_up_to_page(sizeof(struct sandbox));
sandbox = calloc(1, page_aligned_sandbox_size);
sandbox_set_as_allocated(sandbox);
return sandbox;
}
/**
* Allocates a new sandbox from a sandbox request
* Frees the sandbox request on success
* @param sandbox_request request being allocated
* @returns sandbox * on success, NULL on error
* Allocates HTTP buffers and performs our approximation of "WebAssembly instantiation"
* @param sandbox
* @returns 0 on success, -1 on error
*/
struct sandbox *
sandbox_allocate(struct sandbox_request *sandbox_request)
int
sandbox_prepare_execution_environment(struct sandbox *sandbox)
{
/* Validate Arguments */
assert(sandbox_request != NULL);
assert(sandbox != NULL);
char * error_message = "";
uint64_t now = __getcycles();
struct sandbox *sandbox;
char * error_message = "";
uint64_t now = __getcycles();
int rc;
if (sandbox_allocate_http_buffers(sandbox)) {
error_message = "failed to allocate http buffers";
goto err_http_allocation_failed;
}
/* Allocate Sandbox control structures, buffers, and linear memory in a 4GB address space */
sandbox = sandbox_allocate_memory(sandbox_request->module);
if (!sandbox) {
error_message = "failed to allocate sandbox heap and linear memory";
/* Allocate linear memory in a 4GB address space */
if (sandbox_allocate_linear_memory(sandbox)) {
error_message = "failed to allocate sandbox linear memory";
goto err_memory_allocation_failed;
}
@ -154,38 +128,76 @@ sandbox_allocate(struct sandbox_request *sandbox_request)
error_message = "failed to allocate sandbox stack";
goto err_stack_allocation_failed;
}
sandbox->state = SANDBOX_ALLOCATED;
#ifdef LOG_STATE_CHANGES
sandbox->state_history_count = 0;
sandbox->state_history[sandbox->state_history_count++] = SANDBOX_ALLOCATED;
memset(&sandbox->state_history, 0, SANDBOX_STATE_HISTORY_CAPACITY * sizeof(sandbox_state_t));
#endif
/* Set state to initializing */
sandbox_set_as_initialized(sandbox, sandbox_request, now);
/* Initialize the sandbox's context, stack, and instruction pointer */
/* stack grows down, so set to high address */
arch_context_init(&sandbox->ctxt, (reg_t)current_sandbox_start, (reg_t)sandbox->stack.high);
free(sandbox_request);
rc = 0;
done:
return sandbox;
return rc;
err_stack_allocation_failed:
/*
* This is a degenerate sandbox that never successfully completed initialization, so we need to
* hand jam some things to be able to cleanly transition to ERROR state
*/
sandbox->state = SANDBOX_UNINITIALIZED;
sandbox->timestamp_of.last_state_change = now;
#ifdef LOG_SANDBOX_MEMORY_PROFILE
sandbox->timestamp_of.page_allocations_size = 0;
#endif
ps_list_init_d(sandbox);
err_memory_allocation_failed:
sandbox_set_as_error(sandbox, SANDBOX_UNINITIALIZED);
err_http_allocation_failed:
client_socket_send_oneshot(sandbox->client_socket_descriptor, http_header_build(503), http_header_len(503));
client_socket_close(sandbox->client_socket_descriptor, &sandbox->client_address);
sandbox_set_as_error(sandbox, SANDBOX_ALLOCATED);
perror(error_message);
sandbox = NULL;
rc = -1;
goto done;
}
void
sandbox_init(struct sandbox *sandbox, struct module *module, int socket_descriptor,
const struct sockaddr *socket_address, uint64_t request_arrival_timestamp, uint64_t admissions_estimate)
{
/* Sets the ID to the value before the increment */
sandbox->id = sandbox_total_postfix_increment();
sandbox->module = module;
module_acquire(sandbox->module);
/* Initialize Parsec control structures */
ps_list_init_d(sandbox);
sandbox->client_socket_descriptor = socket_descriptor;
memcpy(&sandbox->client_address, socket_address, sizeof(struct sockaddr));
sandbox->timestamp_of.request_arrival = request_arrival_timestamp;
sandbox->absolute_deadline = request_arrival_timestamp + module->relative_deadline;
/*
* Admissions Control State
* Assumption: an estimate of 0 should have been interpreted as a rejection
*/
assert(admissions_estimate != 0);
sandbox->admissions_estimate = admissions_estimate;
sandbox_log_allocation(sandbox);
sandbox_set_as_initialized(sandbox, SANDBOX_ALLOCATED);
}
/**
* Allocates a new Sandbox Request and places it on the Global Deque
* @param module the module we want to request
* @param socket_descriptor
* @param socket_address
* @param request_arrival_timestamp the timestamp of when we receives the request from the network (in cycles)
* @param admissions_estimate the timestamp of when we receives the request from the network (in cycles)
* @return the new sandbox request
*/
struct sandbox *
sandbox_new(struct module *module, int socket_descriptor, const struct sockaddr *socket_address,
uint64_t request_arrival_timestamp, uint64_t admissions_estimate)
{
struct sandbox *sandbox = sandbox_allocate();
assert(sandbox);
sandbox_init(sandbox, module, socket_descriptor, socket_address, request_arrival_timestamp,
admissions_estimate);
return sandbox;
}
/**
* Free stack and heap resources.. also any I/O handles.
@ -202,31 +214,14 @@ sandbox_free(struct sandbox *sandbox)
module_release(sandbox->module);
/* Free Sandbox Stack if initial allocation was successful */
if (likely(sandbox->stack.size > 0)) {
assert(sandbox->stack.start != NULL);
/* The stack start is the bottom of the usable stack, but we allocated a guard page below this */
rc = munmap((char *)sandbox->stack.start - PAGE_SIZE, sandbox->stack.size + PAGE_SIZE);
if (unlikely(rc == -1)) {
debuglog("Failed to unmap stack of Sandbox %lu\n", sandbox->id);
goto err_free_stack_failed;
};
}
/* Linear Memory and Guard Page should already have been munmaped and set to NULL */
assert(sandbox->memory == NULL);
/* Free Sandbox Struct and HTTP Request and Response Buffers */
/* Free Sandbox Struct and HTTP Request and Response Buffers
* The linear memory was already freed during the transition from running to error|complete
* struct sandbox | HTTP Request Buffer | HTTP Response Buffer | 4GB of Wasm Linear Memory | Guard Page
* Allocated | Allocated | Allocated | Freed | Freed
*/
/* Linear Memory and Guard Page should already have been munmaped and set to NULL */
assert(sandbox->memory.start == NULL);
errno = 0;
if (likely(sandbox->stack.buffer != NULL)) sandbox_free_stack(sandbox);
free(sandbox);
unsigned long size_to_unmap = round_up_to_page(sizeof(struct sandbox)) + sandbox->module->max_request_size
+ sandbox->module->max_response_size;
munmap(sandbox, size_to_unmap);
if (rc == -1) {
debuglog("Failed to unmap Sandbox %lu\n", sandbox->id);
goto err_free_sandbox_failed;

@ -1,3 +0,0 @@
#include "sandbox_request.h"
_Atomic uint32_t sandbox_request_count = 0;

@ -23,31 +23,6 @@ const char *sandbox_state_labels[SANDBOX_STATE_COUNT] = {
[SANDBOX_ERROR] = "Error"
};
#ifdef LOG_SANDBOX_COUNT
_Atomic uint32_t sandbox_state_count[SANDBOX_STATE_COUNT];
#ifdef SANDBOX_STATE_TOTALS
_Atomic uint32_t sandbox_state_totals[SANDBOX_STATE_COUNT];
#endif
/*
* Function intended to be interactively run in a debugger to look at sandbox totals
* via `call runtime_log_sandbox_states()`
*/
void
runtime_log_sandbox_states()
{
#ifdef LOG_SANDBOX_COUNT
const size_t buffer_size = 1000;
char buffer[buffer_size] = "";
for (int i = 0; i < SANDBOX_STATE_COUNT; i++) {
const size_t tiny_buffer_size = 50;
char tiny_buffer[tiny_buffer_size] = "";
snprintf(tiny_buffer, tiny_buffer_size - 1, "%s: %u\n\t", sandbox_state_stringify(i),
atomic_load(&sandbox_state_count[i]));
strncat(buffer, tiny_buffer, buffer_size - 1 - strlen(buffer));
}
debuglog("%s", buffer);
#else
debuglog("Must compile with LOG_SANDBOX_COUNT for this functionality!\n");
#endif
};

@ -9,11 +9,8 @@ SLEDGE_RT_DIR=${SLEDGE_BASE_DIR}/runtime/
SLEDGE_COMPILETIME_INC=${SLEDGE_RT_DIR}/include
SLEDGE_COMPILETIME_SRC=${SLEDGE_RT_DIR}/compiletime/*.c
ALL=fibonacci empty ekf cifar10 gocr lpd resize
ALL_COPY_DEST=$(ALL:%=../../runtime/bin/%_wasm.so)
.PHONY: all
all: $(ALL_COPY_DEST)
all: fibonacci.install empty.install ekf.install cifar10.install gocr.install lpd.install resize.install
.PHONY: clean
clean:
@ -23,36 +20,9 @@ clean:
@make clean -C ./CMSIS_5_NN/ -f Makefile
@make clean -C ./gocr/src/ -f wasm.mk
@make clean -C ./sod/
@rm -f *.wasm
@rm -f ../../runtime/bin/*.so
fibonacci.wasm:
@make fibonacci.wasm -C ./fibonacci
@cp ./fibonacci/fibonacci.wasm fibonacci.wasm
empty.wasm:
@make empty.wasm -C ./empty
@cp ./empty/empty.wasm empty.wasm
ekf.wasm:
@make gps_ekf_fn.wasm -C ./TinyEKF/extras/c/ -f wasm.mk
@cp ./TinyEKF/extras/c/gps_ekf_fn.wasm ekf.wasm
cifar10.wasm:
@make cifar10.wasm -C ./CMSIS_5_NN/ -f Makefile
@cp ./CMSIS_5_NN/cifar10.wasm cifar10.wasm
gocr.wasm:
@make gocr.wasm -C ./gocr/src/ -f wasm.mk
@cp ./gocr/src/gocr.wasm gocr.wasm
lpd.wasm:
@make dir license_plate_detection.wasm -C ./sod/
@cp ./sod/bin/license_plate_detection.wasm lpd.wasm
resize.wasm:
@make dir resize_image.wasm -C ./sod/
@cp ./sod/bin/resize_image.wasm resize.wasm
%.bc: %.wasm
${AWSMCC} --inline-constant-globals --runtime-globals $^ -o $@
@ -62,23 +32,72 @@ resize.wasm:
../../runtime/bin/%.so: %.so
cp $^ $@
# Fibonacci
./fibonacci/fibonacci.wasm:
@make fibonacci.wasm -C ./fibonacci
fibonacci.wasm: ./fibonacci/fibonacci.wasm
@cp ./fibonacci/fibonacci.wasm fibonacci.wasm
.PHONY: fibonacci.install
fibonacci.install: ../../runtime/bin/fibonacci_wasm.so
# Empty
./empty/empty.wasm:
@make empty.wasm -C ./empty
empty.wasm: ./empty/empty.wasm
@cp ./empty/empty.wasm empty.wasm
.PHONY: empty.install
empty.install: ../../runtime/bin/empty_wasm.so
# EKF
./TinyEKF/extras/c/gps_ekf_fn.wasm:
@make gps_ekf_fn.wasm -C ./TinyEKF/extras/c/ -f wasm.mk
ekf.wasm: ./TinyEKF/extras/c/gps_ekf_fn.wasm
@cp ./TinyEKF/extras/c/gps_ekf_fn.wasm ekf.wasm
.PHONY: ekf.install
ekf.install: ../../runtime/bin/ekf_wasm.so
# CIFAR10
./CMSIS_5_NN/cifar10.wasm:
@make cifar10.wasm -C ./CMSIS_5_NN/ -f Makefile
cifar10.wasm: ./CMSIS_5_NN/cifar10.wasm
@cp ./CMSIS_5_NN/cifar10.wasm cifar10.wasm
.PHONY: cifar10.install
cifar10.install: ../../runtime/bin/cifar10_wasm.so
# GOCR
./gocr/src/gocr.wasm:
@make gocr.wasm -C ./gocr/src/ -f wasm.mk
gocr.wasm: ./gocr/src/gocr.wasm
@cp ./gocr/src/gocr.wasm gocr.wasm
.PHONY: gocr.install
gocr.install: ../../runtime/bin/gocr_wasm.so
# LPD
./sod/bin/license_plate_detection.wasm:
@make dir license_plate_detection.wasm -C ./sod/
lpd.wasm: ./sod/bin/license_plate_detection.wasm
@cp ./sod/bin/license_plate_detection.wasm lpd.wasm
.PHONY: lpd.install
lpd.install: ../../runtime/bin/lpd_wasm.so
# Resize
./sod/bin/resize_image.wasm:
@make dir resize_image.wasm -C ./sod/
resize.wasm: ./sod/bin/resize_image.wasm
@cp ./sod/bin/resize_image.wasm resize.wasm
.PHONY: resize.install
resize.install: ../../runtime/bin/resize_wasm.so

Loading…
Cancel
Save