Support TCP (TODO: HTTP parsing based on the module specification)

* Major:
 - Moved the sandbox data struct into the sandbox's linear memory mapping.
   (This should compose well with other sandboxing mechanisms.)
 - Removed all mallocs from libuv usage; reads and writes now use
   pre-allocated memory inside the sandbox data struct.
 - TODO: capture prints to stdout as a string and send it back with the
   function response body.
 - TODO: define, for each loaded function, the request/response HTTP
   header and body specification. To start, keep headers minimal and
   allow the body to be either JSON or plain text.
 - TODO: track timeouts; if a request exceeds its limit, return HTTP 408
   (a hypothetical sketch follows below).
branch main | phani, 5 years ago | parent 9654c1f2fc | commit 8a3e050282
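A hypothetical sketch of that 408 path (not part of this commit): it reuses fields the commit already defines -- start_time, mod->timeout, req_resp_data/rr_data_len, and the rdtsc() helper added to runtime.h -- and assumes start_time is recorded in cycles plus a made-up CYCLES_PER_MSEC calibration constant.

// Hypothetical sketch only -- not in this commit. Assumes the project headers
// (sandbox.h, runtime.h) and a CYCLES_PER_MSEC constant that does not exist yet.
static inline int
sandbox_check_timeout(struct sandbox *s)
{
	if (s->mod->timeout == 0) return 0;   // 0 means no limit
	unsigned long long elapsed = rdtsc() - s->start_time;
	if (elapsed <= (unsigned long long)s->mod->timeout * CYCLES_PER_MSEC) return 0;

	// overwrite whatever response was being built with a minimal 408
	strcpy(s->req_resp_data, "HTTP/1.1 408 Request Timeout\r\n\r\n");
	s->rr_data_len = strlen(s->req_resp_data);
	return 1;
}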

@ -22,8 +22,9 @@ CACHE_LINESIZE := $(shell getconf LEVEL1_DCACHE_LINESIZE)
NCORES_CONF := $(shell getconf _NPROCESSORS_CONF)
#todo:cross-compile
CFLAGS += -DCACHELINE_SIZE=${CACHE_LINESIZE}
#CFLAGS += -DNCORES=${NCORES_CONF}
CFLAGS += -DNCORES=4
CFLAGS += -DNCORES=${NCORES_CONF}
#CFLAGS += -DNCORES=4
CFLAGS += -DPAGE_SIZE=$(shell getconf PAGESIZE)
MAKE= make --no-print-directory
@ -43,7 +44,7 @@ else ifeq ($(USE_MEM),USE_MEM_VM)
CFILES += ${RTDIR}/memory/64bit_nix.c
endif
all: clean runtime tools
all: clean runtime #tools
runtime:
@echo "Compiling runtime"
@ -59,7 +60,7 @@ clean:
@echo "Cleaning up runtime"
@rm -f ${RUNTIME}
# @echo "Cleaning up tools"
@${MAKE} -C tools clean
# @${MAKE} -C tools clean
fetch:
@git submodule update --init --recursive

@ -5,39 +5,43 @@
#include "types.h"
struct module {
char name[MOD_NAME_MAX]; //not sure if i care for now.
char path[MOD_PATH_MAX]; //to dlopen if it has not been opened already.
char name[MOD_NAME_MAX];
char path[MOD_PATH_MAX];
void *dl_handle;
mod_main_fn_t entry_fn;
mod_glb_fn_t glb_init_fn;
mod_mem_fn_t mem_init_fn;
mod_tbl_fn_t tbl_init_fn;
i32 nargs; //as per the specification somewhere.
/* i32 nrets; */
struct indirect_table_entry indirect_table[INDIRECT_TABLE_SIZE];
i32 nargs;
u32 stack_size; // a specification?
u64 max_memory; //perhaps a specification of the module.
u64 max_memory; //perhaps a specification of the module. (max 4GB)
u32 timeout; //again part of the module specification.
u32 refcnt; //ref count how many instances exist here.
u32 udpport;
uv_udp_t udpsrv; // udp server to listen to requests.
// stand-alone vs serverless
#ifndef STANDALONE
struct sockaddr_in srvaddr;
// FIXME: for now, per-sandbox. no reason to have it be per-sandbox.
struct indirect_table_entry indirect_table[INDIRECT_TABLE_SIZE];
// TODO: what else?
int srvsock, srvport;
// unfortunately, using UV for accepting connections is not great!
// on_connection, to create a new accepted connection, will have to
// init a tcp handle, which requires a uvloop. cannot use main as
// rest of the connection is handled in sandboxing threads, with per-core(per-thread) tls data-structures.
// so, using direct epoll for accepting connections.
// uv_handle_t srvuv;
unsigned long max_req_sz, max_resp_sz, max_rr_sz; // req/resp from http..
#endif
};
// a runtime resource, perhaps use "malloc" on this?
struct module *module_alloc(char *mod_name, char *mod_path, u32 udp_port, i32 nargs, i32 nrets, u32 stack_sz, u32 max_heap, u32 timeout/*, ...*/);
struct module *module_alloc(char *mod_name, char *mod_path, i32 nargs, u32 stack_sz, u32 max_heap, u32 timeout, int port, int req_sz, int resp_sz);
// frees only if refcnt == 0
void module_free(struct module *mod);
struct module *module_find(char *name);
struct module *module_find_by_name(char *name);
struct module *module_find_by_sock(int sock);
static inline int
module_is_valid(struct module *mod)
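For reference, the new module_alloc() prototype above lines up with a module spec like the empty.json added at the end of this commit; zero arguments fall back to the defaults in types.h (WASM_STACK_SIZE, WASM_MAX_PAGES worth of memory, no timeout, MOD_REQ_RESP_DEFAULT request/response sizes). Only the name, path, argsize, and port values come from that spec; the call itself is illustrative.

// "name": "empty", "path": "empty_wasm.so", "argsize": 1, "port": 10000;
// "argsize" is the value util_parse_modules_file_json() passes as nargs.
struct module *m = module_alloc("empty", "empty_wasm.so",
                                /* nargs */ 1,
                                /* stack_sz */ 0, /* max_heap */ 0, /* timeout */ 0,
                                /* port */ 10000, /* req_sz */ 0, /* resp_sz */ 0);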

@ -4,10 +4,13 @@
#include <uv.h>
#include "sandbox.h"
#include "module.h"
#include <sys/epoll.h> // for epoll_create1(), epoll_ctl(), struct epoll_event
// global queue for stealing (work-stealing-deque)
extern struct deque_sandbox *glb_dq;
extern pthread_mutex_t glbq_mtx;
extern int epfd;
void alloc_linear_memory(void);
void expand_memory(void);
@ -42,18 +45,23 @@ INLINE char *get_function_from_table(u32 idx, u32 type_id);
void stub_init(char *modulename, i32 offset, mod_init_libc_fn_t fn);
void runtime_init(void);
static inline void
runtime_on_alloc(uv_handle_t *h, size_t suggested, uv_buf_t *buf)
{
buf->base = malloc(suggested);
memset(buf->base, 0, suggested);
buf->len = suggested;
}
void runtime_thd_init(void);
extern __thread uv_loop_t uvio;
static inline uv_loop_t *
runtime_uvio(void)
{ return &uvio; }
static unsigned long long int
rdtsc()
{
unsigned long long int ret = 0;
unsigned int cycles_lo;
unsigned int cycles_hi;
__asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
return ret;
}
#endif /* SFRT_RUNTIME_H */

@ -32,9 +32,10 @@ extern void __attribute__((noreturn)) sandbox_switch_preempt(void);
struct sandbox {
sandbox_state_t state;
void *linear_start;
u32 linear_size;
void *linear_start; // after sandbox struct
u32 linear_size; // from after sandbox struct
u32 linear_max_size;
u32 sb_size;
void *stack_start; // guess we need a mechanism for stack allocation.
u32 stack_size; // and to set the size of it.
@ -49,34 +50,36 @@ struct sandbox {
u64 start_time;
struct module *mod; //which module is this an instance of?
//struct indirect_table_entry indirect_table[INDIRECT_TABLE_SIZE];
i32 args_offset; //actual placement of args in the sandbox.
/* i32 ret_offset; //placement of return value(s) in the sandbox. */
void *args; // args from request, must be of module->nargs size.
i32 retval;
struct io_handle handles[SBOX_MAX_OPEN];
#ifndef STANDALONE
struct sockaddr client; //client requesting connection!
uv_udp_t clientuv; //using uv for client request on target runtime thread/core.
int csock;
uv_tcp_t cuv;
#endif
char *read_buf;
ssize_t read_len, read_size;
struct ps_list list;
// track I/O handles?
} CACHE_ALIGNED;
ssize_t rr_data_len; // <= max(mod->max_rr_sz)
char req_resp_data[1]; //of rr_data_sz, following sandbox mem..
} PAGE_ALIGNED;
DEQUE_PROTOTYPE(sandbox, struct sandbox *);
// a runtime resource, malloc on this!
struct sandbox *sandbox_alloc(struct module *mod, char *args, const struct sockaddr *addr);
struct sandbox *sandbox_alloc(struct module *mod, char *args, int sock, const struct sockaddr *addr);
// should free stack and heap resources.. also any I/O handles.
void sandbox_free(struct sandbox *sbox);
// next_sandbox only used in SIGUSR1
extern __thread struct sandbox *current_sandbox;
// next_sandbox only used in SIGUSR1
extern __thread arch_context_t *next_context;
typedef struct sandbox sandbox_t;
@ -106,6 +109,7 @@ sandbox_module(struct sandbox *s)
return s->mod;
}
extern void sandbox_local_end(struct sandbox *s);
static inline void
sandbox_switch(struct sandbox *next)
{
@ -113,11 +117,11 @@ sandbox_switch(struct sandbox *next)
// disable interrupts (signals)
softint_disable();
// switch sandbox (register context & base/bound/table)
struct sandbox *curr = sandbox_current();
arch_context_t *c = curr == NULL ? NULL : &curr->ctxt;
sandbox_current_set(next);
if (curr && curr->state == SANDBOX_RETURNED) sandbox_local_end(curr);
// save current's registers and restore next's registers.
next_context = n;
arch_context_switch(c, n);

@ -26,7 +26,20 @@
#ifndef CACHELINE_SIZE
#define CACHELINE_SIZE 32
#endif
#ifndef PAGE_SIZE
#define PAGE_SIZE (1<<12)
#endif
#define CACHE_ALIGNED __attribute__((aligned(CACHELINE_SIZE)))
#define PAGE_ALIGNED __attribute__((aligned(PAGE_SIZE)))
/* For this family of macros, do NOT pass zero as the pow2 */
#define round_to_pow2(x, pow2) (((unsigned long)(x)) & (~((pow2)-1)))
#define round_up_to_pow2(x, pow2) (round_to_pow2(((unsigned long)x) + (pow2)-1, (pow2)))
#define round_to_page(x) round_to_pow2(x, PAGE_SIZE)
#define round_up_to_page(x) round_up_to_pow2(x, PAGE_SIZE)
// Type alias's so I don't have to write uint32_t a million times
typedef signed char i8;
@ -43,7 +56,8 @@ typedef uint64_t u64;
#define WASM_START_PAGES (1<<8) //16MB
#define WASM_MAX_PAGES (1<<15) //4GB
#define WASM_STACK_SIZE (1<<15) // FIXME: fixed size for now.
#define WASM_STACK_SIZE (1<<14) // 16KB.
#define SBOX_MAX_MEM (1L<<32) // 4GB
// These are per module symbols and I'd need to dlsym for each module. instead just use global constants, see above macros.
// The code generator compiles in the starting number of wasm pages, and the maximum number of pages
@ -134,4 +148,9 @@ typedef enum {
#define SBOX_RESP_STRSZ 32
#define MOD_BACKLOG 100
#define EPOLL_MAX 1024
#define MOD_REQ_RESP_DEFAULT (PAGE_SIZE)
#define QUIESCENSE_TIME (1<<20) //cycles!
#endif /* SFRT_TYPES_H */
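A standalone sanity check of the rounding macros introduced above, assuming the default 4KB PAGE_SIZE. The zero-pow2 caveat exists because (pow2)-1 wraps to all ones, its complement becomes zero, and the mask then clears every bit.

// Self-contained check of the rounding macros, redefined here exactly as in
// types.h so the snippet compiles on its own.
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE (1<<12)
#define round_to_pow2(x, pow2) (((unsigned long)(x)) & (~((pow2)-1)))
#define round_up_to_pow2(x, pow2) (round_to_pow2(((unsigned long)x) + (pow2)-1, (pow2)))
#define round_to_page(x) round_to_pow2(x, PAGE_SIZE)
#define round_up_to_page(x) round_up_to_pow2(x, PAGE_SIZE)

int
main(void)
{
	assert(round_to_page(5000)    == 4096); // rounds down to the page boundary
	assert(round_up_to_page(5000) == 8192); // rounds up to the next page
	assert(round_up_to_page(4096) == 4096); // exact multiples are unchanged
	assert(round_up_to_page(1)    == 4096);
	printf("rounding macros ok\n");
	return 0;
}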

@ -61,6 +61,10 @@ wasm_fs_callback(uv_fs_t *req)
u32
wasm_read(i32 filedes, i32 buf_offset, i32 nbyte)
{
if (filedes == 0) {
char* buf = get_memory_ptr_void(buf_offset, nbyte);
return read(filedes, buf, nbyte);
}
int f = io_handle_fd(filedes);
// TODO: read on other file types
uv_fs_t req = UV_FS_REQ_INIT();
@ -82,18 +86,22 @@ wasm_read(i32 filedes, i32 buf_offset, i32 nbyte)
i32
wasm_write(i32 fd, i32 buf_offset, i32 buf_size)
{
if (fd == 1 || fd == 2) {
char* buf = get_memory_ptr_void(buf_offset, buf_size);
return write(fd, buf, buf_size);
}
int f = io_handle_fd(fd);
// TODO: read on other file types
uv_fs_t req = UV_FS_REQ_INIT();
char* buf = get_memory_ptr_void(buf_offset, buf_size);
debuglog("[%p] start[%d:%d, n%d]\n", uv_fs_get_data(&req), fd, f, buf_size);
printf("[%p] start[%d:%d, n%d]\n", uv_fs_get_data(&req), fd, f, buf_size);
uv_buf_t bufv = uv_buf_init(buf, buf_size);
uv_fs_write(runtime_uvio(), &req, f, &bufv, 1, -1, wasm_fs_callback);
sandbox_block();
int ret = uv_fs_get_result(&req);
debuglog("[%p] end[%d]\n", uv_fs_get_data(&req), ret);
printf("[%p] end[%d]\n", uv_fs_get_data(&req), ret);
uv_fs_req_cleanup(&req);
return ret;
@ -149,6 +157,9 @@ wasm_open(i32 path_off, i32 flags, i32 mode)
i32
wasm_close(i32 fd)
{
if (fd >= 0 && fd <= 2) {
return 0;
}
struct sandbox *c = sandbox_current();
int d = io_handle_fd(fd);
union uv_any_handle *h = io_handle_uv_get(fd);
@ -422,6 +433,24 @@ struct wasm_iovec {
i32
wasm_readv(i32 fd, i32 iov_offset, i32 iovcnt)
{
if (fd == 0) {
int len = 0, r = 0;
struct wasm_iovec *iov = get_memory_ptr_void(iov_offset, iovcnt * sizeof(struct wasm_iovec));
for (int i = 0; i < iovcnt; i+= RDWR_VEC_MAX) {
struct iovec bufs[RDWR_VEC_MAX] = { 0 };
int j = 0;
for (j = 0; j < RDWR_VEC_MAX && i + j < iovcnt; j++) {
bufs[j].iov_base = get_memory_ptr_void(iov[i + j].base_offset, iov[i + j].len);
bufs[j].iov_len = iov[i + j].len;
}
r = readv(fd, bufs, j);
if (r <= 0) break;
len += r;
}
return r < 0 ? r : len;
}
// TODO: read on other file types
int gret = 0;
int d = io_handle_fd(fd);
@ -454,6 +483,24 @@ wasm_readv(i32 fd, i32 iov_offset, i32 iovcnt)
i32
wasm_writev(i32 fd, i32 iov_offset, i32 iovcnt)
{
if (fd == 1 || fd == 2) {
int len = 0, r = 0;
struct wasm_iovec *iov = get_memory_ptr_void(iov_offset, iovcnt * sizeof(struct wasm_iovec));
for (int i = 0; i < iovcnt; i+= RDWR_VEC_MAX) {
struct iovec bufs[RDWR_VEC_MAX] = { 0 };
int j = 0;
for (j = 0; j < RDWR_VEC_MAX && i + j < iovcnt; j++) {
bufs[j].iov_base = get_memory_ptr_void(iov[i + j].base_offset, iov[i + j].len);
bufs[j].iov_len = iov[i + j].len;
}
r = writev(fd, bufs, j);
if (r <= 0) break;
len += r;
}
return r < 0 ? r : len;
}
// TODO: read on other file types
int d = io_handle_fd(fd);
int gret = 0;
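The fd 0/1/2 fast paths above still go straight to the host's read()/write(); the commit message's stdout-to-response TODO would presumably redirect the fd 1 branch into the sandbox's pre-allocated buffer instead. A hypothetical sketch of that direction, reusing the req_resp_data/rr_data_len fields added in this commit (the helper name is made up):

// Hypothetical helper, not in this commit: append an fd-1 write to the
// sandbox's response buffer instead of the host's stdout, truncating once
// the module's max_rr_sz budget is used up.
static inline i32
sandbox_capture_stdout(struct sandbox *c, const char *buf, i32 buf_size)
{
	ssize_t space = c->mod->max_rr_sz - c->rr_data_len;
	i32 n = buf_size < space ? buf_size : (i32)space;
	if (n <= 0) return 0;
	memcpy(c->req_resp_data + c->rr_data_len, buf, n);
	c->rr_data_len += n;
	return n;
}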

@ -9,6 +9,8 @@
#include <sandbox.h>
#include <softint.h>
#include <util.h>
#include <sys/time.h>
#include <sys/resource.h>
#define MOD_LINE_MAX 1024
@ -34,12 +36,30 @@ main(int argc, char* argv[])
if (argc != 2) {
usage(argv[0]);
exit(-1);
}
struct rlimit r;
if (getrlimit(RLIMIT_DATA, &r) < 0) {
perror("getrlimit RLIMIT_DATA");
exit(-1);
}
r.rlim_cur = r.rlim_max;
if (setrlimit(RLIMIT_DATA, &r) < 0) {
perror("setrlimit RLIMIT_DATA");
exit(-1);
}
if (getrlimit(RLIMIT_NOFILE, &r) < 0) {
perror("getrlimit RLIMIT_NOFILE");
exit(-1);
}
r.rlim_cur = r.rlim_max;
if (setrlimit(RLIMIT_NOFILE, &r) < 0) {
perror("setrlimit RLIMIT_NOFILE");
exit(-1);
}
ncores = sysconf(_SC_NPROCESSORS_ONLN);
if (ncores > 1) {
u32 x = ncores - 1;
sbox_ncores = SBOX_NCORES;
@ -66,11 +86,11 @@ main(int argc, char* argv[])
runtime_init();
debuglog("Parsing modules file [%s]\n", argv[1]);
if (util_parse_modules_file_json(argv[1])) {
// if (util_parse_modules_file_custom(argv[1])) {
printf("failed to parse modules file[%s]\n", argv[1]);
exit(-1);
}
runtime_thd_init();
for (i = 0; i < sbox_ncores; i++) {
int ret = pthread_create(&rtthd[i], NULL, sandbox_run_func, (void *)&rtthd_ret[i]);
@ -105,9 +125,9 @@ main(int argc, char* argv[])
uv_loop_init(&uvio);
/* in current dir! */
struct module *m = module_alloc(argv[1], argv[1], 0, 0, 0, 0, 0, 0);
struct module *m = module_alloc(argv[1], argv[1], 0, 0, 0, 0, 0, 0, 0);
assert(m);
struct sandbox *s = sandbox_alloc(m, argv[1], NULL);
struct sandbox *s = sandbox_alloc(m, argv[1], 0, NULL);
exit(0);
#endif

@ -11,39 +11,13 @@
void
alloc_linear_memory(void)
{
struct sandbox *curr = sandbox_current();
// Map 4gb + PAGE_SIZE of memory that will fault when accessed
// We allocate the extra page so that reads off the end will also fail
sandbox_lmbase = mmap(NULL, MAX_LINEAR_MEM, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (sandbox_lmbase == MAP_FAILED) {
perror("Mapping of initial unusable region failed");
exit(1);
}
void *map_result = mmap(sandbox_lmbase, WASM_PAGE_SIZE * WASM_START_PAGES, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (map_result == MAP_FAILED) {
perror("Mapping of initial memory failed");
exit(1);
}
sandbox_lmbound = WASM_PAGE_SIZE * WASM_START_PAGES;
curr->linear_start = sandbox_lmbase;
curr->linear_size = sandbox_lmbound;
// mmaped memory in sandbox_alloc.
}
void
free_linear_memory(void *base, u32 bound, u32 max)
{
struct sandbox *curr = sandbox_current();
assert(base && bound);
// cannot free currently executing sandbox's memory
assert(curr == NULL || base != curr->linear_start || base != sandbox_lmbase);
int ret = munmap(base, MAX_LINEAR_MEM);
if (ret) perror("munmap");
// frees on sandbox_free
}
void
@ -52,7 +26,7 @@ expand_memory(void)
struct sandbox *curr = sandbox_current();
// max_pages = 0 => no limit: FIXME
assert(sandbox_lmbound / WASM_PAGE_SIZE < WASM_MAX_PAGES);
assert((curr->sb_size + sandbox_lmbound) / WASM_PAGE_SIZE < WASM_MAX_PAGES);
// Remap the relevant wasm page to readable
char *mem_as_chars = sandbox_lmbase;
char *page_address = &mem_as_chars[sandbox_lmbound];

@ -9,9 +9,8 @@
static struct module *__mod_db[MOD_MAX] = { NULL };
static int __mod_free_off = 0;
// todo: optimize this.. do we care? plus not atomic!!
struct module *
module_find(char *name)
module_find_by_name(char *name)
{
int f = __mod_free_off;
for (int i = 0; i < f; i++) {
@ -21,57 +20,66 @@ module_find(char *name)
return NULL;
}
struct module *
module_find_by_sock(int sock)
{
int f = __mod_free_off;
for (int i = 0; i < f; i++) {
assert(__mod_db[i]);
if (__mod_db[i]->srvsock == sock) return __mod_db[i];
}
return NULL;
}
static inline int
module_add(struct module *m)
{
assert(module_find(m->name) == NULL);
#ifdef STANDALONE
assert(module_find_by_name(m->name) == NULL);
#else
assert(m->srvsock == -1);
#endif
int f = __sync_fetch_and_add(&__mod_free_off, 1);
assert(f < MOD_MAX);
__mod_db[f] = m;
return 0;
}
static void
module_on_recv(uv_udp_t *h, ssize_t nr, const uv_buf_t *rcvbuf, const struct sockaddr *addr, unsigned flags)
{
if (nr <= 0) goto done;
debuglog("MC:%s, %s\n", h->data, rcvbuf->base);
// invoke a function!
struct sandbox *s = util_parse_sandbox_string_json((struct module *)(h->data), rcvbuf->base, addr);
//struct sandbox *s = util_parse_sandbox_string_custom((struct module *)(h->data), rcvbuf->base, addr);
assert(s);
done:
free(rcvbuf->base);
return 0;
}
static void
module_io_init(struct module *m)
static inline void
module_server_init(struct module *m)
{
// TODO: USE_UVIO vs USE_SYSCALL!
int status;
status = uv_udp_init(uv_default_loop(), &m->udpsrv);
assert(status >= 0);
#ifndef STANDALONE
int fd = socket(AF_INET, SOCK_STREAM, 0);
assert(fd > 0);
m->srvaddr.sin_family = AF_INET;
m->srvaddr.sin_addr.s_addr = htonl(INADDR_ANY);
m->srvaddr.sin_port = htons((unsigned short)m->srvport);
int optval = 1;
setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &optval, sizeof(optval));
optval = 1;
setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
if (bind(fd, (struct sockaddr *)&m->srvaddr, sizeof(m->srvaddr)) < 0) {
perror("bind");
assert(0);
}
if (listen(fd, MOD_BACKLOG) < 0) assert(0);
m->srvsock = fd;
debuglog("MIO:%s,%u\n", m->name, m->udpport);
uv_ip4_addr("127.0.0.1", m->udpport, &m->srvaddr);
status = uv_udp_bind(&m->udpsrv, (const struct sockaddr *)&m->srvaddr, 0);
assert(status >= 0);
m->udpsrv.data = (void *)m;
struct epoll_event accept_evt;
accept_evt.data.ptr = (void *)m;
accept_evt.events = EPOLLIN;
status = uv_udp_recv_start(&m->udpsrv, runtime_on_alloc, module_on_recv);
assert(status >= 0);
if (epoll_ctl(epfd, EPOLL_CTL_ADD, m->srvsock, &accept_evt) < 0) assert(0);
#endif
}
struct module *
module_alloc(char *modname, char *modpath, u32 udp_port, i32 nargs, i32 nrets, u32 stacksz, u32 maxheap, u32 timeout)
module_alloc(char *modname, char *modpath, i32 nargs, u32 stacksz, u32 maxheap, u32 timeout, int port, int req_sz, int resp_sz)
{
// FIXME: cannot do this at runtime, we may be interfering with a sandbox's heap!
struct module *mod = (struct module *)malloc(sizeof(struct module));
if (!mod) return NULL;
memset(mod, 0, sizeof(struct module));
@ -95,10 +103,18 @@ module_alloc(char *modname, char *modpath, u32 udp_port, i32 nargs, i32 nrets, u
strncpy(mod->path, modpath, MOD_PATH_MAX);
mod->nargs = nargs;
/* mod->nrets = nrets; */
mod->stack_size = stacksz == 0 ? WASM_STACK_SIZE : stacksz;
mod->stack_size = round_up_to_page(stacksz == 0 ? WASM_STACK_SIZE : stacksz);
mod->max_memory = maxheap == 0 ? ((u64)WASM_PAGE_SIZE * WASM_MAX_PAGES) : maxheap;
mod->timeout = timeout;
#ifndef STANDALONE
mod->srvsock = -1;
mod->srvport = port;
if (req_sz == 0) req_sz = MOD_REQ_RESP_DEFAULT;
if (resp_sz == 0) resp_sz = MOD_REQ_RESP_DEFAULT;
mod->max_req_sz = req_sz;
mod->max_resp_sz = resp_sz;
mod->max_rr_sz = round_up_to_page(req_sz > resp_sz ? req_sz : resp_sz);
#endif
struct indirect_table_entry *cache_tbl = module_indirect_table;
// assumption: modules are created before enabling preemption and before running runtime-sandboxing threads..
@ -107,11 +123,8 @@ module_alloc(char *modname, char *modpath, u32 udp_port, i32 nargs, i32 nrets, u
module_indirect_table = mod->indirect_table;
module_table_init(mod);
module_indirect_table = cache_tbl;
mod->udpport = udp_port;
module_add(mod);
#ifndef STANDALONE
module_io_init(mod);
#endif
module_server_init(mod);
return mod;
@ -132,9 +145,9 @@ module_free(struct module *mod)
if (mod->dl_handle == NULL) return;
if (mod->refcnt) return;
#ifndef STANDALONE
close(mod->srvsock);
#endif
dlclose(mod->dl_handle);
memset(mod, 0, sizeof(struct module));
// FIXME: use global/static memory. cannot interfere with some sandbox's heap!
free(mod);
}

@ -11,6 +11,7 @@
struct deque_sandbox *glb_dq;
pthread_mutex_t glbq_mtx = PTHREAD_MUTEX_INITIALIZER;
int epfd;
// per-thread (per-core) run and completion queue.. (using doubly-linked-lists)
__thread static struct ps_list_head runq;
@ -32,7 +33,7 @@ static inline void
sandbox_local_run(struct sandbox *s)
{
assert(ps_list_singleton_d(s));
fprintf(stderr, "(%d,%lu) %s: run %p, %s\n", sched_getcpu(), pthread_self(), __func__, s, s->mod->name);
// fprintf(stderr, "(%d,%lu) %s: run %p, %s\n", sched_getcpu(), pthread_self(), __func__, s, s->mod->name);
ps_list_head_append_d(&runq, s);
}
@ -84,6 +85,7 @@ sandbox_schedule(void)
s = ps_list_head_first_d(&runq, struct sandbox);
assert(s->state != SANDBOX_RETURNED);
// round-robin
ps_list_rem_d(s);
ps_list_head_append_d(&runq, s);
@ -98,7 +100,7 @@ sandbox_local_free(unsigned int n)
int i = 0;
while (i < n) {
i ++;
i++;
struct sandbox *s = ps_list_head_first_d(&endq, struct sandbox);
if (!s) break;
ps_list_rem_d(s);
@ -107,12 +109,12 @@ sandbox_local_free(unsigned int n)
}
struct sandbox *
sandbox_schedule_uvio(void)
sandbox_schedule_io(void)
{
assert(sandbox_current() == NULL);
sandbox_local_free(1);
if (!in_callback) sandbox_io_nowait();
assert(sandbox_current() == NULL);
softint_disable();
struct sandbox *s = sandbox_schedule();
softint_enable();
@ -169,7 +171,7 @@ sandbox_local_stop(struct sandbox *s)
ps_list_rem_d(s);
}
static inline void
void
sandbox_local_end(struct sandbox *s)
{
assert(ps_list_singleton_d(s));
@ -193,10 +195,10 @@ sandbox_run_func(void *data)
in_callback = 0;
while (1) {
struct sandbox *s = sandbox_schedule_uvio();
struct sandbox *s = sandbox_schedule_io();
while (s) {
sandbox_switch(s);
s = sandbox_schedule_uvio();
s = sandbox_schedule_io();
}
}
@ -225,18 +227,15 @@ sandbox_exit(void)
{
#ifndef STANDALONE
struct sandbox *curr = sandbox_current();
assert(curr);
sandbox_response();
fprintf(stderr, "(%d,%lu) %s: %p, %s exit\n", sched_getcpu(), pthread_self(), __func__, curr, curr->mod->name);
softint_disable();
sandbox_local_stop(curr);
curr->state = SANDBOX_RETURNED;
// free resources from "main function execution", as stack still in use.
sandbox_local_end(curr);
struct sandbox *n = sandbox_schedule();
assert(n != curr);
softint_enable();
//sandbox_local_end(curr);
sandbox_switch(n);
#else
sandbox_switch(NULL);
@ -244,16 +243,36 @@ sandbox_exit(void)
}
void *
runtime_uvio_thdfn(void *d)
runtime_accept_thdfn(void *d)
{
assert(d == (void *)uv_default_loop());
struct epoll_event *epevts = (struct epoll_event *)malloc(EPOLL_MAX * sizeof(struct epoll_event));
int nreqs = 0;
while (1) {
// runs until there are no events..
uv_run(uv_default_loop(), UV_RUN_DEFAULT);
pthread_yield();
int ready = epoll_wait(epfd, epevts, EPOLL_MAX, -1);
for (int i = 0; i < ready; i++) {
if (epevts[i].events & EPOLLERR) {
perror("epoll_wait");
assert(0);
}
struct sockaddr_in client;
socklen_t client_len = sizeof(client);
struct module *m = (struct module *)epevts[i].data.ptr;
assert(m);
int es = m->srvsock;
int s = accept(es, (struct sockaddr *)&client, &client_len);
if (s < 0) {
perror("accept");
assert(0);
}
nreqs++;
struct sandbox *sb = sandbox_alloc(m, m->name, s, (const struct sockaddr *)&client);
assert(sb);
}
}
assert(0);
free(epevts);
return NULL;
}
@ -261,19 +280,26 @@ runtime_uvio_thdfn(void *d)
void
runtime_init(void)
{
epfd = epoll_create1(0);
assert(epfd >= 0);
glb_dq = (struct deque_sandbox *)malloc(sizeof(struct deque_sandbox));
assert(glb_dq);
deque_init_sandbox(glb_dq, SBOX_MAX_REQS);
softint_mask(SIGUSR1);
softint_mask(SIGALRM);
}
void
runtime_thd_init(void)
{
cpu_set_t cs;
CPU_ZERO(&cs);
CPU_SET(MOD_REQ_CORE, &cs);
pthread_t iothd;
int ret = pthread_create(&iothd, NULL, runtime_uvio_thdfn, (void *)uv_default_loop());
int ret = pthread_create(&iothd, NULL, runtime_accept_thdfn, NULL);
assert(ret == 0);
ret = pthread_setaffinity_np(iothd, sizeof(cpu_set_t), &cs);
assert(ret == 0);

@ -6,6 +6,36 @@
#include <signal.h>
#include <uv.h>
static inline struct sandbox *
sandbox_memory_map(struct module *m)
{
unsigned long mem_sz = SBOX_MAX_MEM; // 4GB
unsigned long sb_sz = sizeof(struct sandbox) + m->max_rr_sz;
unsigned long lm_sz = WASM_PAGE_SIZE * WASM_START_PAGES;
if (lm_sz + sb_sz > mem_sz) return NULL;
assert(round_up_to_page(sb_sz) == sb_sz);
unsigned long rw_sz = sb_sz + lm_sz;
void *addr = mmap(NULL, mem_sz + /* guard page */ PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) return NULL;
void *addr_rw = mmap(addr, sb_sz + lm_sz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (addr_rw == MAP_FAILED) {
munmap(addr, mem_sz + PAGE_SIZE);
return NULL;
}
struct sandbox *s = (struct sandbox *)addr;
// can it include sandbox as well?
s->linear_start = (char *)addr + sb_sz;
s->linear_size = lm_sz;
s->mod = m;
s->sb_size = sb_sz;
module_acquire(m);
return s;
}
static inline void
sandbox_args_setup(i32 argc)
{
@ -32,12 +62,97 @@ sandbox_args_setup(i32 argc)
}
}
static void
sandbox_uvio_init(struct sandbox *c)
static inline void
sb_read_callback(uv_stream_t *s, ssize_t nr, const uv_buf_t *b)
{
struct sandbox *c = s->data;
if (nr > 0) c->rr_data_len += nr;
uv_read_stop(s);
sandbox_wakeup(c);
}
static inline void
sb_write_callback(uv_write_t *w, int status)
{
struct sandbox *c = w->data;
sandbox_wakeup(c);
}
static inline void
sb_alloc_callback(uv_handle_t *h, size_t suggested, uv_buf_t *buf)
{
struct sandbox *c = h->data;
buf->base = (c->req_resp_data + c->rr_data_len);
buf->len = (c->mod->max_rr_sz - c->rr_data_len);
}
static inline void
sb_close_callback(uv_handle_t *s)
{
struct sandbox *c = s->data;
sandbox_wakeup(c);
}
static inline void
sb_shutdown_callback(uv_shutdown_t *req, int status)
{
struct sandbox *c = req->data;
sandbox_wakeup(c);
}
static inline int
sandbox_client_request_get(void)
{
#ifndef STANDALONE
struct sandbox *curr = sandbox_current();
#ifndef USE_UVIO
int r = 0;
r = recv(curr->csock, (curr->req_resp_data), curr->mod->max_req_sz, 0);
if (r < 0) {
perror("recv");
return r;
}
curr->rr_data_len = r;
#else
int r = uv_read_start((uv_stream_t *)&curr->cuv, sb_alloc_callback, sb_read_callback);
sandbox_block();
#endif
// TODO: http_request_parse
return curr->rr_data_len;
#else
return 1;
#endif
}
static inline int
sandbox_client_response_set(void)
{
#ifndef STANDALONE
int ret = uv_udp_init(runtime_uvio(), &c->clientuv);
assert(ret == 0);
struct sandbox *curr = sandbox_current();
strcpy(curr->req_resp_data, "HTTP/1.1 200 OK\r\n\r\n");
// TODO: response set in req_resp_data
curr->rr_data_len = strlen("HTTP/1.1 200 OK\r\n\r\n");
#ifndef USE_UVIO
int r = send(curr->csock, curr->req_resp_data, curr->rr_data_len, 0);
if (r < 0) perror("send");
#else
uv_write_t req = { .data = curr, };
uv_buf_t bu = uv_buf_init(curr->req_resp_data, curr->rr_data_len);
int r = uv_write(&req, (uv_stream_t *)&curr->cuv, &bu, 1, sb_write_callback);
sandbox_block();
#endif
return r;
#else
return 0;
#endif
}
@ -55,7 +170,6 @@ sandbox_entry(void)
}
struct module *curr_mod = sandbox_module(curr);
int argc = module_nargs(curr_mod);
// for stdio
int f = io_handle_open(0);
assert(f == 0);
@ -63,37 +177,54 @@ sandbox_entry(void)
assert(f == 1);
f = io_handle_open(2);
assert(f == 2);
sandbox_args_setup(argc);
sandbox_uvio_init(curr);
alloc_linear_memory();
// perhaps only initialized for the first instance? or TODO!
//module_table_init(curr_mod);
module_memory_init(curr_mod);
#ifndef STANDALONE
#ifdef USE_UVIO
int r = uv_tcp_init(runtime_uvio(), (uv_tcp_t *)&curr->cuv);
assert(r == 0);
curr->cuv.data = curr;
r = uv_tcp_open((uv_tcp_t *)&curr->cuv, curr->csock);
assert(r == 0);
#endif
if (sandbox_client_request_get() > 0)
#endif
{
alloc_linear_memory();
sandbox_args_setup(argc);
// perhaps only initialized for the first instance? or TODO!
//module_table_init(curr_mod);
module_memory_init(curr_mod);
curr->retval = module_entry(curr_mod, argc, curr->args_offset);
curr->retval = module_entry(curr_mod, argc, curr->args_offset);
sandbox_client_response_set();
}
#ifndef STANDALONE
#ifdef USE_UVIO
uv_shutdown_t sr = { .data = curr, };
r = uv_shutdown(&sr, (uv_stream_t *)&curr->cuv, sb_shutdown_callback);
sandbox_block();
uv_close((uv_handle_t *)&curr->cuv, sb_close_callback);
sandbox_block();
#else
close(curr->csock);
#endif
#endif
sandbox_exit();
}
struct sandbox *
sandbox_alloc(struct module *mod, char *args, const struct sockaddr *addr)
sandbox_alloc(struct module *mod, char *args, int sock, const struct sockaddr *addr)
{
if (!module_is_valid(mod)) return NULL;
// FIXME: don't use malloc. huge security problem!
// perhaps, main should be in its own sandbox, when it is not running any sandbox.
struct sandbox *sb = (struct sandbox *)malloc(sizeof(struct sandbox));
struct sandbox *sb = (struct sandbox *)sandbox_memory_map(mod);
if (!sb) return NULL;
memset(sb, 0, sizeof(struct sandbox));
//actual module instantiation!
sb->mod = mod;
module_acquire(mod);
sb->args = (void *)args;
sb->stack_size = mod->stack_size;
sb->stack_start = mmap(NULL, sb->stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN, -1, 0);
@ -101,6 +232,7 @@ sandbox_alloc(struct module *mod, char *args, const struct sockaddr *addr)
perror("mmap");
assert(0);
}
sb->csock = sock;
for (int i = 0; i < SBOX_MAX_OPEN; i++) sb->handles[i].fd = -1;
ps_list_init_d(sb);
if (addr) memcpy(&sb->client, addr, sizeof(struct sockaddr));
@ -111,44 +243,6 @@ sandbox_alloc(struct module *mod, char *args, const struct sockaddr *addr)
return sb;
}
void
sandbox_udp_send_callback(uv_udp_send_t *req, int status)
{
struct sandbox *c = req->data;
c->retval = status;
sandbox_wakeup(c);
}
void
sandbox_response(void)
{
struct sandbox *sb = sandbox_current();
// send response.
#ifndef STANDALONE
int sock = -1, ret;
char resp[SBOX_RESP_STRSZ] = { 0 };
// sends return value only for now!
sprintf(resp, "%d", sb->retval);
#ifdef USE_SYSCALL
// FIXME, with USE_SYSCALL, we should not be using uv at all.
int ret = uv_fileno((uv_handle_t *)&sb->mod->udpsrv, &sock);
assert(ret == 0);
// using system call here because uv_udp_t is in the "module listener thread"'s loop, cannot access here. also dnot want to mess with cross-core/cross-thread uv loop states or structures.
ret = sendto(sock, resp, strlen(resp), 0, &sb->client, sizeof(struct sockaddr));
assert(ret == strlen(resp));
#elif USE_UVIO
uv_udp_send_t req = { .data = sb, };
uv_buf_t b = uv_buf_init(resp, strlen(resp));
ret = uv_udp_send(&req, &sb->clientuv, &b, 1, &sb->client, sandbox_udp_send_callback);
assert(ret == 0);
sandbox_block();
#else
assert(0);
#endif
#endif
}
void
sandbox_free(struct sandbox *sb)
{
@ -163,14 +257,20 @@ sandbox_free(struct sandbox *sb)
module_release(sb->mod);
free(sb->args);
// remove stack! and also heap!
ret = munmap(sb->stack_start, sb->stack_size);
if (ret) perror("munmap");
// TODO free(sb->args);
void *stkaddr = sb->stack_start;
size_t stksz = sb->stack_size;
// depending on the memory type
free_linear_memory(sb->linear_start, sb->linear_size, sb->linear_max_size);
free(sb);
// sb is a danging-ptr!
// mmaped memory includes sandbox structure in there.
ret = munmap(sb, SBOX_MAX_MEM + PAGE_SIZE);
if (ret) perror("munmap sandbox");
// remove stack!
// for some reason, removing stack seem to cause crash in some cases.
// TODO: debug more.
ret = munmap(stkaddr, stksz);
if (ret) perror("munmap stack");
}
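Since the sandbox struct, the request/response buffer, and the wasm linear memory now live in one reservation, the allocation pattern is: reserve 4GB plus a guard page as PROT_NONE, then commit only the front of it read/write. A self-contained illustration of that pattern (sizes below are stand-ins, not the runtime's constants):

// Standalone illustration of the reserve-then-commit pattern used by
// sandbox_memory_map(): everything past the committed prefix stays PROT_NONE,
// so out-of-bounds accesses fault instead of corrupting neighbouring memory.
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int
main(void)
{
	const unsigned long mem_sz = 1UL << 32;  // 4GB reservation
	const unsigned long page   = 1UL << 12;  // assume 4KB pages
	const unsigned long sb_sz  = page;       // stand-in for struct sandbox + req/resp buffer
	const unsigned long lm_sz  = 1UL << 24;  // stand-in for the initial linear memory (16MB)

	// Reserve address space only: no access permitted yet.
	char *addr = mmap(NULL, mem_sz + page, PROT_NONE,
	                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(addr != MAP_FAILED);

	// Commit the prefix that is actually used, in place, as read/write.
	void *rw = mmap(addr, sb_sz + lm_sz, PROT_READ | PROT_WRITE,
	                MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	assert(rw == (void *)addr);

	memset(addr + sb_sz, 0, lm_sz);          // the committed region is usable
	printf("sandbox header at %p, linear memory at %p\n",
	       (void *)addr, (void *)(addr + sb_sz));
	// Touching addr + sb_sz + lm_sz here would SIGSEGV: it is still PROT_NONE.

	munmap(addr, mem_sz + page);
	return 0;
}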

@ -121,6 +121,7 @@ softint_handler(int sig, siginfo_t *si, void *u)
alarm_cnt++;
// softints per-core..
if (curr && curr->state == SANDBOX_RETURNED) return;
if (next_context) return;
if (!softint_enabled()) return;
softint_alarm_schedule(u);

@ -65,7 +65,7 @@ util_parse_modules_file_json(char *filename)
char mname[MOD_NAME_MAX] = { 0 };
char mpath[MOD_PATH_MAX] = { 0 };
i32 nargs = 0;
u32 udp_port = 0;
u32 port = 0;
i32 isactive = 0;
for (int j = 1; j < (toks[i].size * 2); j+=2) {
char val[256] = { 0 }, key[32] = { 0 };
@ -77,7 +77,7 @@ util_parse_modules_file_json(char *filename)
} else if (strcmp(key, "path") == 0) {
strcpy(mpath, val);
} else if (strcmp(key, "port") == 0) {
udp_port = atoi(val);
port = atoi(val);
} else if (strcmp(key, "argsize") == 0) {
nargs = atoi(val);
} else if (strcmp(key, "active") == 0) {
@ -90,7 +90,7 @@ util_parse_modules_file_json(char *filename)
// do not load if it is not active
if (isactive == 0) continue;
struct module *m = module_alloc(mname, mpath, udp_port, nargs, 0, 0, 0, 0);
struct module *m = module_alloc(mname, mpath, nargs, 0, 0, 0, port, 0, 0);
assert(m);
nmods++;
}
@ -135,7 +135,7 @@ parse_sandbox_file_custom(char *filename)
int ntoks = 0;
strncpy(mname, tok, MOD_NAME_MAX);
mod = module_find(mname);
mod = module_find_by_name(mname);
assert(mod);
if (mod->nargs > 0) {
args = (char *)malloc(mod->nargs * MOD_ARG_MAX_SZ);
@ -152,7 +152,7 @@ parse_sandbox_file_custom(char *filename)
assert(0);
}
sb = sandbox_alloc(mod, args, NULL);
sb = sandbox_alloc(mod, args, 0, NULL);
assert(sb);
total_boxes++;
@ -202,7 +202,7 @@ util_parse_sandbox_string_json(struct module *mod, char *str, const struct socka
*(args + ((k - 1) * MOD_ARG_MAX_SZ) + g->end - g->start) = '\0';
}
struct sandbox *sb = sandbox_alloc(mod, args, addr);
struct sandbox *sb = sandbox_alloc(mod, args, 0, addr);
assert(sb);
return sb;
@ -225,8 +225,6 @@ util_parse_sandbox_string_custom(struct module *mod, char *str, const struct soc
if (!(tok = strtok_r(src, ":", &src))) return NULL;
if (strcmp(mod->name, tok)) return NULL;
// struct module *mod = module_find(tok);
// if (!mod) return NULL;
assert(mod->nargs >= 0 && mod->nargs < MOD_MAX_ARGS);
char *args = (char *)malloc(mod->nargs * MOD_ARG_MAX_SZ);
@ -238,7 +236,7 @@ util_parse_sandbox_string_custom(struct module *mod, char *str, const struct soc
assert(ntoks < MOD_MAX_ARGS);
}
struct sandbox *sb = sandbox_alloc(mod, args, addr);
struct sandbox *sb = sandbox_alloc(mod, args, 0, addr);
assert(sb);
return sb;
@ -271,7 +269,7 @@ util_parse_modules_file_custom(char *filename)
u32 max_heap = 0;
u32 timeout = 0;
char *tok = NULL, *src = buff;
u32 udp_port = 0;
u32 port = 0;
i32 ntoks = 0;
src = util_remove_spaces(src);
@ -279,7 +277,7 @@ util_parse_modules_file_custom(char *filename)
while ((tok = strtok_r(src, ":", &src))) {
switch(ntoks) {
case MOD_ARG_MODPATH: strncpy(mpath, tok, MOD_PATH_MAX); break;
case MOD_ARG_MODPORT: udp_port = atoi(tok);
case MOD_ARG_MODPORT: port = atoi(tok);
case MOD_ARG_MODNAME: strncpy(mname, tok, MOD_NAME_MAX); break;
case MOD_ARG_MODNARGS: nargs = atoi(tok); break;
default: break;
@ -288,7 +286,7 @@ util_parse_modules_file_custom(char *filename)
}
assert(ntoks >= MOD_ARG_MAX);
struct module *m = module_alloc(mname, mpath, udp_port, nargs, 0, 0, 0, 0);
struct module *m = module_alloc(mname, mpath, nargs, 0, 0, 0, port, 0, 0);
assert(m);
nmods++;

@ -2,7 +2,7 @@ include Makefile.inc
BENCH_DIR=../../silverfish/code_benches/
TESTS=forever filesys sockserver sockclient
TESTS=forever filesys sockserver sockclient empty
TESTSRT=$(TESTS:%=%_rt)
BENCHES=adpcm basic_math binarytrees bitcount blowfish crc dijkstra fft function_pointers \
gsm libjpeg mandelbrot patricia pgp qsort rsynth sha sqlite stringsearch susan
@ -20,13 +20,13 @@ susan_CFLAGS=-Wno-everything
.PHONY: all clean rttests sftests
all: rttests sftests
@echo "Compilation done!"
sftests: $(BENCHESSF)
rttests: $(TESTSRT)
all: rttests sftests
@echo "Compilation done!"
clean:
@rm -rf ${TMP_DIR}
@rm -f ${BIN_DIR}/*_wasm.so

@ -0,0 +1,10 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
int
main(int argc, char **argv)
{
printf("hello\n");
return 0;
}

@ -0,0 +1,7 @@
{
"active" : "yes",
"name" : "empty",
"path" : "empty_wasm.so",
"port" : 10000,
"argsize" : 1
}