chore: namespace runtime.c

Branch: main
Author: Sean McBride, 5 years ago
Parent: 70d87fcb51
Commit: 2d9a3925cd

@@ -15,7 +15,7 @@ typedef uint64_t reg_t;
* This is the slowpath switch to a preempted sandbox!
* SIGUSR1 on the current thread and restore mcontext there!
*/
-extern void __attribute__((noreturn)) sandbox_switch_preempt(void);
+extern void __attribute__((noreturn)) worker_thread__sandbox_switch_preempt(void);
struct arch_context {
reg_t regs[ARCH_NREGS];

@@ -34,7 +34,7 @@ typedef uint64_t reg_t;
* This is the slowpath switch to a preempted sandbox!
* SIGUSR1 on the current thread and restore mcontext there!
*/
-extern void __attribute__((noreturn)) sandbox_switch_preempt(void);
+extern void __attribute__((noreturn)) worker_thread__sandbox_switch_preempt(void);
struct arch_context {
reg_t regs[ARCH_NREGS];
@@ -131,7 +131,7 @@ arch_context_switch(arch_context_t *ca, arch_context_t *na)
"movq 40(%%rbx), %%rsp\n\t"
"jmpq *128(%%rbx)\n\t"
"1:\n\t"
-"call sandbox_switch_preempt\n\t"
+"call worker_thread__sandbox_switch_preempt\n\t"
".align 8\n\t"
"2:\n\t"
"movq $0, 40(%%rbx)\n\t"

@@ -21,7 +21,7 @@ INLINE char *get_memory_ptr_for_runtime(u32 offset, u32 bounds_check);
void runtime__initialize(void);
void listener_thread__initialize(void);
void stub_init(i32 offset);
-void *worker_thread_main(void *return_code);
+void *worker_thread__main(void *return_code);
/**
* TODO: ???

@@ -27,7 +27,7 @@ typedef enum
* This is the slowpath switch to a preempted sandbox!
* SIGUSR1 on the current thread and restore mcontext there!
*/
-extern void __attribute__((noreturn)) sandbox_switch_preempt(void);
+extern void __attribute__((noreturn)) worker_thread__sandbox_switch_preempt(void);
// TODO: linear_memory_max_size is not really used
@@ -83,7 +83,7 @@ extern __thread struct sandbox *worker_thread__current_sandbox;
extern __thread arch_context_t *worker_thread__next_context;
typedef struct sandbox sandbox_t;
-extern void add_sandbox_to_completion_queue(struct sandbox *sandbox);
+extern void worker_thread__completion_queue__add_sandbox(struct sandbox *sandbox);
/***************************
* Sandbox *
@@ -217,12 +217,12 @@ sandbox__get_libuv_handle(struct sandbox *sandbox, int handle_index)
void * sandbox_worker_main(void *data);
-struct sandbox *get_next_sandbox_from_local_run_queue(int interrupt);
-void block_current_sandbox(void);
+struct sandbox *worker_thread__get_next_sandbox(int interrupt);
+void worker_thread__block_current_sandbox(void);
void worker_thread__wakeup_sandbox(sandbox_t *sb);
// called in sandbox_main() before and after fn() execution
// for http request/response processing using uvio
-void sandbox_block_http(void);
+void worker_thread__process_io(void);
void sandbox_response(void);
// should be the entry-point for each sandbox so it can do per-sandbox mem/etc init.

@@ -123,7 +123,7 @@ wasm_read(i32 filedes, i32 buf_offset, i32 nbyte)
debuglog("[%p] start[%d:%d, n%d]\n", uv_fs_get_data(&req), filedes, f, nbyte);
uv_buf_t bufv = uv_buf_init(buffer, nbyte);
uv_fs_read(get_thread_libuv_handle(), &req, f, &bufv, 1, -1, wasm_fs_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
int ret = uv_fs_get_result(&req);
debuglog("[%p] end[%d]\n", uv_fs_get_data(&req), ret);
@@ -154,7 +154,7 @@ wasm_write(i32 file_descriptor, i32 buf_offset, i32 buf_size)
uv_buf_t bufv = uv_buf_init(buffer, buf_size);
uv_fs_write(get_thread_libuv_handle(), &req, f, &bufv, 1, -1, wasm_fs_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
int ret = uv_fs_get_result(&req);
uv_fs_req_cleanup(&req);
@@ -197,7 +197,7 @@ wasm_open(i32 path_off, i32 flags, i32 mode)
debuglog("[%p] start[%s:%d:%d]\n", uv_fs_get_data(&req), path, flags, modified_flags);
uv_fs_open(get_thread_libuv_handle(), &req, path, modified_flags, mode, wasm_fs_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
int ret = uv_fs_get_result(&req);
debuglog("[%p] end[%d]\n", uv_fs_get_data(&req), ret);
@@ -234,7 +234,7 @@ wasm_close(i32 file_descriptor)
uv_fs_t req = UV_FS_REQ_INIT();
debuglog("[%p] file[%d,%d]\n", uv_fs_get_data(&req), file_descriptor, d);
uv_fs_close(get_thread_libuv_handle(), &req, d, wasm_fs_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
int ret = uv_fs_get_result(&req);
debuglog("[%p] end[%d]\n", uv_fs_get_data(&req), ret);
@@ -531,7 +531,7 @@ wasm_readv(i32 file_descriptor, i32 iov_offset, i32 iovcnt)
}
debuglog("[%p] start[%d,%d, n%d:%d]\n", uv_fs_get_data(&req), file_descriptor, d, i, j);
uv_fs_read(get_thread_libuv_handle(), &req, d, bufs, j, -1, wasm_fs_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
int ret = uv_fs_get_result(&req);
debuglog("[%p] end[%d]\n", uv_fs_get_data(&req), ret);
@@ -578,7 +578,7 @@ wasm_writev(i32 file_descriptor, i32 iov_offset, i32 iovcnt)
}
debuglog("[%p] start[%d,%d, n%d:%d]\n", uv_fs_get_data(&req), file_descriptor, d, i, j);
uv_fs_write(get_thread_libuv_handle(), &req, d, bufs, j, -1, wasm_fs_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
int ret = uv_fs_get_result(&req);
debuglog("[%p] end[%d]\n", uv_fs_get_data(&req), ret);
@@ -640,7 +640,7 @@ wasm_fsync(u32 file_descriptor)
uv_fs_t req = UV_FS_REQ_INIT();
debuglog("[%p] start[%d,%d]\n", uv_fs_get_data(&req), file_descriptor, d);
uv_fs_fsync(get_thread_libuv_handle(), &req, d, wasm_fs_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
int ret = uv_fs_get_result(&req);
debuglog("[%p] end[%d]\n", uv_fs_get_data(&req), ret);
@@ -668,7 +668,7 @@ wasm_unlink(u32 path_str_offset)
uv_fs_t req = UV_FS_REQ_INIT();
debuglog("[%p] start[%s]\n", uv_fs_get_data(&req), str);
uv_fs_unlink(get_thread_libuv_handle(), &req, str, wasm_fs_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
int ret = uv_fs_get_result(&req);
debuglog("[%p] end[%d]\n", uv_fs_get_data(&req), ret);
@@ -740,7 +740,7 @@ wasm_fchown(i32 file_descriptor, u32 owner, u32 group)
uv_fs_t req = UV_FS_REQ_INIT();
debuglog("[%p] start[%d,%d]\n", uv_fs_get_data(&req), file_descriptor, d);
uv_fs_fchown(get_thread_libuv_handle(), &req, d, owner, group, wasm_fs_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
int ret = uv_fs_get_result(&req);
debuglog("[%p] end[%d]\n", uv_fs_get_data(&req), ret);
@@ -816,7 +816,7 @@ wasm_connect(i32 sockfd, i32 sockaddr_offset, i32 addrlen)
debuglog("[%p] connect\n", c);
int r = uv_tcp_connect(&req, (uv_tcp_t *)h, get_memory_ptr_void(sockaddr_offset, addrlen),
wasm_connect_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
debuglog("[%p] %d\n", c, c->return_value);
return c->return_value;
@@ -910,7 +910,7 @@ wasm_listen(i32 sockfd, i32 backlog)
assert(t == UV_TCP);
int r = uv_listen((uv_stream_t *)h, backlog, wasm_connection_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
debuglog("[%p] %d\n", c, c->return_value);
return c->return_value;
@@ -983,7 +983,7 @@ wasm_sendto(i32 file_descriptor, i32 buff_offset, i32 len, i32 flags, i32 sockad
uv_buf_t b = uv_buf_init(buffer, len);
debuglog("[%p] tcp\n", c);
int ret = uv_write(&req, (uv_stream_t *)h, &b, 1, wasm_write_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
debuglog("[%p] %d\n", c, c->return_value);
return c->return_value;
@@ -995,7 +995,7 @@ wasm_sendto(i32 file_descriptor, i32 buff_offset, i32 len, i32 flags, i32 sockad
debuglog("[%p] udp\n", c);
// TODO: sockaddr!
int r = uv_udp_send(&req, (uv_udp_t *)h, &b, 1, NULL, wasm_udp_send_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
debuglog("[%p] %d\n", c, c->return_value);
return c->return_value;
@@ -1041,7 +1041,7 @@ wasm_recvfrom(i32 file_descriptor, i32 buff_offset, i32 size, i32 flags, i32 soc
((uv_stream_t *)h)->data = c;
debuglog("[%p] tcp\n", c);
int r = uv_read_start((uv_stream_t *)h, wasm_alloc_callback, wasm_read_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
debuglog("[%p] %d\n", c, c->return_value);
if (c->return_value == -EIO) {
// TODO: buffer errors??
@@ -1052,7 +1052,7 @@ wasm_recvfrom(i32 file_descriptor, i32 buff_offset, i32 size, i32 flags, i32 soc
((uv_udp_t *)h)->data = c;
debuglog("[%p] udp\n", c);
int r = uv_udp_recv_start((uv_udp_t *)h, wasm_alloc_callback, wasm_udp_recv_callback);
-block_current_sandbox();
+worker_thread__block_current_sandbox();
debuglog("[%p] %d\n", c, c->return_value);
if (c->return_value == -EIO) {
// TODO: buffer errors??

@@ -114,7 +114,7 @@ void
start_worker_threads()
{
for (int i = 0; i < total_worker_processors; i++) {
-int ret = pthread_create(&worker_threads[i], NULL, worker_thread_main,
+int ret = pthread_create(&worker_threads[i], NULL, worker_thread__main,
(void *)&worker_threads_argument[i]);
if (ret) {
errno = ret;

@@ -168,7 +168,7 @@ static inline void worker_thread__run_queue__add_sandbox(struct sandbox *sandbox
* @return void
*/
static inline void
-switch_to_sandbox(struct sandbox *next_sandbox)
+worker_thread__switch_to_sandbox(struct sandbox *next_sandbox)
{
arch_context_t *next_register_context = next_sandbox == NULL ? NULL : &next_sandbox->ctxt;
softint__disable();
@@ -176,7 +176,7 @@ switch_to_sandbox(struct sandbox *next_sandbox)
arch_context_t *current_register_context = current_sandbox == NULL ? NULL : &current_sandbox->ctxt;
current_sandbox__set(next_sandbox);
// If the current sandbox we're switching from is in a RETURNED state, add to completion queue
-if (current_sandbox && current_sandbox->state == RETURNED) add_sandbox_to_completion_queue(current_sandbox);
+if (current_sandbox && current_sandbox->state == RETURNED) worker_thread__completion_queue__add_sandbox(current_sandbox);
worker_thread__next_context = next_register_context;
arch_context_switch(current_register_context, next_register_context);
softint__enable();
@@ -205,25 +205,25 @@ done:
* Mark the currently executing sandbox as blocked, remove it from the local runqueue, and pull the sandbox at the head of the runqueue
**/
void
-block_current_sandbox(void)
+worker_thread__block_current_sandbox(void)
{
assert(worker_thread__is_in_callback == 0);
softint__disable();
struct sandbox *current_sandbox = current_sandbox__get();
ps_list_rem_d(current_sandbox);
current_sandbox->state = BLOCKED;
-struct sandbox *next_sandbox = get_next_sandbox_from_local_run_queue(0);
+struct sandbox *next_sandbox = worker_thread__get_next_sandbox(0);
debuglog("[%p: %next_sandbox, %p: %next_sandbox]\n", current_sandbox, current_sandbox->module->name, next_sandbox, next_sandbox ? next_sandbox->module->name : "");
softint__enable();
-switch_to_sandbox(next_sandbox);
+worker_thread__switch_to_sandbox(next_sandbox);
}
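worker_thread__block_current_sandbox() above parks the caller; the reverse direction is worker_thread__wakeup_sandbox(), which this diff does not show. A hedged sketch of what such a wakeup might look like, reusing the runqueue helpers and state constants that appear elsewhere in this file (the function name here is illustrative, not the repository's implementation):

void
worker_thread__wakeup_sandbox_sketch(sandbox_t *sandbox)
{
	softint__disable();
	if (sandbox->state == BLOCKED) {
		/* Make the sandbox schedulable again and hand it back to the local runqueue. */
		sandbox->state = RUNNABLE;
		worker_thread__run_queue__add_sandbox(sandbox);
	}
	softint__enable();
}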
/**
* TODO: What is this doing?
* Execute I/O
**/
void
-sandbox_block_http(void)
+worker_thread__process_io(void)
{
#ifdef USE_HTTP_UVIO
#ifdef USE_HTTP_SYNC
@@ -232,7 +232,7 @@ sandbox_block_http(void)
// async block!
uv_run(get_thread_libuv_handle(), UV_RUN_DEFAULT);
#else /* USE_HTTP_SYNC */
-block_current_sandbox();
+worker_thread__block_current_sandbox();
#endif /* USE_HTTP_UVIO */
#else
assert(0);
@@ -243,7 +243,7 @@ sandbox_block_http(void)
/**
* TODO: What is this doing?
**/
-void __attribute__((noinline)) __attribute__((noreturn)) sandbox_switch_preempt(void)
+void __attribute__((noinline)) __attribute__((noreturn)) worker_thread__sandbox_switch_preempt(void)
{
pthread_kill(pthread_self(), SIGUSR1);
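The pthread_kill(pthread_self(), SIGUSR1) call above only reaches a handler if one was registered during runtime initialization, which is not part of this diff. A minimal sketch of such a registration, with an assumed handler name (softint__handle_signals), might look like:

#include <signal.h>
#include <string.h>

/* Assumed name; the runtime's real SIGUSR1 handler is not shown in this commit. */
extern void softint__handle_signals(int signal_type, siginfo_t *signal_info, void *user_context_raw);

static void
install_preemption_signal_handler(void)
{
	struct sigaction signal_action;
	memset(&signal_action, 0, sizeof(signal_action));
	signal_action.sa_sigaction = softint__handle_signals; /* SA_SIGINFO handlers receive the ucontext */
	signal_action.sa_flags     = SA_SIGINFO;
	sigemptyset(&signal_action.sa_mask);
	sigaction(SIGUSR1, &signal_action, NULL);
}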
@@ -258,7 +258,7 @@ void __attribute__((noinline)) __attribute__((noreturn)) sandbox_switch_preempt(
* @return the number of sandbox requests pulled
*/
static inline int
-pull_sandbox_requests_from_global_runqueue(void)
+worker_thread__pull_and_process_sandbox_requests(void)
{
int total_sandboxes_pulled = 0;
@@ -284,7 +284,7 @@ pull_sandbox_requests_from_global_runqueue(void)
* Run all outstanding events in the local thread's libuv event loop
**/
void
-execute_libuv_event_loop(void)
+worker_thread__execute_libuv_event_loop(void)
{
worker_thread__is_in_callback = 1;
int n = uv_run(get_thread_libuv_handle(), UV_RUN_NOWAIT), i = 0;
@@ -313,7 +313,7 @@ worker_thread__run_queue__add_sandbox(struct sandbox *sandbox)
* @param sandbox sandbox
**/
static inline void
-remove_sandbox_from_local_run_queue(struct sandbox *sandbox)
+worker_thread__run_queue__remove_sandbox(struct sandbox *sandbox)
{
ps_list_rem_d(sandbox);
}
@@ -325,14 +325,14 @@ remove_sandbox_from_local_run_queue(struct sandbox *sandbox)
* @return the sandbox to execute or NULL if none are available
**/
struct sandbox *
-get_next_sandbox_from_local_run_queue(int in_interrupt)
+worker_thread__get_next_sandbox(int in_interrupt)
{
// If the thread local runqueue is empty and we're not running in the context of an interupt,
// pull a fresh batch of sandbox requests from the global queue
if (ps_list_head_empty(&worker_thread__run_queue)) {
// this is in an interrupt context, don't steal work here!
if (in_interrupt) return NULL;
-if (pull_sandbox_requests_from_global_runqueue() == 0) {
+if (worker_thread__pull_and_process_sandbox_requests() == 0) {
// debuglog("[null: null]\n");
return NULL;
}
@@ -354,7 +354,7 @@ get_next_sandbox_from_local_run_queue(int in_interrupt)
* @param sandbox
**/
void
-add_sandbox_to_completion_queue(struct sandbox *sandbox)
+worker_thread__completion_queue__add_sandbox(struct sandbox *sandbox)
{
assert(ps_list_singleton_d(sandbox));
ps_list_head_append_d(&worker_thread__completion_queue, sandbox);
@@ -367,7 +367,7 @@ add_sandbox_to_completion_queue(struct sandbox *sandbox)
* @return void
*/
static inline void
-free_sandboxes_from_completion_queue(unsigned int number_to_free)
+worker_thread__completion_queue__free_sandboxes(unsigned int number_to_free)
{
for (int i = 0; i < number_to_free; i++) {
if (ps_list_head_empty(&worker_thread__completion_queue)) break;
@@ -384,17 +384,17 @@ free_sandboxes_from_completion_queue(unsigned int number_to_free)
* @return sandbox or NULL
**/
struct sandbox *
-worker_thread_single_loop(void)
+worker_thread__single_loop(void)
{
assert(current_sandbox__get() == NULL);
// Try to free one sandbox from the completion queue
-free_sandboxes_from_completion_queue(1);
+worker_thread__completion_queue__free_sandboxes(1);
// Execute libuv callbacks
-if (!worker_thread__is_in_callback) execute_libuv_event_loop();
+if (!worker_thread__is_in_callback) worker_thread__execute_libuv_event_loop();
// Get and return the sandbox at the head of the thread local runqueue
softint__disable();
-struct sandbox *sandbox = get_next_sandbox_from_local_run_queue(0);
+struct sandbox *sandbox = worker_thread__get_next_sandbox(0);
softint__enable();
assert(sandbox == NULL || sandbox->state == RUNNABLE);
return sandbox;
@@ -406,7 +406,7 @@ worker_thread_single_loop(void)
* @param return_code - argument provided by pthread API. We set to -1 on error
**/
void *
-worker_thread_main(void *return_code)
+worker_thread__main(void *return_code)
{
arch_context_init(&worker_thread__base_context, 0, 0);
@@ -422,10 +422,10 @@ worker_thread_main(void *return_code)
worker_thread__is_in_callback = 0;
while (true) {
-struct sandbox *sandbox = worker_thread_single_loop();
+struct sandbox *sandbox = worker_thread__single_loop();
while (sandbox) {
-switch_to_sandbox(sandbox);
-sandbox = worker_thread_single_loop();
+worker_thread__switch_to_sandbox(sandbox);
+sandbox = worker_thread__single_loop();
}
}
@@ -447,14 +447,14 @@ current_sandbox__exit(void)
struct sandbox *current_sandbox = current_sandbox__get();
assert(current_sandbox);
softint__disable();
-remove_sandbox_from_local_run_queue(current_sandbox);
+worker_thread__run_queue__remove_sandbox(current_sandbox);
current_sandbox->state = RETURNED;
-struct sandbox *next_sandbox = get_next_sandbox_from_local_run_queue(0);
+struct sandbox *next_sandbox = worker_thread__get_next_sandbox(0);
assert(next_sandbox != current_sandbox);
softint__enable();
// free resources from "main function execution", as stack still in use.
// unmap linear memory only!
munmap(current_sandbox->linear_memory_start, SBOX_MAX_MEM + PAGE_SIZE);
-switch_to_sandbox(next_sandbox);
+worker_thread__switch_to_sandbox(next_sandbox);
}

@@ -90,7 +90,7 @@ current_sandbox__receive_and_parse_client_request(void)
}
#else
int r = uv_read_start((uv_stream_t *)&curr->client_libuv_stream, libuv_callbacks__on_allocate_setup_request_response_data, libuv_callbacks__on_read_parse_http_request);
-sandbox_block_http();
+worker_thread__process_io();
if (curr->request_response_data_length == 0) return 0;
#endif
return 1;
@@ -156,7 +156,7 @@ done:
};
uv_buf_t bufv = uv_buf_init(curr->request_response_data, sndsz);
int r = uv_write(&req, (uv_stream_t *)&curr->client_libuv_stream, &bufv, 1, libuv_callbacks__on_write_wakeup_sandbox);
-sandbox_block_http();
+worker_thread__process_io();
#endif
return 0;
}
@@ -170,8 +170,8 @@ sandbox_main(void)
{
struct sandbox *current_sandbox = current_sandbox__get();
// FIXME: is this right? this is the first time this sandbox is running.. so it wont
-// return to switch_to_sandbox() api..
-// we'd potentially do what we'd in switch_to_sandbox() api here for cleanup..
+// return to worker_thread__switch_to_sandbox() api..
+// we'd potentially do what we'd in worker_thread__switch_to_sandbox() api here for cleanup..
if (!softint__is_enabled()) {
arch_context_init(&current_sandbox->ctxt, 0, 0);
worker_thread__next_context = NULL;
@@ -239,7 +239,7 @@ sandbox_main(void)
#ifdef USE_HTTP_UVIO
uv_close((uv_handle_t *)&current_sandbox->client_libuv_stream, libuv_callbacks__on_close_wakeup_sakebox);
-sandbox_block_http();
+worker_thread__process_io();
#else
close(current_sandbox->client_socket_descriptor);
#endif

@@ -126,7 +126,7 @@ softint__schedule_alarm(void *user_context_raw)
if (curr == NULL) goto done;
// find a next sandbox to run..
-struct sandbox *next = get_next_sandbox_from_local_run_queue(1);
+struct sandbox *next = worker_thread__get_next_sandbox(1);
if (next == NULL) goto done;
if (next == curr) goto done; // only this sandbox to schedule.. return to it!
// save the current sandbox, state from user_context!
