chore: partial namespace of runtime

main
Sean McBride 5 years ago
parent ef0056a511
commit 70d87fcb51

@@ -23,7 +23,7 @@ struct arch_context {
};
typedef struct arch_context arch_context_t;
extern __thread arch_context_t base_context;
extern __thread arch_context_t worker_thread__base_context;
static inline void
arch_context_init(arch_context_t *actx, reg_t ip, reg_t sp)

@@ -42,12 +42,12 @@ struct arch_context {
};
typedef struct arch_context arch_context_t;
extern __thread arch_context_t base_context;
extern __thread arch_context_t worker_thread__base_context;
static void
arch_mcontext_save(arch_context_t *ctx, mcontext_t *mc)
{
assert(ctx != &base_context);
assert(ctx != &worker_thread__base_context);
ctx->regs[5] = 0;
memcpy(&ctx->mctx, mc, sizeof(mcontext_t));
@@ -56,7 +56,7 @@ arch_mcontext_save(arch_context_t *ctx, mcontext_t *mc)
static int
arch_mcontext_restore(mcontext_t *mc, arch_context_t *ctx)
{
assert(ctx != &base_context);
assert(ctx != &worker_thread__base_context);
// if ctx->regs[5] is set, this was last in a user-level context switch state!
// else restore mcontext..
@@ -107,12 +107,12 @@ arch_context_switch(arch_context_t *ca, arch_context_t *na)
if (!ca) {
assert(na);
// switching from "no sandbox to execute" state to "executing a sandbox"
ca = &base_context;
ca = &worker_thread__base_context;
} else if (!na) {
assert(ca);
// switching from "executing a sandbox" to "no execution" state.
na = &base_context;
na = &worker_thread__base_context;
} else {
assert(na && ca);
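
The two NULL branches in arch_context_switch() are the only consumers of the thread-local base context: it is saved when an idle worker first switches into a sandbox and resumed when no sandbox is left to run. A minimal sketch of the two call sites, where sb is an illustrative struct sandbox * (the ctxt member does appear later in this commit):

/* Sketch only; sb is illustrative, not part of this commit. */
arch_context_switch(NULL, &sb->ctxt); /* idle -> sandbox: saves into worker_thread__base_context */
/* ... sandbox executes ... */
arch_context_switch(&sb->ctxt, NULL); /* sandbox -> idle: resumes worker_thread__base_context */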

@@ -13,7 +13,7 @@ extern http_parser_settings runtime__http_parser_settings;
static inline struct sandbox *
current_sandbox__get(void)
{
return current_sandbox;
return worker_thread__current_sandbox;
}
/**
@@ -24,7 +24,7 @@ static inline void
current_sandbox__set(struct sandbox *sandbox)
{
// FIXME: critical-section.
current_sandbox = sandbox;
worker_thread__current_sandbox = sandbox;
if (sandbox == NULL) return;
// Thread Local State about the Current Sandbox

@@ -155,7 +155,7 @@ http_parser_settings__on_msg_end(http_parser *parser)
/**
* The settings global with the Callback Functions for HTTP Events
*/
void
static inline void
http_parser_settings__register_callbacks(http_parser_settings *settings)
{
settings->on_url = http_parser_settings__on_url;
@@ -167,10 +167,14 @@ http_parser_settings__register_callbacks(http_parser_settings *settings)
settings->on_message_complete = http_parser_settings__on_msg_end;
}
/**
* This is really the only function that should have to be called to set up this structure
**/
void
http_parser_settings__initialize(http_parser_settings *settings)
{
http_parser_settings_init(settings);
http_parser_settings__register_callbacks(settings);
}
#endif /* SRFT_HTTP_PARSER_SETTINGS_H */
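
With callback registration folded into http_parser_settings__initialize(), callers need exactly one call to set up the settings struct, which is how runtime__initialize() uses it later in this commit:

/* One call both runs http_parser_settings_init() and registers the callbacks. */
http_parser_settings runtime__http_parser_settings;
http_parser_settings__initialize(&runtime__http_parser_settings);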

@@ -35,7 +35,7 @@ libuv_callbacks__on_read_parse_http_request(uv_stream_t *stream, ssize_t number_
// When the entire message has been read, stop the stream and wake up the sandbox
uv_read_stop(stream);
wakeup_sandbox(sandbox);
worker_thread__wakeup_sandbox(sandbox);
}
/**
@@ -46,7 +46,7 @@ static inline void
libuv_callbacks__on_close_wakeup_sandbox(uv_handle_t *stream)
{
struct sandbox *sandbox = stream->data;
wakeup_sandbox(sandbox);
worker_thread__wakeup_sandbox(sandbox);
}
/**
@@ -58,7 +58,7 @@ static inline void
libuv_callbacks__on_shutdown_wakeup_sandbox(uv_shutdown_t *req, int status)
{
struct sandbox *sandbox = req->data;
wakeup_sandbox(sandbox);
worker_thread__wakeup_sandbox(sandbox);
}
/**
@@ -76,7 +76,7 @@ libuv_callbacks__on_write_wakeup_sandbox(uv_write_t *write, int status)
uv_shutdown(&sandbox->client_libuv_shutdown_request, (uv_stream_t *)&sandbox->client_libuv_stream, libuv_callbacks__on_shutdown_wakeup_sandbox);
return;
}
wakeup_sandbox(sandbox);
worker_thread__wakeup_sandbox(sandbox);
}
static inline void
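
All of these callbacks recover the sandbox from the libuv handle's data field, so the handle must be tagged before I/O is armed. A sketch of the assumed registration site, which is not part of this diff (the alloc callback name is hypothetical):

/* Sketch; not part of this commit. */
sandbox->client_libuv_stream.data = sandbox; /* lets the callbacks recover the sandbox */
uv_read_start((uv_stream_t *)&sandbox->client_libuv_stream,
              libuv_callbacks__on_allocate /* hypothetical alloc callback */,
              libuv_callbacks__on_read_parse_http_request);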

@@ -11,15 +11,15 @@
extern int runtime__epoll_file_descriptor;
extern struct deque_sandbox *runtime__global_deque;
extern pthread_mutex_t runtime__global_deque_mutex;
extern __thread uv_loop_t runtime__uvio_handle;
extern __thread uv_loop_t worker_thread__uvio_handle;
void alloc_linear_memory(void);
void expand_memory(void);
void free_linear_memory(void *base, u32 bound, u32 max);
INLINE char *get_function_from_table(u32 idx, u32 type_id);
INLINE char *get_memory_ptr_for_runtime(u32 offset, u32 bounds_check);
void initialize_runtime(void);
void initialize_listener_thread(void);
void runtime__initialize(void);
void listener_thread__initialize(void);
void stub_init(i32 offset);
void *worker_thread_main(void *return_code);
@@ -62,7 +62,7 @@ get_memory_string(u32 offset)
static inline uv_loop_t *
get_thread_libuv_handle(void)
{
return &runtime__uvio_handle;
return &worker_thread__uvio_handle;
}
#endif /* SFRT_RUNTIME_H */

@@ -78,9 +78,9 @@ struct sandbox {
char request_response_data[1]; // of rr_data_sz, following sandbox mem..
} PAGE_ALIGNED;
extern __thread struct sandbox *current_sandbox;
extern __thread struct sandbox *worker_thread__current_sandbox;
// next_context is only used during SIGUSR1 handling
extern __thread arch_context_t *next_context;
extern __thread arch_context_t *worker_thread__next_context;
typedef struct sandbox sandbox_t;
extern void add_sandbox_to_completion_queue(struct sandbox *sandbox);
@@ -219,7 +219,7 @@ sandbox__get_libuv_handle(struct sandbox *sandbox, int handle_index)
void * sandbox_worker_main(void *data);
struct sandbox *get_next_sandbox_from_local_run_queue(int interrupt);
void block_current_sandbox(void);
void wakeup_sandbox(sandbox_t *sb);
void worker_thread__wakeup_sandbox(sandbox_t *sb);
// called in sandbox_main() before and after fn() execution
// for http request/response processing using uvio
void sandbox_block_http(void);

@@ -96,7 +96,7 @@ static void
wasm_fs_callback(uv_fs_t *req)
{
debuglog("[%p]\n", req->data);
wakeup_sandbox((sandbox_t *)req->data);
worker_thread__wakeup_sandbox((sandbox_t *)req->data);
}
// We define our own syscall numbers, because WASM uses x86_64 values even on systems that are not x86_64
@@ -762,7 +762,7 @@ wasm_connection_callback(uv_stream_t *srv, int status)
sandbox_t *s = srv->data;
debuglog(" [%p]\n", s);
s->return_value = status;
wakeup_sandbox(s);
worker_thread__wakeup_sandbox(s);
}
static void
@@ -772,7 +772,7 @@ wasm_connect_callback(uv_connect_t *req, int status)
sandbox_t *s = req->data;
debuglog(" [%p]\n", s);
s->return_value = status;
wakeup_sandbox(s);
worker_thread__wakeup_sandbox(s);
}
i32
@@ -929,7 +929,7 @@ wasm_read_callback(uv_stream_t *s, ssize_t nread, const uv_buf_t *buffer)
c->read_length = nread;
debuglog("[%p] %ld\n", c, c->read_length);
uv_read_stop(s);
wakeup_sandbox(c);
worker_thread__wakeup_sandbox(c);
}
void
@@ -939,7 +939,7 @@ wasm_write_callback(uv_write_t *req, int status)
c->return_value = status;
debuglog("[%p] %d\n", c, status);
wakeup_sandbox(c);
worker_thread__wakeup_sandbox(c);
}
void
@@ -952,7 +952,7 @@ wasm_udp_recv_callback(uv_udp_t *h, ssize_t nread, const uv_buf_t *buffer, const
c->read_length = nread;
debuglog("[%p] %ld\n", c, c->read_length);
uv_udp_recv_stop(h);
wakeup_sandbox(c);
worker_thread__wakeup_sandbox(c);
}
void
@@ -962,7 +962,7 @@ wasm_udp_send_callback(uv_udp_send_t *req, int status)
c->return_value = status;
debuglog("[%p] %d\n", c, status);
wakeup_sandbox(c);
worker_thread__wakeup_sandbox(c);
}
i32
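
Every wasm_*_callback above closes the same asynchronous loop: the syscall path tags a libuv request with the calling sandbox, blocks the sandbox, and the callback wakes it when the I/O completes. A condensed sketch of the file-read case, with buf, len, fd, and offset illustrative rather than taken from this diff:

/* Sketch of the block/wakeup pattern; local variables are illustrative. */
uv_fs_t req;
req.data = current_sandbox__get(); /* wasm_fs_callback reads this back out of req->data */
uv_buf_t bufv = uv_buf_init(buf, len);
uv_fs_read(get_thread_libuv_handle(), &req, fd, &bufv, 1, offset, wasm_fs_callback);
block_current_sandbox(); /* parked until worker_thread__wakeup_sandbox is called */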

@@ -157,7 +157,7 @@ main(int argc, char **argv)
set_resource_limits_to_max();
allocate_available_cores();
process_nostio();
initialize_runtime();
runtime__initialize();
debuglog("Parsing modules file [%s]\n", argv[1]);
if (module__new_from_json(argv[1])) {
@@ -165,6 +165,6 @@
exit(-1);
}
initialize_listener_thread();
listener_thread__initialize();
start_worker_threads();
}

@@ -32,7 +32,7 @@ http_parser_settings runtime__http_parser_settings;
* Initialize runtime global state, mask signals, and init http parser
*/
void
initialize_runtime(void)
runtime__initialize(void)
{
runtime__epoll_file_descriptor = epoll_create1(0);
assert(runtime__epoll_file_descriptor >= 0);
@@ -49,7 +49,6 @@ initialize_runtime(void)
// Initialize http_parser_settings global
http_parser_settings__initialize(&runtime__http_parser_settings);
http_parser_settings__register_callbacks(&runtime__http_parser_settings);
}
/********************************
@@ -67,7 +66,7 @@ initialize_runtime(void)
*
*/
void *
listener_thread_main(void *dummy)
listener_thread__main(void *dummy)
{
struct epoll_event *epoll_events = (struct epoll_event *)malloc(EPOLL_MAX * sizeof(struct epoll_event));
int total_requests = 0;
@@ -115,7 +114,7 @@ listener_thread_main(void *dummy)
* Initializes the listener thread, pinned to core 0, and starts to listen for requests
*/
void
initialize_listener_thread(void)
listener_thread__initialize(void)
{
cpu_set_t cs;
@@ -123,7 +122,7 @@ initialize_listener_thread(void)
CPU_SET(MOD_REQ_CORE, &cs);
pthread_t listener_thread;
int ret = pthread_create(&listener_thread, NULL, listener_thread_main, NULL);
int ret = pthread_create(&listener_thread, NULL, listener_thread__main, NULL);
assert(ret == 0);
ret = pthread_setaffinity_np(listener_thread, sizeof(cpu_set_t), &cs);
assert(ret == 0);
@@ -138,30 +137,30 @@
* Worker Thread State *
**************************/
__thread static struct ps_list_head local_run_queue;
__thread static struct ps_list_head local_completion_queue;
__thread static struct ps_list_head worker_thread__run_queue;
__thread static struct ps_list_head worker_thread__completion_queue;
// current sandbox that is active..
__thread sandbox_t *current_sandbox = NULL;
__thread sandbox_t *worker_thread__current_sandbox = NULL;
// context pointer to switch to when this thread gets a SIGUSR1
__thread arch_context_t *next_context = NULL;
__thread arch_context_t *worker_thread__next_context = NULL;
// context of the runtime thread before running sandboxes or to resume its "main".
__thread arch_context_t base_context;
__thread arch_context_t worker_thread__base_context;
// libuv i/o loop handle per sandboxing thread!
__thread uv_loop_t runtime__uvio_handle;
__thread uv_loop_t worker_thread__uvio_handle;
// Flag to signify if the thread is currently running callbacks in the libuv event loop
static __thread unsigned int in_callback;
static __thread unsigned int worker_thread__is_in_callback;
/**************************************************
* Worker Thread Logic
*************************************************/
static inline void add_sandbox_to_local_run_queue(struct sandbox *sandbox);
static inline void worker_thread__run_queue__add_sandbox(struct sandbox *sandbox);
/**
* @brief Switches to the next sandbox, placing the current sandbox on the completion queue if it is in the RETURNED state
@@ -178,7 +177,7 @@ switch_to_sandbox(struct sandbox *next_sandbox)
current_sandbox__set(next_sandbox);
// If the current sandbox we're switching from is in a RETURNED state, add to completion queue
if (current_sandbox && current_sandbox->state == RETURNED) add_sandbox_to_completion_queue(current_sandbox);
next_context = next_register_context;
worker_thread__next_context = next_register_context;
arch_context_switch(current_register_context, next_register_context);
softint__enable();
}
@@ -188,7 +187,7 @@ switch_to_sandbox(struct sandbox *next_sandbox)
* @param sandbox the sandbox to check and update if blocked
**/
void
wakeup_sandbox(sandbox_t *sandbox)
worker_thread__wakeup_sandbox(sandbox_t *sandbox)
{
softint__disable();
debuglog("[%p: %s]\n", sandbox, sandbox->module->name);
@@ -196,7 +195,7 @@ wakeup_sandbox(sandbox_t *sandbox)
assert(sandbox->state == BLOCKED);
assert(ps_list_singleton_d(sandbox));
sandbox->state = RUNNABLE;
ps_list_head_append_d(&local_run_queue, sandbox);
ps_list_head_append_d(&worker_thread__run_queue, sandbox);
done:
softint__enable();
}
@@ -208,7 +207,7 @@ done:
void
block_current_sandbox(void)
{
assert(in_callback == 0);
assert(worker_thread__is_in_callback == 0);
softint__disable();
struct sandbox *current_sandbox = current_sandbox__get();
ps_list_rem_d(current_sandbox);
@@ -274,7 +273,7 @@ pull_sandbox_requests_from_global_runqueue(void)
free(sandbox_request);
// Set the sandbox as runnable and place on the local runqueue
sandbox->state = RUNNABLE;
add_sandbox_to_local_run_queue(sandbox);
worker_thread__run_queue__add_sandbox(sandbox);
total_sandboxes_pulled++;
}
@@ -287,26 +286,26 @@ pull_sandbox_requests_from_global_runqueue(void)
void
execute_libuv_event_loop(void)
{
in_callback = 1;
worker_thread__is_in_callback = 1;
int n = uv_run(get_thread_libuv_handle(), UV_RUN_NOWAIT), i = 0;
while (n > 0) {
n--;
uv_run(get_thread_libuv_handle(), UV_RUN_NOWAIT);
}
in_callback = 0;
worker_thread__is_in_callback = 0;
}
/**
* Append the sandbox to the local_run_queue
* Append the sandbox to the worker_thread__run_queue
* @param sandbox sandbox to add
*/
static inline void
add_sandbox_to_local_run_queue(struct sandbox *sandbox)
worker_thread__run_queue__add_sandbox(struct sandbox *sandbox)
{
assert(ps_list_singleton_d(sandbox));
// fprintf(stderr, "(%d,%lu) %s: run %p, %s\n", sched_getcpu(), pthread_self(), __func__, s,
// s->module->name);
ps_list_head_append_d(&local_run_queue, sandbox);
ps_list_head_append_d(&worker_thread__run_queue, sandbox);
}
/**
@@ -330,7 +329,7 @@ get_next_sandbox_from_local_run_queue(int in_interrupt)
{
// If the thread local runqueue is empty and we're not running in the context of an interrupt,
// pull a fresh batch of sandbox requests from the global queue
if (ps_list_head_empty(&local_run_queue)) {
if (ps_list_head_empty(&worker_thread__run_queue)) {
// this is in an interrupt context, don't steal work here!
if (in_interrupt) return NULL;
if (pull_sandbox_requests_from_global_runqueue() == 0) {
@@ -341,11 +340,11 @@ get_next_sandbox_from_local_run_queue(int in_interrupt)
// Execute Round Robin Scheduling Logic
// Grab the sandbox at the head of the thread local runqueue, add it to the end, and return it
struct sandbox *sandbox = ps_list_head_first_d(&local_run_queue, struct sandbox);
struct sandbox *sandbox = ps_list_head_first_d(&worker_thread__run_queue, struct sandbox);
// We are assuming that any sandbox in the RETURNED state should have been pulled from the local runqueue by now!
assert(sandbox->state != RETURNED);
ps_list_rem_d(sandbox);
ps_list_head_append_d(&local_run_queue, sandbox);
ps_list_head_append_d(&worker_thread__run_queue, sandbox);
debuglog("[%p: %s]\n", sandbox, sandbox->module->name);
return sandbox;
}
@@ -358,7 +357,7 @@ void
add_sandbox_to_completion_queue(struct sandbox *sandbox)
{
assert(ps_list_singleton_d(sandbox));
ps_list_head_append_d(&local_completion_queue, sandbox);
ps_list_head_append_d(&worker_thread__completion_queue, sandbox);
}
@@ -371,8 +370,8 @@ static inline void
free_sandboxes_from_completion_queue(unsigned int number_to_free)
{
for (int i = 0; i < number_to_free; i++) {
if (ps_list_head_empty(&local_completion_queue)) break;
struct sandbox *sandbox = ps_list_head_first_d(&local_completion_queue, struct sandbox);
if (ps_list_head_empty(&worker_thread__completion_queue)) break;
struct sandbox *sandbox = ps_list_head_first_d(&worker_thread__completion_queue, struct sandbox);
if (!sandbox) break;
ps_list_rem_d(sandbox);
sandbox__free(sandbox);
@@ -391,7 +390,7 @@ worker_thread_single_loop(void)
// Try to free one sandbox from the completion queue
free_sandboxes_from_completion_queue(1);
// Execute libuv callbacks
if (!in_callback) execute_libuv_event_loop();
if (!worker_thread__is_in_callback) execute_libuv_event_loop();
// Get and return the sandbox at the head of the thread local runqueue
softint__disable();
@@ -409,18 +408,18 @@ worker_thread_single_loop(void)
void *
worker_thread_main(void *return_code)
{
arch_context_init(&base_context, 0, 0);
arch_context_init(&worker_thread__base_context, 0, 0);
ps_list_head_init(&local_run_queue);
ps_list_head_init(&local_completion_queue);
ps_list_head_init(&worker_thread__run_queue);
ps_list_head_init(&worker_thread__completion_queue);
softint__is_disabled = 0;
next_context = NULL;
worker_thread__next_context = NULL;
#ifndef PREEMPT_DISABLE
softint__unmask(SIGALRM);
softint__unmask(SIGUSR1);
#endif
uv_loop_init(&runtime__uvio_handle);
in_callback = 0;
uv_loop_init(&worker_thread__uvio_handle);
worker_thread__is_in_callback = 0;
while (true) {
struct sandbox *sandbox = worker_thread_single_loop();
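
worker_thread_main() keeps its old name in this commit even though the per-thread state it initializes is now namespaced. Its spawning side is not in this diff; a hedged sketch of what start_worker_threads() presumably does, with WORKER_COUNT hypothetical:

/* Assumed shape of start_worker_threads(); not part of this commit. */
pthread_t workers[WORKER_COUNT];
for (int i = 0; i < WORKER_COUNT; i++) {
        int ret = pthread_create(&workers[i], NULL, worker_thread_main, NULL);
        assert(ret == 0);
}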

@@ -174,7 +174,7 @@ sandbox_main(void)
// we'd potentially do what we do in the switch_to_sandbox() API here for cleanup..
if (!softint__is_enabled()) {
arch_context_init(&current_sandbox->ctxt, 0, 0);
next_context = NULL;
worker_thread__next_context = NULL;
softint__enable();
}
struct module *current_module = sandbox__get_module(current_sandbox);

@@ -80,7 +80,7 @@ softint__handle_signals(int signal_type, siginfo_t *signal_info, void *user_cont
softint__SIGALRM_count++;
// softint__supported_signals per-core..
if (curr && curr->state == RETURNED) return;
if (next_context) return;
if (worker_thread__next_context) return;
if (!softint__is_enabled()) return;
softint__schedule_alarm(user_context_raw);
@@ -90,17 +90,17 @@
// make sure sigalrm doesn't mess this up if nested..
assert(!softint__is_enabled());
/* we set current before calling pthread_kill! */
assert(next_context && (&curr->ctxt == next_context));
assert(worker_thread__next_context && (&curr->ctxt == worker_thread__next_context));
assert(signal_info->si_code == SI_TKILL);
// debuglog("usr1:%d\n", softint__SIGUSR_count);
softint__SIGUSR_count++;
// do not save current sandbox.. it is in co-operative switch..
// pick the next from "next_context"..
// pick the next from "worker_thread__next_context"..
// assert its "sp" to be zero in regs..
// memcpy from next context..
arch_mcontext_restore(&user_context->uc_mcontext, &curr->ctxt);
next_context = NULL;
worker_thread__next_context = NULL;
softint__enable();
break;
}
@@ -139,7 +139,7 @@ softint__schedule_alarm(void *user_context_raw)
// reset if SIGALRM happens before SIGUSR1 and if don't preempt..OR
// perhaps switch here for SIGUSR1 and see if we can clear that signal
// so it doesn't get called on SIGALRM return..
// next_context = NULL;
// worker_thread__next_context = NULL;
done:
softint__enable();
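
The SIGUSR1 branch above only works because the sender publishes the target context before raising the signal, which is what the "we set current before calling pthread_kill!" comment and the worker_thread__next_context assertion check. A condensed sketch of the assumed sending side (the preempting scheduler itself is not part of this diff):

/* Sketch; the sending side is assumed, not shown in this commit. */
current_sandbox__set(next_sandbox);                /* current is set before pthread_kill */
worker_thread__next_context = &next_sandbox->ctxt; /* consumed by the SIGUSR1 handler */
pthread_kill(pthread_self(), SIGUSR1);             /* handler restores the context, then
                                                      clears worker_thread__next_context */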
