|
|
|
@ -33,8 +33,6 @@ extern enum SCHEDULER scheduler;
|
|
|
|
|
static inline struct sandbox *
|
|
|
|
|
scheduler_edf_get_next()
|
|
|
|
|
{
|
|
|
|
|
assert(!software_interrupt_is_enabled());
|
|
|
|
|
|
|
|
|
|
/* Get the deadline of the sandbox at the head of the local request queue */
|
|
|
|
|
struct sandbox * local = local_runqueue_get_next();
|
|
|
|
|
uint64_t local_deadline = local == NULL ? UINT64_MAX : local->absolute_deadline;
|
|
|
|
@ -53,7 +51,6 @@ scheduler_edf_get_next()
|
|
|
|
|
|
|
|
|
|
assert(global->state == SANDBOX_INITIALIZED);
|
|
|
|
|
sandbox_set_as_runnable(global, SANDBOX_INITIALIZED);
|
|
|
|
|
local_runqueue_add(global);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
@ -70,8 +67,6 @@ err_allocate:
|
|
|
|
|
static inline struct sandbox *
|
|
|
|
|
scheduler_fifo_get_next()
|
|
|
|
|
{
|
|
|
|
|
assert(!software_interrupt_is_enabled());
|
|
|
|
|
|
|
|
|
|
struct sandbox * sandbox = local_runqueue_get_next();
|
|
|
|
|
struct sandbox_request *sandbox_request = NULL;
|
|
|
|
|
|
|
|
|
@ -84,7 +79,6 @@ scheduler_fifo_get_next()
|
|
|
|
|
if (!sandbox) goto err_allocate;
|
|
|
|
|
|
|
|
|
|
sandbox_set_as_runnable(sandbox, SANDBOX_INITIALIZED);
|
|
|
|
|
local_runqueue_add(sandbox);
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
done:
|
|
|
|
@ -151,12 +145,12 @@ scheduler_runqueue_initialize()
|
|
|
|
|
static inline void
|
|
|
|
|
scheduler_preempt(ucontext_t *user_context)
|
|
|
|
|
{
|
|
|
|
|
	/* If FIFO, just return
	 * TODO: Should this RR? */
|
|
|
|
|
if (scheduler == SCHEDULER_FIFO) return;
|
|
|
|
|
|
|
|
|
|
assert(scheduler == SCHEDULER_EDF);
|
|
|
|
|
assert(user_context != NULL);
|
|
|
|
|
assert(!software_interrupt_is_enabled());
|
|
|
|
|
|
|
|
|
|
/* Process epoll to make sure that all runnable jobs are considered for execution */
|
|
|
|
|
worker_thread_execute_epoll_loop();
|
|
|
|
@ -171,6 +165,10 @@ scheduler_preempt(ucontext_t *user_context)
|
|
|
|
|
/* If current equals return, we are already running earliest deadline, so resume execution */
|
|
|
|
|
if (current == next) return;
|
|
|
|
|
|
|
|
|
|
#ifdef LOG_PREEMPTION
|
|
|
|
|
debuglog("Preempting sandbox %lu to run sandbox %lu\n", current->id, next->id);
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
/* Save the context of the currently executing sandbox before switching from it */
|
|
|
|
|
sandbox_set_as_runnable(current, SANDBOX_RUNNING);
|
|
|
|
|
arch_mcontext_save(¤t->ctxt, &user_context->uc_mcontext);
|
|
|
|
@ -178,12 +176,10 @@ scheduler_preempt(ucontext_t *user_context)
|
|
|
|
|
/* Update current_sandbox to the next sandbox */
|
|
|
|
|
assert(next->state == SANDBOX_RUNNABLE);
|
|
|
|
|
sandbox_set_as_running(next, SANDBOX_RUNNABLE);
|
|
|
|
|
current_sandbox_set(next);
|
|
|
|
|
|
|
|
|
|
/* Update the current deadline of the worker thread */
|
|
|
|
|
runtime_worker_threads_deadline[worker_thread_idx] = next->absolute_deadline;
|
|
|
|
|
|
|
|
|
|
/* Restore the context of this sandbox */
|
|
|
|
|
/* A sandbox cannot be preempted by a slow context because this was in the
|
|
|
|
|
* runqueue during the last scheduling decision. */
|
|
|
|
|
assert(next->ctxt.variant == ARCH_CONTEXT_VARIANT_FAST);
|
|
|
|
|
arch_context_restore_new(&user_context->uc_mcontext, &next->ctxt);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
@ -198,53 +194,49 @@ scheduler_print(enum SCHEDULER variant)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
 * Logs a context switch between sandboxes (or from the worker's base context)
 * when LOG_CONTEXT_SWITCHES is defined; otherwise compiles to a no-op.
 * @param current_sandbox the sandbox being switched away from, or NULL when
 *        switching from the "Base Context"
 * @param next_sandbox the sandbox being switched to (must be non-NULL; it is
 *        dereferenced unconditionally)
 */
static inline void
scheduler_log_sandbox_switch(struct sandbox *current_sandbox, struct sandbox *next_sandbox)
{
#ifdef LOG_CONTEXT_SWITCHES
	if (current_sandbox == NULL) {
		/* Switching from "Base Context" */
		debuglog("Base Context (@%p) (%s) > Sandbox %lu (@%p) (%s)\n", &worker_thread_base_context,
		         arch_context_variant_print(worker_thread_base_context.variant), next_sandbox->id,
		         &next_sandbox->ctxt, arch_context_variant_print(next_sandbox->ctxt.variant));
	} else {
		/* NOTE(review): restored `&current_sandbox->ctxt` here — the original line was
		 * corrupted by HTML-entity mojibake (`¤t_sandbox`), which would not compile. */
		debuglog("Sandbox %lu (@%p) (%s) > Sandbox %lu (@%p) (%s)\n", current_sandbox->id,
		         &current_sandbox->ctxt, arch_context_variant_print(current_sandbox->ctxt.variant),
		         next_sandbox->id, &next_sandbox->ctxt, arch_context_variant_print(next_sandbox->ctxt.variant));
	}
#endif
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* @brief Switches to the next sandbox, placing the current sandbox on the completion queue if in SANDBOX_RETURNED state
|
|
|
|
|
* @brief Switches to the next sandbox, placing the current sandbox on the completion queue if in
|
|
|
|
|
* SANDBOX_RETURNED state
|
|
|
|
|
* @param next_sandbox The Sandbox Context to switch to
|
|
|
|
|
*/
|
|
|
|
|
static inline void
|
|
|
|
|
scheduler_switch_to(struct sandbox *next_sandbox)
|
|
|
|
|
{
|
|
|
|
|
/* Assumption: The caller disables interrupts */
|
|
|
|
|
assert(!software_interrupt_is_enabled());
|
|
|
|
|
|
|
|
|
|
assert(next_sandbox != NULL);
|
|
|
|
|
assert(next_sandbox->state == SANDBOX_RUNNABLE);
|
|
|
|
|
struct arch_context *next_context = &next_sandbox->ctxt;
|
|
|
|
|
|
|
|
|
|
/* Get the old sandbox we're switching from.
|
|
|
|
|
* This is null if switching from base context
|
|
|
|
|
*/
|
|
|
|
|
struct sandbox *current_sandbox = current_sandbox_get();
|
|
|
|
|
struct arch_context *current_context = NULL;
|
|
|
|
|
if (current_sandbox != NULL) current_context = ¤t_sandbox->ctxt;
|
|
|
|
|
|
|
|
|
|
assert(next_sandbox != current_sandbox);
|
|
|
|
|
|
|
|
|
|
/* If not the current sandbox (which would be in running state), should be runnable */
|
|
|
|
|
assert(next_sandbox->state == SANDBOX_RUNNABLE);
|
|
|
|
|
|
|
|
|
|
/* Update the worker's absolute deadline */
|
|
|
|
|
runtime_worker_threads_deadline[worker_thread_idx] = next_sandbox->absolute_deadline;
|
|
|
|
|
|
|
|
|
|
if (current_sandbox == NULL) {
|
|
|
|
|
/* Switching from "Base Context" */
|
|
|
|
|
#ifdef LOG_CONTEXT_SWITCHES
|
|
|
|
|
debuglog("Base Context (@%p) (%s) > Sandbox %lu (@%p) (%s)\n", &worker_thread_base_context,
|
|
|
|
|
arch_context_variant_print(worker_thread_base_context.variant), next_sandbox->id, next_context,
|
|
|
|
|
arch_context_variant_print(next_context->variant));
|
|
|
|
|
#endif
|
|
|
|
|
} else {
|
|
|
|
|
#ifdef LOG_CONTEXT_SWITCHES
|
|
|
|
|
debuglog("Sandbox %lu (@%p) (%s) > Sandbox %lu (@%p) (%s)\n", current_sandbox->id,
|
|
|
|
|
¤t_sandbox->ctxt, arch_context_variant_print(current_sandbox->ctxt.variant),
|
|
|
|
|
next_sandbox->id, &next_sandbox->ctxt, arch_context_variant_print(next_context->variant));
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
struct arch_context *current_context = NULL;
|
|
|
|
|
if (current_sandbox != NULL) {
|
|
|
|
|
current_context = ¤t_sandbox->ctxt;
|
|
|
|
|
sandbox_exit(current_sandbox);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
scheduler_log_sandbox_switch(current_sandbox, next_sandbox);
|
|
|
|
|
sandbox_set_as_running(next_sandbox, next_sandbox->state);
|
|
|
|
|
current_sandbox_set(next_sandbox);
|
|
|
|
|
arch_context_switch(current_context, next_context);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
@ -255,19 +247,12 @@ scheduler_switch_to(struct sandbox *next_sandbox)
|
|
|
|
|
static inline void
|
|
|
|
|
scheduler_yield()
|
|
|
|
|
{
|
|
|
|
|
assert(!software_interrupt_is_enabled());
|
|
|
|
|
|
|
|
|
|
struct sandbox *current_sandbox = current_sandbox_get();
|
|
|
|
|
#ifndef NDEBUG
|
|
|
|
|
if (current_sandbox != NULL) {
|
|
|
|
|
assert(current_sandbox->state < SANDBOX_STATE_COUNT);
|
|
|
|
|
assert(current_sandbox->stack_size == current_sandbox->module->stack_size);
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
/* Assumption: Base Context should never switch to Base Context */
|
|
|
|
|
assert(current_sandbox != NULL);
|
|
|
|
|
|
|
|
|
|
struct arch_context *current_context = ¤t_sandbox->ctxt;
|
|
|
|
|
|
|
|
|
|
/* Assumption: Base Context should never switch to Base Context */
|
|
|
|
|
assert(current_context != &worker_thread_base_context);
|
|
|
|
|
|
|
|
|
|
#ifdef LOG_CONTEXT_SWITCHES
|
|
|
|
@ -278,8 +263,10 @@ scheduler_yield()
|
|
|
|
|
|
|
|
|
|
sandbox_exit(current_sandbox);
|
|
|
|
|
current_sandbox_set(NULL);
|
|
|
|
|
assert(worker_thread_base_context.variant == ARCH_CONTEXT_VARIANT_FAST);
|
|
|
|
|
runtime_worker_threads_deadline[worker_thread_idx] = UINT64_MAX;
|
|
|
|
|
|
|
|
|
|
/* Assumption: Base Worker context should never be preempted */
|
|
|
|
|
assert(worker_thread_base_context.variant == ARCH_CONTEXT_VARIANT_FAST);
|
|
|
|
|
arch_context_switch(current_context, &worker_thread_base_context);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
@ -293,13 +280,6 @@ scheduler_block(void)
|
|
|
|
|
/* Remove the sandbox we were just executing from the runqueue and mark as blocked */
|
|
|
|
|
struct sandbox *current_sandbox = current_sandbox_get();
|
|
|
|
|
|
|
|
|
|
/* We might either have blocked in start reading the request or while executing within the WebAssembly
|
|
|
|
|
* entrypoint. The preemptable flag on the context is used to differentiate. In either case, we should
|
|
|
|
|
* have disabled interrupts.
|
|
|
|
|
*/
|
|
|
|
|
if (current_sandbox->ctxt.preemptable) software_interrupt_disable();
|
|
|
|
|
assert(!software_interrupt_is_enabled());
|
|
|
|
|
|
|
|
|
|
assert(current_sandbox->state == SANDBOX_RUNNING);
|
|
|
|
|
sandbox_set_as_blocked(current_sandbox, SANDBOX_RUNNING);
|
|
|
|
|
generic_thread_dump_lock_overhead();
|
|
|
|
|