refactor: Cleanup deferred sigalrm handling

master
Sean McBride 3 years ago
parent ce132b7bcc
commit 72e6c3e043

@ -27,7 +27,6 @@ sandbox_set_as_preempted(struct sandbox *sandbox, sandbox_state_t last_state)
switch (last_state) { switch (last_state) {
case SANDBOX_RUNNING_SYS: { case SANDBOX_RUNNING_SYS: {
current_sandbox_set(NULL);
break; break;
} }
default: { default: {

@ -29,7 +29,6 @@ sandbox_set_as_running_sys(struct sandbox *sandbox, sandbox_state_t last_state)
} }
case SANDBOX_RUNNABLE: { case SANDBOX_RUNNABLE: {
assert(sandbox); assert(sandbox);
current_sandbox_set(sandbox);
/* Does not handle context switch because the caller knows if we need to use fast or slow switched. We /* Does not handle context switch because the caller knows if we need to use fast or slow switched. We
* can fix this by breakout out SANDBOX_RUNNABLE and SANDBOX_PREEMPTED */ * can fix this by breakout out SANDBOX_RUNNABLE and SANDBOX_PREEMPTED */
break; break;

@ -25,7 +25,6 @@ sandbox_set_as_running_user(struct sandbox *sandbox, sandbox_state_t last_state)
} }
case SANDBOX_PREEMPTED: { case SANDBOX_PREEMPTED: {
assert(sandbox); assert(sandbox);
current_sandbox_set(sandbox);
break; break;
} }
default: { default: {

@ -180,12 +180,14 @@ scheduler_preemptive_switch_to(ucontext_t *interrupted_context, struct sandbox *
case ARCH_CONTEXT_VARIANT_FAST: { case ARCH_CONTEXT_VARIANT_FAST: {
assert(next->state == SANDBOX_RUNNABLE); assert(next->state == SANDBOX_RUNNABLE);
arch_context_restore_fast(&interrupted_context->uc_mcontext, &next->ctxt); arch_context_restore_fast(&interrupted_context->uc_mcontext, &next->ctxt);
current_sandbox_set(next);
sandbox_set_as_running_sys(next, SANDBOX_RUNNABLE); sandbox_set_as_running_sys(next, SANDBOX_RUNNABLE);
break; break;
} }
case ARCH_CONTEXT_VARIANT_SLOW: { case ARCH_CONTEXT_VARIANT_SLOW: {
assert(next->state == SANDBOX_PREEMPTED); assert(next->state == SANDBOX_PREEMPTED);
arch_context_restore_slow(&interrupted_context->uc_mcontext, &next->ctxt); arch_context_restore_slow(&interrupted_context->uc_mcontext, &next->ctxt);
current_sandbox_set(next);
sandbox_set_as_running_user(next, SANDBOX_PREEMPTED); sandbox_set_as_running_user(next, SANDBOX_PREEMPTED);
break; break;
} }
@ -207,8 +209,6 @@ scheduler_preemptive_sched(ucontext_t *interrupted_context)
{ {
assert(interrupted_context != NULL); assert(interrupted_context != NULL);
software_interrupt_deferred_sigalrm_clear();
/* Process epoll to make sure that all runnable jobs are considered for execution */ /* Process epoll to make sure that all runnable jobs are considered for execution */
scheduler_execute_epoll_loop(); scheduler_execute_epoll_loop();
@ -232,12 +232,10 @@ scheduler_preemptive_sched(ucontext_t *interrupted_context)
debuglog("Preempting sandbox %lu to run sandbox %lu\n", current->id, next->id); debuglog("Preempting sandbox %lu to run sandbox %lu\n", current->id, next->id);
#endif #endif
scheduler_log_sandbox_switch(current, next);
/* Preempt executing sandbox */ /* Preempt executing sandbox */
scheduler_log_sandbox_switch(current, next);
sandbox_preempt(current); sandbox_preempt(current);
arch_context_save_slow(&current->ctxt, &interrupted_context->uc_mcontext); arch_context_save_slow(&current->ctxt, &interrupted_context->uc_mcontext);
scheduler_preemptive_switch_to(interrupted_context, next); scheduler_preemptive_switch_to(interrupted_context, next);
} }
@ -259,13 +257,14 @@ scheduler_cooperative_switch_to(struct sandbox *next_sandbox)
switch (next_sandbox->state) { switch (next_sandbox->state) {
case SANDBOX_RUNNABLE: { case SANDBOX_RUNNABLE: {
assert(next_context->variant == ARCH_CONTEXT_VARIANT_FAST); assert(next_context->variant == ARCH_CONTEXT_VARIANT_FAST);
current_sandbox_set(next_sandbox);
sandbox_set_as_running_sys(next_sandbox, SANDBOX_RUNNABLE); sandbox_set_as_running_sys(next_sandbox, SANDBOX_RUNNABLE);
break; break;
} }
case SANDBOX_PREEMPTED: { case SANDBOX_PREEMPTED: {
assert(next_context->variant == ARCH_CONTEXT_VARIANT_SLOW); assert(next_context->variant == ARCH_CONTEXT_VARIANT_SLOW);
/* arch_context_switch triggers a SIGUSR1, which transitions next_sandbox to running_user */
current_sandbox_set(next_sandbox); current_sandbox_set(next_sandbox);
/* arch_context_switch triggers a SIGUSR1, which transitions next_sandbox to running_user */
break; break;
} }
default: { default: {
@ -286,7 +285,8 @@ scheduler_cooperative_sched()
/* Assumption: only called by the "base context" */ /* Assumption: only called by the "base context" */
assert(current_sandbox_get() == NULL); assert(current_sandbox_get() == NULL);
software_interrupt_deferred_sigalrm_clear(); /* Deferred signals should have been cleared by this point */
assert(deferred_sigalrm == 0);
/* Try to wakeup sleeping sandboxes */ /* Try to wakeup sleeping sandboxes */
scheduler_execute_epoll_loop(); scheduler_execute_epoll_loop();
@ -308,3 +308,15 @@ scheduler_worker_would_preempt(int worker_idx)
uint64_t global_deadline = global_request_scheduler_peek(); uint64_t global_deadline = global_request_scheduler_peek();
return global_deadline < local_deadline; return global_deadline < local_deadline;
} }
/**
 * Hand control from the current sandbox context back to this worker's base
 * context, where the cooperative scheduler runs.
 * @param current_context the arch context being switched away from
 */
static inline void
scheduler_switch_to_base_context(struct arch_context *current_context)
{
	/* Assumption: the base worker context is never preempted, so it must
	 * still be in the fast variant */
	assert(worker_thread_base_context.variant == ARCH_CONTEXT_VARIANT_FAST);

	/* Drop any SIGALRMs deferred while tearing down the sandbox; the
	 * cooperative scheduler running in the base context will pick up
	 * pending work itself */
	software_interrupt_deferred_sigalrm_clear();

	arch_context_switch(current_context, &worker_thread_base_context);
}

@ -71,22 +71,23 @@ software_interrupt_unmask_signal(int signal)
extern thread_local _Atomic volatile sig_atomic_t deferred_sigalrm; extern thread_local _Atomic volatile sig_atomic_t deferred_sigalrm;
/* Discard any deferred SIGALRMs for this thread: feed the pending count into
 * the deferral statistics (presumably a per-thread max — confirm against
 * software_interrupt_counts), then reset the counter to zero. */
static inline void
software_interrupt_deferred_sigalrm_clear()
{
software_interrupt_counts_deferred_sigalrm_max_update(deferred_sigalrm);
atomic_store(&deferred_sigalrm, 0);
}
static inline void static inline void
software_interrupt_deferred_sigalrm_replay() software_interrupt_deferred_sigalrm_replay()
{ {
if (deferred_sigalrm > 0) { if (deferred_sigalrm > 0) {
software_interrupt_deferred_sigalrm_clear();
software_interrupt_counts_deferred_sigalrm_replay_increment(); software_interrupt_counts_deferred_sigalrm_replay_increment();
raise(SIGALRM); raise(SIGALRM);
} }
} }
/**
 * Discard any deferred SIGALRMs for this thread.
 * Feeds the pending count into the deferral statistics (presumably a
 * per-thread max — confirm against software_interrupt_counts) before
 * resetting the counter to zero.
 */
static inline void
software_interrupt_deferred_sigalrm_clear(void)
{
	/* Record how many signals were pending before we throw them away */
	software_interrupt_counts_deferred_sigalrm_max_update(deferred_sigalrm);
	atomic_store(&deferred_sigalrm, 0);
}
/************************* /*************************
* Exports from software_interrupt.c * * Exports from software_interrupt.c *
************************/ ************************/

@ -34,6 +34,8 @@ void
current_sandbox_sleep() current_sandbox_sleep()
{ {
struct sandbox *sandbox = current_sandbox_get(); struct sandbox *sandbox = current_sandbox_get();
current_sandbox_set(NULL);
assert(sandbox != NULL); assert(sandbox != NULL);
struct arch_context *current_context = &sandbox->ctxt; struct arch_context *current_context = &sandbox->ctxt;
@ -52,11 +54,7 @@ current_sandbox_sleep()
sandbox_state_stringify(sandbox->state)); sandbox_state_stringify(sandbox->state));
} }
current_sandbox_set(NULL); scheduler_switch_to_base_context(current_context);
/* Assumption: Base Worker context should never be preempted */
assert(worker_thread_base_context.variant == ARCH_CONTEXT_VARIANT_FAST);
arch_context_switch(current_context, &worker_thread_base_context);
} }
/** /**
@ -91,11 +89,9 @@ current_sandbox_exit()
} }
/* Do not access sandbox after this, as it is on the completion queue! */ /* Do not access sandbox after this, as it is on the completion queue! */
/* Assumption: Base Worker context should never be preempted */ scheduler_switch_to_base_context(current_context);
assert(worker_thread_base_context.variant == ARCH_CONTEXT_VARIANT_FAST);
arch_context_switch(current_context, &worker_thread_base_context);
/* The schduler should never switch back to completed sandboxes */ /* The scheduler should never switch back to completed sandboxes */
assert(0); assert(0);
} }

@ -24,8 +24,8 @@
#include "software_interrupt.h" #include "software_interrupt.h"
#include "software_interrupt_counts.h" #include "software_interrupt_counts.h"
thread_local _Atomic volatile sig_atomic_t handler_depth; thread_local _Atomic volatile sig_atomic_t handler_depth = 0;
thread_local _Atomic volatile sig_atomic_t deferred_sigalrm; thread_local _Atomic volatile sig_atomic_t deferred_sigalrm = 0;
/*************************************** /***************************************
* Externs * Externs
@ -37,6 +37,12 @@ extern pthread_t *runtime_worker_threads;
* Private Static Inlines * * Private Static Inlines *
*************************/ *************************/
/**
 * Check whether this worker thread is currently in its cooperative
 * scheduler rather than executing a sandbox.
 * @returns true when no sandbox is set as current on this thread
 */
static inline bool
worker_thread_is_running_cooperative_scheduler(void)
{
	struct sandbox *running = current_sandbox_get();
	return running == NULL;
}
static inline void static inline void
defer_sigalrm() defer_sigalrm()
{ {
@ -108,6 +114,8 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
case SIGALRM: { case SIGALRM: {
propagate_sigalrm(signal_info); propagate_sigalrm(signal_info);
if (worker_thread_is_running_cooperative_scheduler()) break;
if (sandbox_is_preemptable(current_sandbox)) { if (sandbox_is_preemptable(current_sandbox)) {
scheduler_preemptive_sched(interrupted_context); scheduler_preemptive_sched(interrupted_context);
} else { } else {

Loading…
Cancel
Save