feat: Simplify softint disable logic

main
Sean McBride 4 years ago
parent 758a1425b5
commit b924ed812d

@@ -6,7 +6,6 @@
#include "arch/common.h"
#include "current_sandbox.h"
#include "software_interrupt.h"
#define ARCH_SIG_JMP_OFF 0x100 /* Offset derived from the generated code */
@@ -22,7 +21,6 @@ static inline void
arch_context_init(struct arch_context *actx, reg_t ip, reg_t sp)
{
assert(actx != NULL);
assert(!software_interrupt_is_enabled());
if (ip == 0 && sp == 0) {
actx->variant = ARCH_CONTEXT_VARIANT_UNUSED;
@@ -46,9 +44,6 @@ arch_context_init(struct arch_context *actx, reg_t ip, reg_t sp)
static inline int
arch_context_switch(struct arch_context *a, struct arch_context *b)
{
/* Assumption: Software Interrupts are disabled by caller */
assert(!software_interrupt_is_enabled());
#ifndef NDEBUG
/*
* Assumption: In the case of a slow context switch, the caller
@@ -85,12 +80,6 @@ arch_context_switch(struct arch_context *a, struct arch_context *b)
reg_t *a_registers = a->regs, *b_registers = b->regs;
assert(a_registers && b_registers);
/* If switching back to a sandbox context marked as preemptable, reenable
* interrupts before jumping
* TODO: What if we receive a signal inside the inline assembly?
*/
if (b->preemptable) software_interrupt_enable();
asm volatile("mov x0, sp\n\t"
"adr x1, reset%=\n\t"
"str x1, [%[a], 8]\n\t"

@@ -1,7 +1,6 @@
#pragma once
#include "arch/common.h"
#include "software_interrupt.h"
/*
* This header is the single entry point into the arch_context code.
@@ -42,8 +41,6 @@
static inline void
arch_mcontext_restore(mcontext_t *active_context, struct arch_context *sandbox_context)
{
assert(!software_interrupt_is_enabled());
assert(active_context != NULL);
assert(sandbox_context != NULL);
@@ -67,9 +64,6 @@ arch_mcontext_restore(mcontext_t *active_context, struct arch_context *sandbox_c
static inline void
arch_mcontext_save(struct arch_context *sandbox_context, const mcontext_t *active_context)
{
/* Assumption: Only called indirectly via signal handler, so interrupts should be disabled */
assert(!software_interrupt_is_enabled());
assert(sandbox_context != NULL);
assert(active_context != NULL);

@@ -1,7 +1,6 @@
#pragma once
#include "arch/common.h"
#include "software_interrupt.h"
/**
* Initializes a context, zeros out registers, and sets the Instruction and
@@ -14,7 +13,6 @@ static inline void
arch_context_init(struct arch_context *actx, reg_t ip, reg_t sp)
{
assert(actx != NULL);
assert(!software_interrupt_is_enabled());
if (ip == 0 && sp == 0) {
actx->variant = ARCH_CONTEXT_VARIANT_UNUSED;
@@ -58,7 +56,6 @@ arch_context_init(struct arch_context *actx, reg_t ip, reg_t sp)
static inline void
arch_context_restore_new(mcontext_t *active_context, struct arch_context *sandbox_context)
{
assert(!software_interrupt_is_enabled());
assert(active_context != NULL);
assert(sandbox_context != NULL);
@@ -87,9 +84,6 @@ arch_context_restore_new(mcontext_t *active_context, struct arch_context *sandbo
static inline int
arch_context_switch(struct arch_context *a, struct arch_context *b)
{
/* Assumption: Software Interrupts are disabled by caller */
assert(!software_interrupt_is_enabled());
/* if both a and b are NULL, there is no state change */
assert(a != NULL || b != NULL);
@@ -115,13 +109,6 @@ arch_context_switch(struct arch_context *a, struct arch_context *b)
reg_t *a_registers = a->regs, *b_registers = b->regs;
assert(a_registers && b_registers);
/* If fast switching back to a sandbox context marked as preemptable, reenable
* interrupts before jumping. If this is a slow context switch, defer reenabling until
* arch_mcontext_restore
* TODO: What if we receive a signal inside the inline assembly?
*/
if (b->variant == ARCH_CONTEXT_VARIANT_FAST && b->preemptable) software_interrupt_enable();
asm volatile(
/* Create a new stack frame */
"pushq %%rbp\n\t" /* stack[stack_len++] = base_pointer */

@@ -5,7 +5,6 @@
#include "arch/getcycles.h"
#include "runtime.h"
#include "software_interrupt.h"
typedef ck_spinlock_mcs_t lock_t;
@@ -30,7 +29,6 @@ typedef ck_spinlock_mcs_t lock_t;
*/
#define LOCK_LOCK_WITH_BOOKKEEPING(lock, unique_variable_name) \
assert(listener_thread_is_running() || !software_interrupt_is_enabled()); \
struct ck_spinlock_mcs _hygiene_##unique_variable_name##_node; \
uint64_t _hygiene_##unique_variable_name##_pre = __getcycles(); \
ck_spinlock_mcs_lock((lock), &(_hygiene_##unique_variable_name##_node)); \
@@ -45,8 +43,7 @@ typedef ck_spinlock_mcs_t lock_t;
* @param lock - the address of the lock
* @param unique_variable_name - a unique prefix to hygienically namespace an associated lock/unlock pair
*/
#define LOCK_UNLOCK_WITH_BOOKKEEPING(lock, unique_variable_name) \
assert(listener_thread_is_running() || !software_interrupt_is_enabled()); \
#define LOCK_UNLOCK_WITH_BOOKKEEPING(lock, unique_variable_name) \
ck_spinlock_mcs_unlock(lock, &(_hygiene_##unique_variable_name##_node));
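Both macros hide their MCS node behind the _hygiene_##unique_variable_name##_node token, so a lock/unlock pair only composes when it shares the same prefix in the same scope. A minimal usage sketch (sample_lock and the critical section are hypothetical, not part of this commit):

	static lock_t sample_lock;

	static void
	sample_critical_section(void)
	{
		/* Declares _hygiene_sample_node and the cycle bookkeeping */
		LOCK_LOCK_WITH_BOOKKEEPING(&sample_lock, sample);
		/* ... critical section ... */
		/* Reuses the "sample" prefix to find the same MCS node */
		LOCK_UNLOCK_WITH_BOOKKEEPING(&sample_lock, sample);
	}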
/**

@@ -9,7 +9,6 @@
#include "admissions_info.h"
#include "http.h"
#include "panic.h"
#include "software_interrupt.h"
#include "types.h"
/* Wasm initialization functions generated by the compiler */
@@ -161,9 +160,6 @@ module_initialize_memory(struct module *module)
static inline void
module_validate(struct module *module)
{
/* Assumption: Software Interrupts are disabled by caller */
assert(!software_interrupt_is_enabled());
if (!module) {
panic("module %p | module is unexpectedly NULL\n", module);
} else if (!module->dynamic_library_handle) {

@@ -85,7 +85,6 @@ priority_queue_is_empty(struct priority_queue *self)
{
assert(self != NULL);
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(listener_thread_is_running() || !software_interrupt_is_enabled());
return self->size == 0;
}
@@ -164,7 +163,6 @@ priority_queue_percolate_down(struct priority_queue *self, int parent_index)
assert(self->get_priority_fn != NULL);
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(!listener_thread_is_running());
assert(!software_interrupt_is_enabled());
bool update_highest_value = parent_index == 1;
@@ -211,7 +209,6 @@ priority_queue_dequeue_if_earlier_nolock(struct priority_queue *self, void **deq
assert(dequeued_element != NULL);
assert(self->get_priority_fn != NULL);
assert(!listener_thread_is_running());
assert(!software_interrupt_is_enabled());
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
int return_code;
@@ -289,7 +286,6 @@ static inline void
priority_queue_free(struct priority_queue *self)
{
assert(self != NULL);
assert(listener_thread_is_running() || !software_interrupt_is_enabled());
free(self);
}
@@ -303,7 +299,6 @@ priority_queue_length_nolock(struct priority_queue *self)
{
assert(self != NULL);
assert(!listener_thread_is_running());
assert(!software_interrupt_is_enabled());
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
return self->size;
@@ -332,7 +327,6 @@ priority_queue_enqueue_nolock(struct priority_queue *self, void *value)
{
assert(self != NULL);
assert(value != NULL);
assert(listener_thread_is_running() || !software_interrupt_is_enabled());
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
int rc;
@@ -377,7 +371,6 @@ priority_queue_delete_nolock(struct priority_queue *self, void *value)
assert(self != NULL);
assert(value != NULL);
assert(!listener_thread_is_running());
assert(!software_interrupt_is_enabled());
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
for (int i = 1; i <= self->size; i++) {
@@ -444,7 +437,6 @@ priority_queue_top_nolock(struct priority_queue *self, void **dequeued_element)
assert(dequeued_element != NULL);
assert(self->get_priority_fn != NULL);
assert(!listener_thread_is_running());
assert(!software_interrupt_is_enabled());
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
int return_code;

@@ -21,14 +21,12 @@ sandbox_exit(struct sandbox *exiting_sandbox)
/*
* We draw a distinction between RETURNED and COMPLETED because a sandbox cannot add itself to the
* completion queue
* TODO: I think this executes when running inside the sandbox, as it hasn't yet yielded
*/
sandbox_set_as_complete(exiting_sandbox, SANDBOX_RETURNED);
break;
case SANDBOX_BLOCKED:
/* Cooperative yield, so just break */
break;
case SANDBOX_ERROR:
/* Terminal State, so just break */
break;
default:
panic("Cooperatively switching from a sandbox in a non-terminal %s state\n",

@@ -1,5 +1,6 @@
#pragma once
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

@@ -20,7 +20,6 @@ static inline void
sandbox_set_as_blocked(struct sandbox *sandbox, sandbox_state_t last_state)
{
assert(sandbox);
assert(!software_interrupt_is_enabled());
uint64_t now = __getcycles();
uint64_t duration_of_last_state = now - sandbox->last_state_change_timestamp;

@@ -10,7 +10,6 @@
#include "sandbox_state.h"
#include "sandbox_summarize_page_allocations.h"
#include "sandbox_types.h"
#include "software_interrupt.h"
/**
* Transitions a sandbox from the SANDBOX_RETURNED state to the SANDBOX_COMPLETE state.
@@ -23,7 +22,6 @@ static inline void
sandbox_set_as_complete(struct sandbox *sandbox, sandbox_state_t last_state)
{
assert(sandbox);
assert(!software_interrupt_is_enabled());
uint64_t now = __getcycles();
uint64_t duration_of_last_state = now - sandbox->last_state_change_timestamp;
@@ -42,18 +40,22 @@ sandbox_set_as_complete(struct sandbox *sandbox, sandbox_state_t last_state)
}
}
uint64_t sandbox_id = sandbox->id;
sandbox->state = SANDBOX_COMPLETE;
sandbox_print_perf(sandbox);
sandbox_summarize_page_allocations(sandbox);
sandbox->last_state_change_timestamp = now;
sandbox->state = SANDBOX_COMPLETE;
/* State Change Bookkeeping */
sandbox_state_log_transition(sandbox->id, last_state, SANDBOX_COMPLETE);
runtime_sandbox_total_increment(SANDBOX_COMPLETE);
runtime_sandbox_total_decrement(last_state);
/* Admissions Control Post Processing */
admissions_info_update(&sandbox->module->admissions_info, sandbox->running_duration);
admissions_control_subtract(sandbox->admissions_estimate);
/* Terminal State Logging */
sandbox_print_perf(sandbox);
sandbox_summarize_page_allocations(sandbox);
/* Do not touch sandbox state after adding to completion queue to avoid use-after-free bugs */
local_completion_queue_add(sandbox);
/* State Change Bookkeeping */
sandbox_state_log_transition(sandbox_id, last_state, SANDBOX_COMPLETE);
runtime_sandbox_total_increment(SANDBOX_COMPLETE);
runtime_sandbox_total_decrement(last_state);
}
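The reordering above is the substance of this hunk: local_completion_queue_add publishes the sandbox for reclamation, so any bookkeeping that runs afterwards must read only locals cached beforehand. Distilled, with names from this hunk:

	uint64_t sandbox_id = sandbox->id;   /* cache before publishing */
	local_completion_queue_add(sandbox); /* another thread may free sandbox from here on */
	/* Safe: touches only the cached copy, never the (possibly freed) sandbox */
	sandbox_state_log_transition(sandbox_id, last_state, SANDBOX_COMPLETE);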

@@ -27,7 +27,6 @@ static inline void
sandbox_set_as_error(struct sandbox *sandbox, sandbox_state_t last_state)
{
assert(sandbox);
assert(!software_interrupt_is_enabled());
uint64_t now = __getcycles();
uint64_t duration_of_last_state = now - sandbox->last_state_change_timestamp;

@@ -7,7 +7,6 @@
#include "arch/context.h"
#include "current_sandbox.h"
#include "ps_list.h"
#include "software_interrupt.h"
#include "sandbox_request.h"
#include "sandbox_types.h"
@@ -22,7 +21,6 @@ static inline void
sandbox_set_as_initialized(struct sandbox *sandbox, struct sandbox_request *sandbox_request,
uint64_t allocation_timestamp)
{
assert(!software_interrupt_is_enabled());
assert(sandbox != NULL);
assert(sandbox->state == SANDBOX_ALLOCATED);
assert(sandbox_request != NULL);

@@ -9,12 +9,11 @@
#include "sandbox_functions.h"
#include "sandbox_state.h"
#include "sandbox_types.h"
#include "software_interrupt.h"
/**
* Transitions a sandbox to the SANDBOX_RETURNED state.
* This occurs when a sandbox is executing and runs to completion.
* Automatically removes the sandbox from the runqueue and unmaps linear memory.
* Automatically removes the sandbox from the runqueue and frees linear memory.
* Because the stack is still in use, freeing the stack is deferred until later
* @param sandbox the returning sandbox
* @param last_state the state the sandbox is transitioning from. This is expressed as a constant to
@@ -24,7 +23,6 @@ static inline void
sandbox_set_as_returned(struct sandbox *sandbox, sandbox_state_t last_state)
{
assert(sandbox);
assert(!software_interrupt_is_enabled());
uint64_t now = __getcycles();
uint64_t duration_of_last_state = now - sandbox->last_state_change_timestamp;

@@ -4,8 +4,8 @@
#include <stdint.h>
#include "arch/getcycles.h"
#include "local_runqueue.h"
#include "panic.h"
#include "software_interrupt.h"
#include "sandbox_types.h"
/**
@@ -23,7 +23,6 @@ static inline void
sandbox_set_as_runnable(struct sandbox *sandbox, sandbox_state_t last_state)
{
assert(sandbox);
assert(!software_interrupt_is_enabled());
uint64_t now = __getcycles();
uint64_t duration_of_last_state = now - sandbox->last_state_change_timestamp;
@@ -33,14 +32,17 @@ sandbox_set_as_runnable(struct sandbox *sandbox, sandbox_state_t last_state)
switch (last_state) {
case SANDBOX_INITIALIZED: {
sandbox->initializing_duration += duration_of_last_state;
local_runqueue_add(sandbox);
break;
}
case SANDBOX_BLOCKED: {
sandbox->blocked_duration += duration_of_last_state;
local_runqueue_add(sandbox);
break;
}
case SANDBOX_RUNNING: {
sandbox->running_duration += duration_of_last_state;
/* No need to add to runqueue, as already on it */
break;
}
default: {

@@ -5,14 +5,12 @@
#include "arch/getcycles.h"
#include "panic.h"
#include "software_interrupt.h"
#include "sandbox_types.h"
static inline void
sandbox_set_as_running(struct sandbox *sandbox, sandbox_state_t last_state)
{
assert(sandbox);
assert(!software_interrupt_is_enabled());
uint64_t now = __getcycles();
uint64_t duration_of_last_state = now - sandbox->last_state_change_timestamp;
@@ -22,6 +20,9 @@ sandbox_set_as_running(struct sandbox *sandbox, sandbox_state_t last_state)
switch (last_state) {
case SANDBOX_RUNNABLE: {
sandbox->runnable_duration += duration_of_last_state;
current_sandbox_set(sandbox);
runtime_worker_threads_deadline[worker_thread_idx] = sandbox->absolute_deadline;
/* Does not perform the context switch itself because the caller knows whether a fast or slow switch is needed */
break;
}
default: {

@@ -33,8 +33,6 @@ extern enum SCHEDULER scheduler;
static inline struct sandbox *
scheduler_edf_get_next()
{
assert(!software_interrupt_is_enabled());
/* Get the deadline of the sandbox at the head of the local request queue */
struct sandbox * local = local_runqueue_get_next();
uint64_t local_deadline = local == NULL ? UINT64_MAX : local->absolute_deadline;
@@ -53,7 +51,6 @@ scheduler_edf_get_next()
assert(global->state == SANDBOX_INITIALIZED);
sandbox_set_as_runnable(global, SANDBOX_INITIALIZED);
local_runqueue_add(global);
}
}
@@ -70,8 +67,6 @@ err_allocate:
static inline struct sandbox *
scheduler_fifo_get_next()
{
assert(!software_interrupt_is_enabled());
struct sandbox * sandbox = local_runqueue_get_next();
struct sandbox_request *sandbox_request = NULL;
@@ -84,7 +79,6 @@ scheduler_fifo_get_next()
if (!sandbox) goto err_allocate;
sandbox_set_as_runnable(sandbox, SANDBOX_INITIALIZED);
local_runqueue_add(sandbox);
};
done:
@@ -151,12 +145,12 @@ scheduler_runqueue_initialize()
static inline void
scheduler_preempt(ucontext_t *user_context)
{
// If FIFO, just return
/* If FIFO, just return
* TODO: Should this RR? */
if (scheduler == SCHEDULER_FIFO) return;
assert(scheduler == SCHEDULER_EDF);
assert(user_context != NULL);
assert(!software_interrupt_is_enabled());
/* Process epoll to make sure that all runnable jobs are considered for execution */
worker_thread_execute_epoll_loop();
@@ -171,6 +165,10 @@ scheduler_preempt(ucontext_t *user_context)
/* If current equals return, we are already running earliest deadline, so resume execution */
if (current == next) return;
#ifdef LOG_PREEMPTION
debuglog("Preempting sandbox %lu to run sandbox %lu\n", current->id, next->id);
#endif
/* Save the context of the currently executing sandbox before switching from it */
sandbox_set_as_runnable(current, SANDBOX_RUNNING);
arch_mcontext_save(&current->ctxt, &user_context->uc_mcontext);
@@ -178,12 +176,10 @@ scheduler_preempt(ucontext_t *user_context)
/* Update current_sandbox to the next sandbox */
assert(next->state == SANDBOX_RUNNABLE);
sandbox_set_as_running(next, SANDBOX_RUNNABLE);
current_sandbox_set(next);
/* Update the current deadline of the worker thread */
runtime_worker_threads_deadline[worker_thread_idx] = next->absolute_deadline;
/* Restore the context of this sandbox */
/* A sandbox cannot have been preempted into a slow context here because it was in the
* runqueue during the last scheduling decision. */
assert(next->ctxt.variant == ARCH_CONTEXT_VARIANT_FAST);
arch_context_restore_new(&user_context->uc_mcontext, &next->ctxt);
}
@@ -198,53 +194,49 @@ scheduler_print(enum SCHEDULER variant)
}
}
static inline void
scheduler_log_sandbox_switch(struct sandbox *current_sandbox, struct sandbox *next_sandbox)
{
#ifdef LOG_CONTEXT_SWITCHES
if (current_sandbox == NULL) {
/* Switching from "Base Context" */
debuglog("Base Context (@%p) (%s) > Sandbox %lu (@%p) (%s)\n", &worker_thread_base_context,
arch_context_variant_print(worker_thread_base_context.variant), next_sandbox->id,
&next_sandbox->ctxt, arch_context_variant_print(next_sandbox->ctxt.variant));
} else {
debuglog("Sandbox %lu (@%p) (%s) > Sandbox %lu (@%p) (%s)\n", current_sandbox->id,
&current_sandbox->ctxt, arch_context_variant_print(current_sandbox->ctxt.variant),
next_sandbox->id, &next_sandbox->ctxt, arch_context_variant_print(next_sandbox->ctxt.variant));
}
#endif
}
/**
* @brief Switches to the next sandbox, placing the current sandbox on the completion queue if in SANDBOX_RETURNED state
* @brief Switches to the next sandbox, placing the current sandbox on the completion queue if in
* SANDBOX_RETURNED state
* @param next_sandbox The Sandbox Context to switch to
*/
static inline void
scheduler_switch_to(struct sandbox *next_sandbox)
{
/* Assumption: The caller disables interrupts */
assert(!software_interrupt_is_enabled());
assert(next_sandbox != NULL);
assert(next_sandbox->state == SANDBOX_RUNNABLE);
struct arch_context *next_context = &next_sandbox->ctxt;
/* Get the old sandbox we're switching from.
* This is null if switching from base context
*/
struct sandbox * current_sandbox = current_sandbox_get();
struct arch_context *current_context = NULL;
if (current_sandbox != NULL) current_context = &current_sandbox->ctxt;
struct sandbox *current_sandbox = current_sandbox_get();
assert(next_sandbox != current_sandbox);
/* If not the current sandbox (which would be in running state), should be runnable */
assert(next_sandbox->state == SANDBOX_RUNNABLE);
/* Update the worker's absolute deadline */
runtime_worker_threads_deadline[worker_thread_idx] = next_sandbox->absolute_deadline;
if (current_sandbox == NULL) {
/* Switching from "Base Context" */
#ifdef LOG_CONTEXT_SWITCHES
debuglog("Base Context (@%p) (%s) > Sandbox %lu (@%p) (%s)\n", &worker_thread_base_context,
arch_context_variant_print(worker_thread_base_context.variant), next_sandbox->id, next_context,
arch_context_variant_print(next_context->variant));
#endif
} else {
#ifdef LOG_CONTEXT_SWITCHES
debuglog("Sandbox %lu (@%p) (%s) > Sandbox %lu (@%p) (%s)\n", current_sandbox->id,
&current_sandbox->ctxt, arch_context_variant_print(current_sandbox->ctxt.variant),
next_sandbox->id, &next_sandbox->ctxt, arch_context_variant_print(next_context->variant));
#endif
struct arch_context *current_context = NULL;
if (current_sandbox != NULL) {
current_context = &current_sandbox->ctxt;
sandbox_exit(current_sandbox);
}
scheduler_log_sandbox_switch(current_sandbox, next_sandbox);
sandbox_set_as_running(next_sandbox, next_sandbox->state);
current_sandbox_set(next_sandbox);
arch_context_switch(current_context, next_context);
}
@@ -255,19 +247,12 @@ scheduler_switch_to(struct sandbox *next_sandbox)
static inline void
scheduler_yield()
{
assert(!software_interrupt_is_enabled());
struct sandbox *current_sandbox = current_sandbox_get();
#ifndef NDEBUG
if (current_sandbox != NULL) {
assert(current_sandbox->state < SANDBOX_STATE_COUNT);
assert(current_sandbox->stack_size == current_sandbox->module->stack_size);
}
#endif
/* Assumption: Base Context should never switch to Base Context */
assert(current_sandbox != NULL);
struct arch_context *current_context = &current_sandbox->ctxt;
/* Assumption: Base Context should never switch to Base Context */
assert(current_context != &worker_thread_base_context);
#ifdef LOG_CONTEXT_SWITCHES
@@ -278,8 +263,10 @@ scheduler_yield()
sandbox_exit(current_sandbox);
current_sandbox_set(NULL);
assert(worker_thread_base_context.variant == ARCH_CONTEXT_VARIANT_FAST);
runtime_worker_threads_deadline[worker_thread_idx] = UINT64_MAX;
/* Assumption: Base Worker context should never be preempted */
assert(worker_thread_base_context.variant == ARCH_CONTEXT_VARIANT_FAST);
arch_context_switch(current_context, &worker_thread_base_context);
}
@@ -293,13 +280,6 @@ scheduler_block(void)
/* Remove the sandbox we were just executing from the runqueue and mark as blocked */
struct sandbox *current_sandbox = current_sandbox_get();
/* We might have blocked either while starting to read the request or while executing within the
* WebAssembly entrypoint. The preemptable flag on the context is used to differentiate. In either
* case, we should have disabled interrupts.
*/
if (current_sandbox->ctxt.preemptable) software_interrupt_disable();
assert(!software_interrupt_is_enabled());
assert(current_sandbox->state == SANDBOX_RUNNING);
sandbox_set_as_blocked(current_sandbox, SANDBOX_RUNNING);
generic_thread_dump_lock_overhead();

@@ -17,7 +17,6 @@
* Externs *
***********/
extern __thread volatile sig_atomic_t software_interrupt_is_disabled;
extern _Atomic __thread volatile sig_atomic_t software_interrupt_deferred_sigalrm;
extern _Atomic volatile sig_atomic_t software_interrupt_deferred_sigalrm_max[RUNTIME_WORKER_THREAD_CORE_COUNT];
@@ -25,41 +24,6 @@ extern _Atomic volatile sig_atomic_t software_interrupt_deferred_sigalr
* Public Static Inlines *
************************/
static inline void
software_interrupt_disable(void)
{
if (__sync_bool_compare_and_swap(&software_interrupt_is_disabled, 0, 1) == false) {
panic("Recursive call to software_interrupt_disable\n");
}
}
/**
* Enables signals
*/
static inline void
software_interrupt_enable(void)
{
if (__sync_bool_compare_and_swap(&software_interrupt_is_disabled, 1, 0) == false) {
panic("Recursive call to software_interrupt_enable\n");
}
if (software_interrupt_deferred_sigalrm > 0) {
// TODO: Atomic set?
software_interrupt_deferred_sigalrm_max[worker_thread_idx] = software_interrupt_deferred_sigalrm;
software_interrupt_deferred_sigalrm = 0;
// TODO: REPLAY sigalrm;
}
}
/**
* @returns boolean if signals are enabled
*/
static inline int
software_interrupt_is_enabled(void)
{
return (software_interrupt_is_disabled == 0);
}
/**
* Masks a signal on the current thread
* @param signal - the signal you want to mask

@@ -11,7 +11,6 @@
#include "sandbox_set_as_runnable.h"
#include "sandbox_state.h"
#include "sandbox_types.h"
#include "software_interrupt.h"
#include "worker_thread.h"
@@ -21,7 +20,6 @@
static inline void
worker_thread_execute_epoll_loop(void)
{
assert(!software_interrupt_is_enabled());
while (true) {
struct epoll_event epoll_events[RUNTIME_MAX_EPOLL_EVENTS];
int descriptor_count = epoll_wait(worker_thread_epoll_file_descriptor, epoll_events,
@@ -43,7 +41,6 @@ worker_thread_execute_epoll_loop(void)
if (sandbox->state == SANDBOX_BLOCKED) {
sandbox_set_as_runnable(sandbox, SANDBOX_BLOCKED);
local_runqueue_add(sandbox);
}
} else if (epoll_events[i].events & (EPOLLERR | EPOLLHUP)) {
/* Mystery: This seems to never fire. Why? Issue #130 */

@@ -40,7 +40,6 @@ void
admissions_info_update(struct admissions_info *self, uint64_t execution_duration)
{
#ifdef ADMISSIONS_CONTROL
assert(!software_interrupt_is_enabled());
struct perf_window *perf_window = &self->perf_window;
LOCK_LOCK(&self->perf_window.lock);

@@ -4,7 +4,6 @@
#include <stdlib.h>
#include "panic.h"
#include "software_interrupt.h"
/**
* Called by the inline assembly in arch_context_switch to send a SIGUSR1 in order to restore a previously preempted
@@ -14,7 +13,6 @@
*/
void __attribute__((noinline)) __attribute__((noreturn)) arch_context_restore_preempted(void)
{
assert(!software_interrupt_is_enabled());
pthread_kill(pthread_self(), SIGUSR1);
panic("Unexpectedly reached code after sending self SIGUSR1\n");
}

@@ -6,8 +6,8 @@
#include "sandbox_set_as_returned.h"
#include "sandbox_setup_arguments.h"
#include "scheduler.h"
#include "software_interrupt.h"
/* The current sandbox that is active on this worker thread */
__thread struct sandbox *worker_thread_current_sandbox = NULL;
__thread struct sandbox_context_cache local_sandbox_context_cache = {
@@ -20,12 +20,24 @@ static inline void
current_sandbox_enable_preemption(struct sandbox *sandbox)
{
#ifdef LOG_PREEMPTION
debuglog("Sandbox %lu - enabling preemption\n", sandbox->id);
debuglog("Sandbox %lu - enabling preemption - Missed %d SIGALRM\n", sandbox->id,
software_interrupt_deferred_sigalrm);
fflush(stderr);
#endif
assert(sandbox->ctxt.preemptable == false);
sandbox->ctxt.preemptable = true;
software_interrupt_enable();
if (__sync_bool_compare_and_swap(&sandbox->ctxt.preemptable, 0, 1) == false) {
panic("Recursive call to current_sandbox_enable_preemption\n");
}
if (software_interrupt_deferred_sigalrm > 0) {
/* Update Max */
if (software_interrupt_deferred_sigalrm > software_interrupt_deferred_sigalrm_max[worker_thread_idx]) {
software_interrupt_deferred_sigalrm_max[worker_thread_idx] =
software_interrupt_deferred_sigalrm;
}
software_interrupt_deferred_sigalrm = 0;
// TODO: Replay. Does the replay need to be before or after enabling preemption?
}
}
static inline void
@@ -35,9 +47,9 @@ current_sandbox_disable_preemption(struct sandbox *sandbox)
debuglog("Sandbox %lu - disabling preemption\n", sandbox->id);
fflush(stderr);
#endif
assert(sandbox->ctxt.preemptable == true);
software_interrupt_disable();
sandbox->ctxt.preemptable = false;
if (__sync_bool_compare_and_swap(&sandbox->ctxt.preemptable, 1, 0) == false) {
panic("Recursive call to current_sandbox_disable_preemption\n");
}
}
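With software_interrupt_enable/disable gone, these compare-and-swap panics are what enforce balance: an unpaired or reentrant toggle fails loudly instead of silently corrupting the preemptable flag. A hedged sketch of the intended call pattern (the surrounding WebAssembly call is hypothetical):

	struct sandbox *sandbox = current_sandbox_get();
	current_sandbox_enable_preemption(sandbox);  /* preemptable: 0 -> 1, records deferred SIGALRM max */
	/* ... run the preemptable WebAssembly entrypoint ... */
	current_sandbox_disable_preemption(sandbox); /* preemptable: 1 -> 0 */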
/**
@@ -48,8 +60,6 @@ current_sandbox_disable_preemption(struct sandbox *sandbox)
void
current_sandbox_start(void)
{
assert(!software_interrupt_is_enabled());
struct sandbox *sandbox = current_sandbox_get();
assert(sandbox != NULL);
assert(sandbox->state == SANDBOX_RUNNING);

@@ -1,4 +1,5 @@
#include <assert.h>
#include <errno.h>
#include "global_request_scheduler.h"
#include "listener_thread.h"
@@ -33,7 +34,6 @@ global_request_scheduler_minheap_add(void *sandbox_request)
int
global_request_scheduler_minheap_remove(struct sandbox_request **removed_sandbox_request)
{
assert(!software_interrupt_is_enabled());
return priority_queue_dequeue(global_request_scheduler_minheap, (void **)removed_sandbox_request);
}
@@ -46,7 +46,6 @@
int
global_request_scheduler_minheap_remove_if_earlier(struct sandbox_request **removed_sandbox_request,
uint64_t target_deadline)
{
assert(!software_interrupt_is_enabled());
return priority_queue_dequeue_if_earlier(global_request_scheduler_minheap, (void **)removed_sandbox_request,
target_deadline);
}

@@ -48,8 +48,7 @@ local_runqueue_list_get_next()
/* Execute Round Robin Scheduling Logic */
struct sandbox *next_sandbox = local_runqueue_list_remove_and_return();
assert(next_sandbox == NULL || next_sandbox->state != SANDBOX_RETURNED);
local_runqueue_add(next_sandbox);
assert(next_sandbox->state == SANDBOX_RUNNABLE);
return next_sandbox;
}

@@ -10,7 +10,6 @@
#include "panic.h"
#include "priority_queue.h"
#include "sandbox_functions.h"
#include "software_interrupt.h"
#include "runtime.h"
__thread static struct priority_queue *local_runqueue_minheap;
@@ -33,8 +32,6 @@ local_runqueue_minheap_is_empty()
void
local_runqueue_minheap_add(struct sandbox *sandbox)
{
assert(!software_interrupt_is_enabled());
int return_code = priority_queue_enqueue_nolock(local_runqueue_minheap, sandbox);
/* TODO: propagate RC to caller. Issue #92 */
if (return_code == -ENOSPC) panic("Thread Runqueue is full!\n");
@@ -47,7 +44,6 @@ local_runqueue_minheap_add(struct sandbox *sandbox)
static void
local_runqueue_minheap_delete(struct sandbox *sandbox)
{
assert(!software_interrupt_is_enabled());
assert(sandbox != NULL);
int rc = priority_queue_delete_nolock(local_runqueue_minheap, sandbox);
@@ -65,8 +61,6 @@ local_runqueue_minheap_delete(struct sandbox *sandbox)
struct sandbox *
local_runqueue_minheap_get_next()
{
assert(!software_interrupt_is_enabled());
/* Get the deadline of the sandbox at the head of the local request queue */
struct sandbox *next = NULL;
int rc = priority_queue_top_nolock(local_runqueue_minheap, (void **)&next);
@@ -82,7 +76,6 @@ local_runqueue_minheap_get_next()
void
local_runqueue_minheap_initialize()
{
assert(software_interrupt_is_disabled);
/* Initialize local state */
local_runqueue_minheap = priority_queue_initialize(256, false, sandbox_get_priority);

@@ -94,7 +94,6 @@ sandbox_allocate_stack(struct sandbox *sandbox)
{
assert(sandbox);
assert(sandbox->module);
assert(!software_interrupt_is_enabled());
errno = 0;
char *addr = mmap(NULL, sandbox->module->stack_size + /* guard page */ PAGE_SIZE, PROT_NONE,
@@ -128,9 +127,6 @@ err_stack_allocation_failed:
struct sandbox *
sandbox_allocate(struct sandbox_request *sandbox_request)
{
/* Assumption: Caller has disabled software interrupts */
assert(!software_interrupt_is_enabled());
/* Validate Arguments */
assert(sandbox_request != NULL);
module_validate(sandbox_request->module);

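The mmap in sandbox_allocate_stack above reserves the stack plus one PROT_NONE guard page, so an overflow faults instead of silently corrupting adjacent memory. A hedged sketch of the technique (error handling elided; the flags are assumed, not copied from this commit):

	#include <sys/mman.h>

	/* Reserve stack_size + PAGE_SIZE with no permissions */
	char *addr = mmap(NULL, stack_size + PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* Leave the lowest page as the guard; make the rest usable.
	 * A downward-growing stack that overruns its bottom hits PROT_NONE and faults. */
	mprotect(addr + PAGE_SIZE, stack_size, PROT_READ | PROT_WRITE);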
@@ -34,7 +34,6 @@ static uint64_t software_interrupt_interval_duration_in_cycles;
__thread _Atomic static volatile sig_atomic_t software_interrupt_SIGALRM_kernel_count = 0;
__thread _Atomic static volatile sig_atomic_t software_interrupt_SIGALRM_thread_count = 0;
__thread _Atomic static volatile sig_atomic_t software_interrupt_SIGUSR_count = 0;
__thread volatile sig_atomic_t software_interrupt_is_disabled = 1;
__thread _Atomic volatile sig_atomic_t software_interrupt_deferred_sigalrm = 0;
__thread _Atomic volatile sig_atomic_t software_interrupt_signal_depth = 0;
@@ -79,6 +78,7 @@ sigalrm_propagate_workers(siginfo_t *signal_info)
/* If using EDF, conditionally send signals. If not, broadcast */
switch (runtime_sigalrm_handler) {
case RUNTIME_SIGALRM_HANDLER_TRIAGED: {
assert(scheduler == SCHEDULER_EDF);
uint64_t local_deadline = runtime_worker_threads_deadline[i];
uint64_t global_deadline = global_request_scheduler_peek();
if (global_deadline < local_deadline) pthread_kill(runtime_worker_threads[i], SIGALRM);
@@ -100,66 +100,6 @@ sigalrm_propagate_workers(siginfo_t *signal_info)
}
}
/**
* SIGALRM is the preemption signal that occurs every quantum of execution
* @param signal_info data structure containing signal info
* @param user_context userland context
* @param current_sandbox the sandbox active on the worker thread
*/
static inline void
sigalrm_handler(siginfo_t *signal_info, ucontext_t *user_context, struct sandbox *current_sandbox)
{
/* A worker thread received a SIGALRM when interrupts were disabled, so defer until they are reenabled */
if (!software_interrupt_is_enabled()) {
// Don't increment if kernel? The first worker gets tons of these...
atomic_fetch_add(&software_interrupt_deferred_sigalrm, 1);
return;
}
/* A worker thread received a SIGALRM while running a preemptable sandbox, so preempt */
assert(current_sandbox->ctxt.preemptable);
software_interrupt_disable();
assert(current_sandbox != NULL);
assert(current_sandbox->state != SANDBOX_RETURNED);
/* Preempt */
scheduler_preempt(user_context);
return;
}
/**
* SIGUSR1 restores a preempted sandbox using mcontext
* @param signal_info data structure containing signal info
* @param user_context userland context
* @param current_sandbox the sandbox active on the worker thread
*/
static inline void
sigusr1_handler(siginfo_t *signal_info, ucontext_t *user_context, struct sandbox *current_sandbox)
{
/* Assumption: Caller disables interrupts before triggering SIGUSR1 */
assert(!software_interrupt_is_enabled());
/* Assumption: Caller sets current_sandbox to the preempted sandbox */
assert(current_sandbox);
/* Extra checks to verify that preemption properly set context state */
assert(current_sandbox->ctxt.variant == ARCH_CONTEXT_VARIANT_SLOW);
atomic_fetch_add(&software_interrupt_SIGUSR_count, 1);
#ifdef LOG_PREEMPTION
debuglog("Total SIGUSR1 Received: %d\n", software_interrupt_SIGUSR_count);
debuglog("Restoring sandbox: %lu, Stack %llu\n", current_sandbox->id,
current_sandbox->ctxt.mctx.gregs[REG_RSP]);
#endif
arch_mcontext_restore(&user_context->uc_mcontext, &current_sandbox->ctxt);
return;
}
/**
* Validates that the thread running the signal handler is a known worker thread
*/
@@ -182,27 +122,49 @@ software_interrupt_validate_worker()
static inline void
software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void *user_context_raw)
{
/* If the runtime has preemption disabled, and we receive a signal, panic */
if (unlikely(!runtime_preemption_enabled)) {
panic("Unexpectedly invoked signal handlers with preemption disabled\n");
}
/* Only workers should receive signals */
assert(!listener_thread_is_running());
assert(software_interrupt_signal_depth < 2);
/* Signals should be masked if runtime has disabled them */
assert(runtime_preemption_enabled);
/* Signals should not nest */
/* TODO: Better atomic instruction here to check and set? */
assert(software_interrupt_signal_depth == 0);
atomic_fetch_add(&software_interrupt_signal_depth, 1);
software_interrupt_validate_worker();
ucontext_t * user_context = (ucontext_t *)user_context_raw;
struct sandbox *current_sandbox = current_sandbox_get();
switch (signal_type) {
case SIGALRM: {
sigalrm_handler(signal_info, user_context, current_sandbox);
break;
sigalrm_propagate_workers(signal_info);
if (current_sandbox == NULL || current_sandbox->ctxt.preemptable == false) {
/* Cannot preempt, so defer signal
* TODO: The first worker gets tons of kernel SIGALRMs; should these be treated the same?
* When current_sandbox is NULL, we are looping through the scheduler, so a SIGALRM is redundant
* Maybe track the time of the last scheduling decision, i.e. when scheduler_get_next was last called.
*/
atomic_fetch_add(&software_interrupt_deferred_sigalrm, 1);
} else {
/* A worker thread received a SIGALRM while running a preemptable sandbox, so preempt */
assert(current_sandbox->state == SANDBOX_RUNNING);
scheduler_preempt(user_context);
}
goto done;
}
case SIGUSR1: {
assert(!software_interrupt_is_enabled());
sigusr1_handler(signal_info, user_context, current_sandbox);
break;
assert(current_sandbox);
assert(current_sandbox->ctxt.variant == ARCH_CONTEXT_VARIANT_SLOW);
atomic_fetch_add(&software_interrupt_SIGUSR_count, 1);
#ifdef LOG_PREEMPTION
debuglog("Total SIGUSR1 Received: %d\n", software_interrupt_SIGUSR_count);
debuglog("Restoring sandbox: %lu, Stack %llu\n", current_sandbox->id,
current_sandbox->ctxt.mctx.gregs[REG_RSP]);
#endif
arch_mcontext_restore(&user_context->uc_mcontext, &current_sandbox->ctxt);
goto done;
}
default: {
switch (signal_info->si_code) {
@@ -216,13 +178,8 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
}
}
}
done:
atomic_fetch_sub(&software_interrupt_signal_depth, 1);
/* Reenable software interrupts if we restored a preemptable sandbox
* We explicitly call current_sandbox_get because it might have been changed by a handler
*/
current_sandbox = current_sandbox_get();
if (current_sandbox && current_sandbox->ctxt.preemptable) software_interrupt_enable();
}
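The software_interrupt_signal_depth counter above is a general non-reentrancy guard for signal handlers. A standalone sketch of the same pattern (hypothetical names, plain C11 atomics instead of this codebase's globals):

	#include <assert.h>
	#include <signal.h>
	#include <stdatomic.h>

	static _Atomic sig_atomic_t signal_depth = 0;

	static void
	nonreentrant_handler(int signal_type, siginfo_t *signal_info, void *user_context_raw)
	{
		/* Fail fast if a second signal lands before the first handler returns */
		assert(atomic_load(&signal_depth) == 0);
		atomic_fetch_add(&signal_depth, 1);

		/* ... dispatch on signal_type ... */

		atomic_fetch_sub(&signal_depth, 1);
	}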
/********************

@@ -40,9 +40,6 @@ __thread int worker_thread_idx;
void *
worker_thread_main(void *argument)
{
/* The base worker thread should start with software interrupts disabled */
assert(software_interrupt_is_disabled);
/* Set base context as running */
worker_thread_base_context.variant = ARCH_CONTEXT_VARIANT_RUNNING;
@@ -70,8 +67,6 @@ worker_thread_main(void *argument)
/* Begin Worker Execution Loop */
struct sandbox *next_sandbox = NULL;
while (true) {
assert(!software_interrupt_is_enabled());
/* Assumption: current_sandbox should be unset at start of loop */
assert(current_sandbox_get() == NULL);
@@ -80,7 +75,6 @@
/* Switch to a sandbox if one is ready to run */
next_sandbox = scheduler_get_next();
if (next_sandbox != NULL) { scheduler_switch_to(next_sandbox); }
assert(!software_interrupt_is_enabled());
/* Clear the completion queue */
local_completion_queue_free();
