fix: correct assorted bugs

main
Sean McBride 4 years ago
parent 36d649e098
commit cbfad57af9

@ -32,6 +32,10 @@ admissions_info_initialize(struct admissions_info *self, int percentile, uint64_
self->percentile = percentile;
self->control_index = PERF_WINDOW_BUFFER_SIZE * percentile / 100;
#ifdef LOG_ADMISSIONS_CONTROL
debuglog("Percentile: %d\n", self->percentile);
debuglog("Control Index: %d\n", self->control_index);
#endif
#endif
}

@ -1,6 +1,8 @@
#pragma once
#include <assert.h>
#include "arch/common.h"
#include "current_sandbox.h"
#define ARCH_SIG_JMP_OFF 0x100 /* Based on code generated! */
@ -16,6 +18,7 @@ static inline void
arch_context_init(struct arch_context *actx, reg_t ip, reg_t sp)
{
assert(actx != NULL);
assert(!software_interrupt_is_enabled());
if (ip == 0 && sp == 0) {
actx->variant = ARCH_CONTEXT_VARIANT_UNUSED;
@ -68,6 +71,17 @@ arch_context_switch(struct arch_context *a, struct arch_context *b)
/* Assumption: Software Interrupts are disabled by caller */
assert(!software_interrupt_is_enabled());
#ifndef NDEBUG
/*
* Assumption: In the case of a slow context switch, the caller
* set current_sandbox to the sandbox containing the target context
*/
if (b->variant == ARCH_CONTEXT_VARIANT_SLOW) {
struct sandbox *current = current_sandbox_get();
assert(current != NULL && b == &current->ctxt);
}
#endif
/* if both a and b are NULL, there is no state change */
assert(a != NULL || b != NULL);

@ -32,13 +32,15 @@ static void __attribute__((noinline)) arch_context_init(struct arch_context *act
*/
asm volatile("movq %%rsp, %%rbx\n\t" /* Temporarily save pointer of active stack to B */
"movq %%rax, %%rsp\n\t" /* Set active stack to stack pointer in A(C variable sp) */
"pushq %%rax\n\t" /* Push A(C variable sp) onto the stack at sp */
"pushq %%rax\n\t" /* Push A register (C variable sp) onto the stack at sp */
"movq %%rsp, %%rax\n\t" /* Write the incremented stack pointer to A(C variable sp) */
"movq %%rbx, %%rsp\n\t" /* Restore original stack saved in B */
: "=a"(sp)
: "a"(sp)
: "memory", "cc", "rbx");
}
// FIXME: Is the clobber list correct?
// : "memory", "cc", "rbx", "rsi", "rdi");
actx->regs[UREG_SP] = sp;
actx->regs[UREG_IP] = ip;
@ -71,6 +73,33 @@ arch_context_restore(mcontext_t *active_context, struct arch_context *sandbox_co
active_context->gregs[REG_RIP] = sandbox_context->regs[UREG_IP] + ARCH_SIG_JMP_OFF;
}
/**
 * Restore a sandbox that preempted another by rewriting only the stack
 * pointer and instruction pointer registers of the interrupted thread's
 * machine context. Marks the sandbox context as RUNNING.
 * NOTE(review): the base pointer is deliberately not restored here —
 * confirm that FAST-variant contexts never depend on RBP.
 * @param active_context - mcontext of the current worker thread to modify
 * @param sandbox_context - context to restore; must be ARCH_CONTEXT_VARIANT_FAST
 */
static inline void
arch_context_restore_new(mcontext_t *active_context, struct arch_context *sandbox_context)
{
	assert(active_context != NULL);
	assert(sandbox_context != NULL);

	/* The base context is reserved for arch_context_switch and must not arrive here */
	assert(sandbox_context != &worker_thread_base_context);

	/* A restorable fast context always carries a valid IP and SP */
	assert(sandbox_context->regs[UREG_IP]);
	assert(sandbox_context->regs[UREG_SP]);

	/* Only a Fast context may transition to Running via this path */
	assert(sandbox_context->variant == ARCH_CONTEXT_VARIANT_FAST);
	sandbox_context->variant = ARCH_CONTEXT_VARIANT_RUNNING;

	active_context->gregs[REG_RIP] = sandbox_context->regs[UREG_IP];
	active_context->gregs[REG_RSP] = sandbox_context->regs[UREG_SP];
}
/**
* @param a - the registers and context of the thing running
* @param b - the registers and context of what we're switching to

@ -14,5 +14,6 @@
/**
 * Restore a preempted sandbox by raising SIGUSR1 on the current thread;
 * the SIGUSR1 handler performs the actual mcontext restore, so control
 * never returns here on success.
 */
void __attribute__((noinline)) __attribute__((noreturn)) arch_context_restore_preempted(void)
{
	/* pthread_kill returns an error number directly (it does not set errno) */
	int rc = pthread_kill(pthread_self(), SIGUSR1);
	if (rc != 0) panic("pthread_kill failed with error code %d\n", rc);

	panic("Unexpectedly reached code after sending self SIGUSR1\n");
}

@ -185,7 +185,7 @@ local_runqueue_minheap_preempt(ucontext_t *user_context)
* user-level context switch state, so do not enable software interrupts.
* TODO: Review the interrupt logic here. Issue #63
*/
arch_context_restore(&user_context->uc_mcontext, &next_sandbox->ctxt);
arch_context_restore_new(&user_context->uc_mcontext, &next_sandbox->ctxt);
should_enable_software_interrupt = false;
}
done:

@ -373,7 +373,6 @@ sandbox_allocate_memory(struct module *module)
error_message = "sandbox_allocate_memory - memory allocation failed";
goto alloc_failed;
}
sandbox = (struct sandbox *)addr;
/* Set the struct sandbox, HTTP Req/Resp buffer, and the initial Wasm Pages as read/write */
errno = 0;
@ -384,6 +383,8 @@ sandbox_allocate_memory(struct module *module)
goto set_rw_failed;
}
sandbox = (struct sandbox *)addr_rw;
/* Populate Sandbox members */
sandbox->state = SANDBOX_UNINITIALIZED;
sandbox->linear_memory_start = (char *)addr + sandbox_size;
@ -411,12 +412,23 @@ sandbox_allocate_stack(struct sandbox *sandbox)
{
assert(sandbox);
assert(sandbox->module);
assert(!software_interrupt_is_enabled());
errno = 0;
char *addr = mmap(NULL, sandbox->module->stack_size + /* guard page */ PAGE_SIZE, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) goto err_stack_allocation_failed;
/* Set the struct sandbox, HTTP Req/Resp buffer, and the initial Wasm Pages as read/write */
errno = 0;
char *addr_rw = mmap(addr + /* guard page */ PAGE_SIZE, sandbox->module->stack_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
/* TODO: Fix leak here */
if (addr_rw == MAP_FAILED) goto err_stack_allocation_failed;
sandbox->stack_start = addr_rw;
sandbox->stack_size = sandbox->module->stack_size;
sandbox->stack_start = mmap(NULL, sandbox->stack_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN, -1, 0);
if (sandbox->stack_start == MAP_FAILED) goto err_stack_allocation_failed;
done:
return 0;
@ -450,8 +462,9 @@ sandbox_set_as_initialized(struct sandbox *sandbox, struct sandbox_request *sand
sandbox->state = SANDBOX_SET_AS_INITIALIZED;
/* Initialize the sandbox's context, stack, and instruction pointer */
/* stack_start points to the bottom of the usable stack, so add stack_size to get to top */
arch_context_init(&sandbox->ctxt, (reg_t)current_sandbox_main,
(reg_t)(sandbox->stack_start + sandbox->stack_size));
(reg_t)sandbox->stack_start + sandbox->stack_size);
/* Initialize file descriptors to -1 */
for (int i = 0; i < SANDBOX_MAX_IO_HANDLE_COUNT; i++) sandbox->io_handles[i].file_descriptor = -1;
@ -875,13 +888,11 @@ sandbox_free(struct sandbox *sandbox)
module_release(sandbox->module);
void * stkaddr = sandbox->stack_start;
size_t stksz = sandbox->stack_size;
/* Free Sandbox Stack */
errno = 0;
rc = munmap(stkaddr, stksz);
/* The stack start is the bottom of the usable stack, but we allocated a guard page below this */
rc = munmap((char *)sandbox->stack_start - PAGE_SIZE, sandbox->stack_size + PAGE_SIZE);
if (rc == -1) {
debuglog("Failed to unmap stack of Sandbox %lu\n", sandbox->id);
goto err_free_stack_failed;

@ -129,6 +129,8 @@ sigusr1_handler(siginfo_t *signal_info, ucontext_t *user_context, struct sandbox
#ifdef LOG_PREEMPTION
debuglog("Total SIGUSR1 Received: %d\n", software_interrupt_SIGUSR_count);
debuglog("Restoring sandbox: %lu, Stack %llu\n", current_sandbox->id,
current_sandbox->ctxt.mctx.gregs[REG_RSP]);
#endif
arch_mcontext_restore(&user_context->uc_mcontext, &current_sandbox->ctxt);
@ -176,7 +178,7 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
case SIGUSR1: {
return sigusr1_handler(signal_info, user_context, current_sandbox);
}
default:
default: {
if (signal_info->si_code == SI_TKILL) {
panic("Unexpectedly received signal %d from a thread kill, but we have no handler\n",
signal_type);
@ -184,6 +186,7 @@ software_interrupt_handle_signals(int signal_type, siginfo_t *signal_info, void
panic("Unexpectedly received signal %d from the kernel, but we have no handler\n", signal_type);
}
}
}
#endif
}

@ -1,3 +1,4 @@
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <signal.h>
@ -102,24 +103,34 @@ worker_thread_switch_to_sandbox(struct sandbox *next_sandbox)
sandbox_set_as_running(next_sandbox, next_sandbox->state);
#ifdef LOG_CONTEXT_SWITCHES
debuglog("Base Context (%s) > Sandbox %lu (%s)\n",
arch_context_variant_print(worker_thread_base_context.variant), next_sandbox->id,
debuglog("Base Context (@%p) (%s) > Sandbox %lu (@%p) (%s)\n", &worker_thread_base_context,
arch_context_variant_print(worker_thread_base_context.variant), next_sandbox->id, next_context,
arch_context_variant_print(next_context->variant));
#endif
/* Assumption: If a slow context switch, current sandbox should be set to the target */
assert(next_context->variant != ARCH_CONTEXT_VARIANT_SLOW
|| &current_sandbox_get()->ctxt == next_context);
arch_context_switch(NULL, next_context);
} else {
/* Set the current sandbox to the next */
assert(next_sandbox != current_sandbox);
struct arch_context *current_context = &current_sandbox->ctxt;
#ifdef LOG_CONTEXT_SWITCHES
debuglog("Sandbox %lu (@%p) (%s) > Sandbox %lu (@%p) (%s)\n", current_sandbox->id,
&current_sandbox->ctxt, arch_context_variant_print(current_sandbox->ctxt.variant),
next_sandbox->id, &next_sandbox->ctxt, arch_context_variant_print(next_context->variant));
#endif
worker_thread_transition_exiting_sandbox(current_sandbox);
sandbox_set_as_running(next_sandbox, next_sandbox->state);
struct arch_context *current_context = &current_sandbox->ctxt;
#ifdef LOG_CONTEXT_SWITCHES
debuglog("Sandbox %lu > Sandbox %lu\n", current_sandbox->id, next_sandbox->id);
#ifndef NDEBUG
assert(next_context->variant != ARCH_CONTEXT_VARIANT_SLOW
|| &current_sandbox_get()->ctxt == next_context);
#endif
/* Switch to the associated context. */
@ -146,22 +157,21 @@ worker_thread_switch_to_base_context()
}
#endif
worker_thread_transition_exiting_sandbox(current_sandbox);
/* Assumption: Base Context should never switch to Base Context */
assert(current_sandbox != NULL);
assert(&current_sandbox->ctxt != &worker_thread_base_context);
current_sandbox_set(NULL);
struct arch_context *current_context = &current_sandbox->ctxt;
assert(current_context != &worker_thread_base_context);
#ifdef LOG_CONTEXT_SWITCHES
debuglog("Sandbox %lu (%s) > Base Context (%s)\n", current_sandbox->id,
arch_context_variant_print(current_sandbox->ctxt.variant),
debuglog("Sandbox %lu (@%p) (%s)> Base Context (@%p) (%s)\n", current_sandbox->id, current_context,
arch_context_variant_print(current_sandbox->ctxt.variant), &worker_thread_base_context,
arch_context_variant_print(worker_thread_base_context.variant));
#endif
arch_context_switch(&current_sandbox->ctxt, &worker_thread_base_context);
worker_thread_transition_exiting_sandbox(current_sandbox);
current_sandbox_set(NULL);
assert(worker_thread_base_context.variant == ARCH_CONTEXT_VARIANT_FAST);
arch_context_switch(current_context, &worker_thread_base_context);
software_interrupt_enable();
}
@ -282,7 +292,9 @@ worker_thread_main(void *return_code)
worker_thread_start_timestamp = __getcycles();
worker_thread_lock_duration = 0;
/* Initialize Base Context */
/* Initialize Base Context as unused
* The SP and IP are populated during the first FAST switch away
*/
arch_context_init(&worker_thread_base_context, 0, 0);
/* Initialize Runqueue Variant */

Loading…
Cancel
Save