feat: error check context variants

main
Sean McBride 4 years ago
parent 9c74fffd38
commit fdba1b1de8

@ -6,7 +6,7 @@
/**
 * Initializes a context, zeros out registers, and sets the Instruction and
 * Stack pointers. Sets variant to unused if ip and sp are 0, fast otherwise.
 * @param actx arch_context to init
 * @param ip value to set instruction pointer to
 * @param sp value to set stack pointer to
 */
@ -15,47 +15,91 @@
static inline void
arch_context_init(struct arch_context *actx, reg_t ip, reg_t sp)
{
	assert(actx != NULL);

	/* A context with zeroed ip and sp carries no resumable state, so mark it
	 * unused; anything else is resumable via the fastpath. */
	if (ip == 0 && sp == 0) {
		actx->variant = arch_context_unused;
	} else {
		actx->variant = arch_context_fast;
	}

	actx->regs[ureg_rsp] = sp;
	actx->regs[ureg_rip] = ip;
}
/**
 * Restore a sandbox saved using a fastpath switch, restoring only the
 * instruction pointer and stack pointer registers rather than
 * a full mcontext, so it is less expensive than arch_mcontext_restore.
 * @param active_context - the context of the current worker thread
 * @param sandbox_context - the context that we want to restore
 */
static void
arch_context_restore(mcontext_t *active_context, struct arch_context *sandbox_context)
{
	assert(active_context != NULL);
	assert(sandbox_context != NULL);

	/* Assumption: Base Context is only ever used by arch_context_switch */
	assert(sandbox_context != &worker_thread_base_context);

	/* A fastpath context keeps its saved state in regs[], not in sp/pc members */
	assert(sandbox_context->regs[ureg_rsp]);
	assert(sandbox_context->regs[ureg_rip]);

	/* Transitioning from Fast -> Running */
	assert(sandbox_context->variant == arch_context_fast);
	sandbox_context->variant = arch_context_running;

	/* Install only sp and pc into the active mcontext; ARCH_SIG_JMP_OFF skips
	 * past the variant-update instruction at the switch's resume label */
	active_context->sp = sandbox_context->regs[ureg_rsp];
	active_context->pc = sandbox_context->regs[ureg_rip] + ARCH_SIG_JMP_OFF;
}
/**
* @param a - the registers and context of the thing running
* @param b - the registers and context of what we're switching to
* @return always returns 0, indicating success
*
* NULL in either of these values indicates the "no sandbox to execute" state,
* which defaults to resuming execution of main
*/
static inline int
arch_context_switch(struct arch_context *current, struct arch_context *next)
arch_context_switch(struct arch_context *a, struct arch_context *b)
{
/* Assumption: Software Interrupts are disabled by caller */
assert(!software_interrupt_is_enabled());
/* if both current and next are NULL, there is no state change */
assert(current != NULL || next != NULL);
/* if both a and b are NULL, there is no state change */
assert(a != NULL || b != NULL);
/* Assumption: The caller does not switch to itself */
assert(current != next);
assert(a != b);
/* Set any NULLs to worker_thread_base_context to resume execution of main */
if (current == NULL) current = &worker_thread_base_context;
if (next == NULL) next = &worker_thread_base_context;
if (a == NULL) a = &worker_thread_base_context;
if (b == NULL) b = &worker_thread_base_context;
/* A Transition {Unused, Running} -> Fast */
assert(a->variant == arch_context_unused || a->variant == arch_context_running);
/* B Transition {Fast, Slow} -> Running */
assert(b->variant == arch_context_fast || b->variant == arch_context_slow);
/* Assumption: Fastpath state is well formed */
if (b->variant == arch_context_fast) {
assert(b->regs[ureg_rip] != 0);
assert(b->regs[ureg_rsp] != 0);
}
reg_t *current_registers = current->regs, *next_registers = next->regs;
assert(current_registers && next_registers);
reg_t *a_registers = a->regs, *b_registers = b->regs;
assert(a_registers && b_registers);
asm volatile("mov x0, sp\n\t"
"adr x1, reset%=\n\t"
"str x1, [%[current], 8]\n\t"
"str x0, [%[current]]\n\t"
"ldr x2, [%[next]]\n\t"
"str x1, [%[a], 8]\n\t"
"str x0, [%[a]]\n\t"
"ldr x2, [%[b]]\n\t"
"cbz x2, slow%=\n\t"
"ldr x3, [%[next], 8]\n\t"
"ldr x3, [%[b], 8]\n\t"
"mov sp, x2\n\t"
"br x3\n\t"
"slow%=:\n\t"
@ -63,12 +107,11 @@ arch_context_switch(struct arch_context *current, struct arch_context *next)
".align 8\n\t"
"reset%=:\n\t"
"mov x1, #0\n\t"
"str x1, [%[next]]\n\t"
"str x1, [%[b]]\n\t"
".align 8\n\t"
"exit%=:\n\t"
:
: [ current ] "r"(current_registers), [ next ] "r"(next_registers),
[ slowpath ] "r"(&arch_context_restore_preempted)
: [ a ] "r"(a_registers), [ b ] "r"(b_registers), [ slowpath ] "r"(&arch_context_restore_preempted)
: "memory", "cc", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12",
"x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15");

@ -36,6 +36,23 @@ typedef enum
arch_context_running = 3 /* Context is executing and content is out of date */
} arch_context_t;
/**
 * Maps an arch_context variant to a human-readable name for logging
 * @param context the variant to name
 * @return pointer to a static string literal; callers must not modify it
 */
static inline const char *
arch_context_print(arch_context_t context)
{
	switch (context) {
	case arch_context_unused:
		return "Unused";
	case arch_context_fast:
		return "Fast";
	case arch_context_slow:
		return "Slow";
	case arch_context_running:
		return "Running";
	default:
		panic("Encountered unexpected arch_context variant\n");
	}
}
struct arch_context {
arch_context_t variant;

@ -43,40 +43,17 @@ arch_mcontext_restore(mcontext_t *active_context, struct arch_context *sandbox_c
assert(active_context != NULL);
assert(sandbox_context != NULL);
/* Validate that the sandbox_context is well formed */
assert(sandbox_context->variant == arch_context_slow);
assert(sandbox_context->mctx.gregs[REG_RSP] != 0);
assert(sandbox_context->mctx.gregs[REG_RIP] != 0);
/* Assumption: Base Context is only ever used by arch_context_switch */
assert(sandbox_context != &worker_thread_base_context);
/* Transitioning from Slow -> Running */
assert(sandbox_context->variant == arch_context_slow);
sandbox_context->variant = arch_context_running;
/* Restore mcontext */
memcpy(active_context, &sandbox_context->mctx, sizeof(mcontext_t));
sandbox_context->variant = arch_context_running;
}
/**
* Restore a sandbox saved using a fastpath switch, restoring only the
* instruction pointer and stack pointer registers rather than
* a full mcontext, so it is less expensive than arch_mcontext_restore.
* @param active_context - the context of the current worker thread
* @param sandbox_context - the context that we want to restore
*/
static void
arch_context_restore(mcontext_t *active_context, struct arch_context *sandbox_context)
{
assert(active_context != NULL);
assert(sandbox_context != NULL);
assert(sandbox_context->variant == arch_context_fast);
assert(sandbox_context != &worker_thread_base_context);
assert(sandbox_context->regs[ureg_rip]);
assert(sandbox_context->regs[ureg_rsp]);
active_context->gregs[REG_RSP] = sandbox_context->regs[ureg_rsp];
active_context->gregs[REG_RIP] = sandbox_context->regs[ureg_rip] + ARCH_SIG_JMP_OFF;
sandbox_context->variant = arch_context_running;
}
/**
* Save the full mcontext of the currently executing process
@ -86,19 +63,17 @@ arch_context_restore(mcontext_t *active_context, struct arch_context *sandbox_co
static inline void
arch_mcontext_save(struct arch_context *sandbox_context, const mcontext_t *active_context)
{
assert(sandbox_context != NULL);
assert(active_context != NULL);
/* Assumption: Only called indirectly via signal handler, so interrupts should be disabled */
assert(!software_interrupt_is_enabled());
assert(sandbox_context != NULL);
assert(active_context != NULL);
/* Assumption: The base context should never be modified */
assert(sandbox_context != &worker_thread_base_context);
/* Assumption: The executing process has sane IP and SP values */
assert(active_context->gregs[REG_RIP] != 0);
assert(active_context->gregs[REG_RSP] != 0);
/* Transitioning from {Unused, Running} -> Slow */
assert(sandbox_context->variant == arch_context_unused || sandbox_context->variant == arch_context_running);
sandbox_context->variant = arch_context_slow;
/* Copy mcontext */

@ -1,19 +1,26 @@
#pragma once
#include "arch/common.h"
#include "types.h"
#define ARCH_SIG_JMP_OFF 8
/**
 * Initializes a context, zeros out registers, and sets the Instruction and
 * Stack pointers. Sets variant to unused if ip and sp are 0, fast otherwise.
 * @param actx arch_context to init
 * @param ip value to set instruction pointer to
 * @param sp value to set stack pointer to
 */
static void __attribute__((noinline)) arch_context_init(struct arch_context *actx, reg_t ip, reg_t sp)
{
assert(actx != NULL);
if (ip == 0 && sp == 0) {
actx->variant = arch_context_unused;
} else {
actx->variant = arch_context_fast;
}
if (sp) {
/*
* context_switch conventions: bp is expected to be on top of the stack
@ -35,44 +42,73 @@ static void __attribute__((noinline)) arch_context_init(struct arch_context *act
actx->regs[ureg_rsp] = sp;
actx->regs[ureg_rip] = ip;
actx->variant = arch_context_fast;
}
/**
* Restore a sandbox saved using a fastpath switch, restoring only the
* instruction pointer and stack pointer registers rather than
* a full mcontext, so it is less expensive than arch_mcontext_restore.
* @param active_context - the context of the current worker thread
* @param sandbox_context - the context that we want to restore
*/
static void
arch_context_restore(mcontext_t *active_context, struct arch_context *sandbox_context)
{
	assert(active_context != NULL);
	assert(sandbox_context != NULL);

	/* The base context is reserved for arch_context_switch */
	assert(sandbox_context != &worker_thread_base_context);

	/* The fastpath state must be well formed before we install it */
	reg_t saved_sp = sandbox_context->regs[ureg_rsp];
	reg_t saved_ip = sandbox_context->regs[ureg_rip];
	assert(saved_sp);
	assert(saved_ip);

	/* Transitioning from Fast -> Running */
	assert(sandbox_context->variant == arch_context_fast);
	sandbox_context->variant = arch_context_running;

	/* Install only RSP and RIP; ARCH_SIG_JMP_OFF resumes past the
	 * variant-update instruction at the switch's resume label */
	active_context->gregs[REG_RSP] = saved_sp;
	active_context->gregs[REG_RIP] = saved_ip + ARCH_SIG_JMP_OFF;
}
/**
 * @param a - the registers and context of the thing running
 * @param b - the registers and context of what we're switching to
 * @return always returns 0, indicating success
 *
 * NULL in either of these values indicates the "no sandbox to execute" state,
 * which defaults to resuming execution of main
 */
static inline int
arch_context_switch(struct arch_context *current, struct arch_context *next)
arch_context_switch(struct arch_context *a, struct arch_context *b)
{
/* Assumption: Software Interrupts are disabled by caller */
assert(software_interrupt_is_disabled);
if (next->variant == arch_context_fast && (next->regs[ureg_rip] == 0 || next->regs[ureg_rsp] == 0)) {
debuglog("Next Context was Fast Variant, but data was invalid.");
assert(0);
}
/* if both current and next are NULL, there is no state change */
assert(current != NULL || next != NULL);
/* if both a and b are NULL, there is no state change */
assert(a != NULL || b != NULL);
/* Assumption: The caller does not switch to itself */
assert(current != next);
assert(a != b);
/* Set any NULLs to worker_thread_base_context to resume execution of main */
if (current == NULL) current = &worker_thread_base_context;
if (next == NULL) next = &worker_thread_base_context;
if (a == NULL) a = &worker_thread_base_context;
if (b == NULL) b = &worker_thread_base_context;
/* A Transition {Unused, Running} -> Fast */
assert(a->variant == arch_context_unused || a->variant == arch_context_running);
/* B Transition {Fast, Slow} -> Running */
assert(b->variant == arch_context_fast || b->variant == arch_context_slow);
/* Assumption: The context we are switching to should have saved a context in some form */
assert(next->variant == arch_context_fast || next->variant == arch_context_slow);
/* Assumption: Fastpath state is well formed */
if (b->variant == arch_context_fast) {
assert(b->regs[ureg_rip] != 0);
assert(b->regs[ureg_rsp] != 0);
}
reg_t *current_registers = current->regs, *next_registers = next->regs;
assert(current_registers && next_registers);
reg_t *a_registers = a->regs, *b_registers = b->regs;
assert(a_registers && b_registers);
asm volatile(
/* Create a new stack frame */
@ -80,31 +116,40 @@ arch_context_switch(struct arch_context *current, struct arch_context *next)
"movq %%rsp, %%rbp\n\t" /* base_pointer = stack_pointer. Start new Frame */
/*
* Save the IP and stack pointer to the context of the sandbox we're switching from
* Save Context A as a fastpath context switch
* Save the active stack pointer to context A.
* Set context A's IP to the absolute address of label 2f
*/
"movq $2f, 8(%%rax)\n\t" /* Write the address of label 2 to current_registers[1] (instruction_pointer). */
"movq %%rsp, (%%rax)\n\t" /* current_registers[0] (stack_pointer) = stack_pointer */
"movq $1, (%%rcx)\n\t" /* current->variant = arch_context_fast; */
"movq $2f, 8(%%rax)\n\t" /* Write the address of label 2 to context a's IP. */
"movq %%rsp, (%%rax)\n\t" /* a_registers[0] (stack_pointer) = stack_pointer */
"movq $1, (%%rcx)\n\t" /* a->variant = arch_context_fast; */
/*
* Check if the variant of the context we're trying to switch to is SLOW (mcontext-based)
* If it is, jump to label 1 to restore the preempted sandbox
* Execute a fastpath or slowpath context switch based on context B's variant
*
* If slow (mcontext-based), jump to label 1 to restore via a signal handler
* Otherwise, fall through and execute fast path.
*/
"cmpq $2, (%%rdx)\n\t" /* if (next->variant == arch_context_slow); */
"cmpq $2, (%%rdx)\n\t" /* if (b->variant == arch_context_slow); */
"je 1f\n\t" /* goto 1; restore the existing sandbox using mcontext */
/*
* Fast Path
* We can just write update the stack pointer and jump to the target instruction
*/
"movq (%%rbx), %%rsp\n\t" /* stack_pointer = next_registers[0] (stack_pointer) */
"jmpq *8(%%rbx)\n\t" /* immediate jump to next_registers[1] (instruction_pointer) */
"movq (%%rbx), %%rsp\n\t" /* stack_pointer = b_registers[0] (stack_pointer) */
"jmpq *8(%%rbx)\n\t" /* immediate jump to b_registers[1] (instruction_pointer) */
/*
* This is after the context switch. B is now the active context.
*/
/*
* Slow Path
* If the variant is arch_context_slow, that means the sandbox was preempted and we need to
* fallback to a full mcontext-based context switch. We do this by invoking
* arch_context_restore_preempted, which fires a SIGUSR1 signal. The SIGUSR1 signal handler
* arch_context_restore_preempted, which fires a SIGUSR1 signal. The SIGUSR1 signal handler
* executes the mcontext-based context switch.
*/
"1:\n\t"
@ -112,19 +157,20 @@ arch_context_switch(struct arch_context *current, struct arch_context *next)
".align 8\n\t"
/*
* This label was written to the instruction pointer of the sandbox that was switched away from
* These labels are used to switch
* This label is what is saved as the IP of a context that was saved using a fastpath context switch
* When this is resumed
* The sandbox either resumes at label 2 or 3 depending on if an offset of 8 is used.
*/
"2:\n\t"
"movq $3, (%%rdx)\n\t" /* next->variant = arch_context_running; */
"movq $3, (%%rdx)\n\t" /* b->variant = arch_context_running; */
".align 8\n\t"
/* This label is used in conjunction with a static offset */
"3:\n\t"
/* TODO: Should we set next->variant = arch_context_slow here?;*/
"popq %%rbp\n\t" /* base_pointer = stack[--stack_len]; Base Pointer is restored */
:
: "a"(current_registers), "b"(next_registers), "c"(&current->variant), "d"(&next->variant)
: "a"(a_registers), "b"(b_registers), "c"(&a->variant), "d"(&b->variant)
: "memory", "cc", "rsi", "rdi", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "xmm0", "xmm1", "xmm2",
"xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14",
"xmm15");

@ -170,15 +170,11 @@ local_runqueue_minheap_preempt(ucontext_t *user_context)
/*
* Restore the context of this new sandbox
* If last in a user-level context switch state,
* do not enable software interrupts.
* user-level context switch state, so do not enable software interrupts.
* TODO: Review the interrupt logic here
*/
if (next_sandbox->ctxt.variant == arch_context_slow) {
arch_mcontext_restore(&user_context->uc_mcontext, &next_sandbox->ctxt);
} else {
arch_context_restore(&user_context->uc_mcontext, &next_sandbox->ctxt);
should_enable_software_interrupt = false;
}
arch_context_restore(&user_context->uc_mcontext, &next_sandbox->ctxt);
should_enable_software_interrupt = false;
}
done:
if (should_enable_software_interrupt) software_interrupt_enable();

@ -58,10 +58,12 @@ worker_thread_switch_to_sandbox(struct sandbox *next_sandbox)
if (current_sandbox == NULL) {
/* Switching from "Base Context" */
current_sandbox_set(next_sandbox);
debuglog("Base Context > Sandbox %lu (%d variant)\n", next_sandbox->request_arrival_timestamp,
next_context->variant);
debuglog("Base Context (%s) > Sandbox %lu (%s)\n",
arch_context_print(worker_thread_base_context.variant),
next_sandbox->request_arrival_timestamp, arch_context_print(next_context->variant));
current_sandbox_set(next_sandbox);
arch_context_switch(NULL, next_context);
} else {
@ -70,11 +72,11 @@ worker_thread_switch_to_sandbox(struct sandbox *next_sandbox)
struct arch_context *current_context = &current_sandbox->ctxt;
current_sandbox_set(next_sandbox);
debuglog("Sandbox %lu > Sandbox %lu\n", current_sandbox->request_arrival_timestamp,
next_sandbox->request_arrival_timestamp);
current_sandbox_set(next_sandbox);
/* Switch to the associated context. */
arch_context_switch(current_context, next_context);
}
@ -101,7 +103,9 @@ worker_thread_switch_to_base_context()
current_sandbox_set(NULL);
debuglog("Sandbox %lu > Base Context\n", current_sandbox->request_arrival_timestamp);
debuglog("Sandbox %lu (%s) > Base Context (%s)\n", current_sandbox->request_arrival_timestamp,
arch_context_print(current_sandbox->ctxt.variant),
arch_context_print(worker_thread_base_context.variant));
arch_context_switch(&current_sandbox->ctxt, &worker_thread_base_context);

Loading…
Cancel
Save