refactor: DRY arch_context code

main
Sean McBride 4 years ago
parent dd06d57a02
commit 236e39b263

@@ -1,96 +1,58 @@
#pragma once
#include <stdint.h>
#include <unistd.h>
#include <ucontext.h>
#include "arch/common.h"
#include "arch_context.h"
#define ARCH_NREGS (2) /* SP + PC only */
#define ARCH_SIG_JMP_OFF 0x100 /* Based on code generated! */
/**
* ARM64 code. Currently Unimplemented
* Initializes a context, zeros out registers, and sets the Instruction and
* Stack pointers
* @param actx arch_context to init
* @param ip value to set instruction pointer to
* @param sp value to set stack pointer to
*/
typedef uint64_t reg_t;
struct arch_context {
reg_t regs[ARCH_NREGS];
mcontext_t mctx;
};
extern __thread struct arch_context worker_thread_base_context;
/* Initializes a context, zeroing out registers and setting the Instruction and Stack pointers */
static inline void
arch_context_init(struct arch_context *actx, reg_t ip, reg_t sp)
{
memset(&actx->mctx, 0, sizeof(mcontext_t));
memset((void *)actx->regs, 0, sizeof(reg_t) * ARCH_NREGS);
*(actx->regs) = sp;
*(actx->regs + 1) = ip;
}
static int
arch_mcontext_restore(mcontext_t *mc, struct arch_context *ctx)
{
assert(ctx != &worker_thread_base_context);
/*
* if ctx->regs[0] is set, the context was last suspended in a user-level context switch;
* else restore the mcontext
*/
if (ctx->regs[0]) {
mc->sp = ctx->regs[0];
mc->pc = ctx->regs[1] + ARCH_SIG_JMP_OFF;
ctx->regs[0] = 0;
return 1;
} else {
memcpy(mc, &ctx->mctx, sizeof(mcontext_t));
memset(&ctx->mctx, 0, sizeof(mcontext_t));
}
memset((void *)actx->regs, 0, sizeof(reg_t) * UREG_COUNT);
return 0;
}
static void
arch_mcontext_save(struct arch_context *ctx, mcontext_t *mc)
{
assert(ctx != &worker_thread_base_context);
ctx->regs[0] = 0;
memcpy(&ctx->mctx, mc, sizeof(mcontext_t));
actx->regs[UREG_RSP] = sp;
actx->regs[UREG_RIP] = ip;
}
/**
* @param current - the registers and context of the currently running sandbox
* @param next - the registers and context of the sandbox being switched to
* @return always returns 0, indicating success
*
* NULL in either argument indicates the "no sandbox to execute" state,
* which defaults to resuming execution of main
*/
static inline int
arch_context_switch(struct arch_context *ca, struct arch_context *na)
arch_context_switch(struct arch_context *current, struct arch_context *next)
{
if (!ca) {
assert(na);
/* switching from "no sandbox to execute" state to "executing a sandbox" */
ca = &worker_thread_base_context;
} else if (!na) {
assert(ca);
/* Assumption: Software Interrupts are disabled by caller */
assert(!software_interrupt_is_enabled());
/* switching from "executing a sandbox" to "no execution" state. */
na = &worker_thread_base_context;
} else {
assert(na && ca);
/* both current and next being NULL would mean no state change, so require at least one */
assert(current != NULL || next != NULL);
/* switching between sandboxes. */
}
/* Assumption: The caller does not switch to itself */
assert(current != next);
reg_t *cr = ca->regs, *nr = na->regs;
assert(cr && nr);
/* Set any NULLs to worker_thread_base_context to resume execution of main */
if (current == NULL) current = &worker_thread_base_context;
if (next == NULL) next = &worker_thread_base_context;
reg_t *current_registers = current->regs, *next_registers = next->regs;
assert(current_registers && next_registers);
asm volatile("mov x0, sp\n\t"
"adr x1, reset%=\n\t"
"str x1, [%[curr], 8]\n\t"
"str x0, [%[curr]]\n\t"
"str x1, [%[current], 8]\n\t"
"str x0, [%[current]]\n\t"
"ldr x2, [%[next]]\n\t"
"cbz x2, slow%=\n\t"
"ldr x3, [%[next], 8]\n\t"
@@ -105,9 +67,11 @@ arch_context_switch(struct arch_context *ca, struct arch_context *na)
".align 8\n\t"
"exit%=:\n\t"
:
: [ curr ] "r"(cr), [ next ] "r"(nr), [ slowpath ] "r"(&arch_context_mcontext_restore)
: [ current ] "r"(current_registers), [ next ] "r"(next_registers),
[ slowpath ] "r"(&arch_context_mcontext_restore)
: "memory", "cc", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12",
"x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15");
return 0;
}
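For orientation, a hedged usage sketch of the interface in this hunk (the entrypoint, stack, and the example_-prefixed names are assumptions, not part of this commit): initialize a fresh context, then switch into it, passing NULL for current to mean the "no sandbox to execute" state.

#include "arch_context.h"

extern void example_sandbox_entrypoint(void); /* assumed entry function */

void
example_start_sandbox(void)
{
	static char stack[1 << 16] __attribute__((aligned(16))); /* assumed stack; top is the high address */
	struct arch_context child;

	/* zeroes mctx and regs, then stores SP and IP through regs[] */
	arch_context_init(&child, (reg_t)example_sandbox_entrypoint, (reg_t)&stack[sizeof(stack)]);

	/* current == NULL: arch_context_switch() substitutes worker_thread_base_context,
	 * so main's state is saved and can be resumed later. child.regs[UREG_RSP] is
	 * nonzero, so the assembly takes the fast path and jumps straight to the saved
	 * SP/IP; control returns here only if a later switch targets the base context. */
	arch_context_switch(NULL, &child);
}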

@@ -0,0 +1,37 @@
#pragma once
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>
#include "software_interrupt.h"
/*
* This file contains the common dependencies of the architecture-dependent code
*/
typedef uint64_t reg_t;
/* Userspace Registers. */
enum UREGS
{
UREG_RSP = 0,
UREG_RIP = 1,
UREG_COUNT
};
struct arch_context {
reg_t regs[UREG_COUNT];
mcontext_t mctx;
};
/*
* This is the slowpath switch to a preempted sandbox!
* SIGUSR1 on the current thread and restore mcontext there!
*/
extern __thread struct arch_context worker_thread_base_context;
/* Cannot be inlined because called in Assembly */
void __attribute__((noinline)) __attribute__((noreturn)) arch_context_mcontext_restore(void);
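The shared definitions above are what the two architecture back ends now build on; a compressed sketch of the init pattern both context.h files follow after this refactor (assembled from lines shown elsewhere in this diff, the function name is illustrative only):

#include "arch/common.h"

/* Both back ends zero the context, then store SP/IP through the shared
 * UREGS indices instead of architecture-local constants. */
static inline void
example_context_init(struct arch_context *actx, reg_t ip, reg_t sp)
{
	memset(&actx->mctx, 0, sizeof(mcontext_t));
	memset((void *)actx->regs, 0, sizeof(reg_t) * UREG_COUNT);
	actx->regs[UREG_RSP] = sp;
	actx->regs[UREG_RIP] = ip;
}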

@@ -1,5 +1,14 @@
#pragma once
/*
* This header is the single entry point into the arch_context code.
* This includes both dependent and independent code
*/
/*
* Conditionally load architecture-dependent code
* Each of these headers include common.h
*/
#if defined(AARCH64) || defined(aarch64)
#include "aarch64/context.h"
#elif defined(X86_64) || defined(x86_64)
@@ -9,3 +18,51 @@
#define X86_64
#include "x86_64/context.h"
#endif
/**
* Preempt the current sandbox and start executing the next sandbox
* @param mc - the context of the current thread of execution
* @param ctx - the context that we want to restore
* @return Return code in {0,1}
* 0 = context restored successfully.
* 1 = special processing because thread was last in a user-level context switch state
*/
static inline int
arch_mcontext_restore(mcontext_t *mc, struct arch_context *ctx)
{
assert(ctx != &worker_thread_base_context);
assert(!software_interrupt_is_enabled());
/* if ctx->regs[UREG_RSP] is set, the context was last suspended in a user-level context switch;
* else restore the mcontext
*/
bool did_user_level_context_switch = ctx->regs[UREG_RSP];
if (did_user_level_context_switch) {
mc->gregs[REG_RSP] = ctx->regs[UREG_RSP];
mc->gregs[REG_RIP] = ctx->regs[UREG_RIP] + ARCH_SIG_JMP_OFF;
ctx->regs[UREG_RSP] = 0;
return 1;
}
/* Restore mcontext */
memcpy(mc, &ctx->mctx, sizeof(mcontext_t));
memset(&ctx->mctx, 0, sizeof(mcontext_t));
return 0;
}
/**
* Save the context of the currently executing process
* @param ctx - destination
* @param mc - source
*/
static inline void
arch_mcontext_save(struct arch_context *ctx, mcontext_t *mc)
{
assert(ctx != &worker_thread_base_context);
ctx->regs[UREG_RSP] = 0;
memcpy(&ctx->mctx, mc, sizeof(mcontext_t));
}
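The save/restore pair above is designed to be driven from a signal handler during preemption; a hedged sketch of such a handler follows (the handler name and the two thread-local context pointers are assumptions, and installation via sigaction is omitted):

#include <signal.h>
#include <ucontext.h>
#include "arch_context.h"

/* Assumed bookkeeping, for illustration only */
extern __thread struct arch_context *example_preempted_context;
extern __thread struct arch_context *example_next_context;

static void
example_preemption_handler(int signal_type, siginfo_t *info, void *interrupted)
{
	mcontext_t *mc = &((ucontext_t *)interrupted)->uc_mcontext;

	/* stash the interrupted register state in the preempted sandbox */
	arch_mcontext_save(example_preempted_context, mc);

	/* load the next sandbox's saved state into the ucontext the kernel will
	 * sigreturn to; returns 1 if that sandbox was last suspended by a
	 * user-level switch (its IP already bumped by ARCH_SIG_JMP_OFF) */
	arch_mcontext_restore(mc, example_next_context);
}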

@@ -1,39 +1,16 @@
#pragma once
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>
#include "arch_context.h"
#include "software_interrupt.h"
#include "arch/common.h"
#define ARCH_SIG_JMP_OFF 8
// Userspace Registers.
enum UREGS
{
UREG_RSP = 0,
UREG_RIP = 1,
UREG_COUNT
};
typedef uint64_t reg_t;
/*
* This is the slowpath switch to a preempted sandbox!
* SIGUSR1 on the current thread and restore mcontext there!
*/
/**
* Initializes a context, zeros out registers, and sets the Instruction and
* Stack pointers
* @param actx arch_context to init
* @param ip value to set instruction pointer to
* @param sp value to set stack pointer to
*/
struct arch_context {
reg_t regs[UREG_COUNT];
mcontext_t mctx;
};
extern __thread struct arch_context worker_thread_base_context;
static void __attribute__((noinline)) arch_context_init(struct arch_context *actx, reg_t ip, reg_t sp)
{
memset(&actx->mctx, 0, sizeof(mcontext_t));
@@ -62,52 +39,6 @@ static void __attribute__((noinline)) arch_context_init(struct arch_context *act
actx->regs[UREG_RIP] = ip;
}
/**
* Preempt the current sandbox and start executing the next sandbox
* @param mc - the context of the current thread of execution
* @param ctx - the context that we want to restore
* @return Return code in {0,1}
* 0 = context restored successfully.
* 1 = special processing because thread was last in a user-level context switch state
*/
static int
arch_mcontext_restore(mcontext_t *mc, struct arch_context *ctx)
{
assert(ctx != &worker_thread_base_context);
assert(!software_interrupt_is_enabled());
/* if ctx->regs[UREG_RSP] is set, the context was last suspended in a user-level context switch;
* else restore the mcontext
*/
bool did_user_level_context_switch = ctx->regs[UREG_RSP];
if (did_user_level_context_switch) {
mc->gregs[REG_RSP] = ctx->regs[UREG_RSP];
mc->gregs[REG_RIP] = ctx->regs[UREG_RIP] + ARCH_SIG_JMP_OFF;
ctx->regs[UREG_RSP] = 0;
return 1;
}
/* Restore mcontext */
memcpy(mc, &ctx->mctx, sizeof(mcontext_t));
memset(&ctx->mctx, 0, sizeof(mcontext_t));
return 0;
}
/**
* Save the context of the currently executing process
* @param ctx - destination
* @param mc - source
*/
static void
arch_mcontext_save(struct arch_context *ctx, mcontext_t *mc)
{
assert(ctx != &worker_thread_base_context);
ctx->regs[UREG_RSP] = 0;
memcpy(&ctx->mctx, mc, sizeof(mcontext_t));
}
/**
* @param current - the registers and context of the thing running

@@ -1 +0,0 @@
void __attribute__((noinline)) __attribute__((noreturn)) arch_context_mcontext_restore(void);
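The line above is the old standalone declaration of the assembly slowpath, now re-homed in arch/common.h. Its body is not part of this diff; going only by the comment in common.h ("SIGUSR1 on the current thread and restore mcontext there"), a heavily hedged sketch of the mechanism might look like:

#include <pthread.h>
#include <signal.h>

/* Sketch only: the real body lives elsewhere in the tree. The assembly
 * fast path branches here when next->regs[UREG_RSP] is zero, i.e. the
 * next sandbox was preempted and only has an mcontext; raising SIGUSR1
 * lets the signal handler restore that mcontext. */
void __attribute__((noinline)) __attribute__((noreturn))
example_mcontext_restore_slowpath(void)
{
	pthread_kill(pthread_self(), SIGUSR1);
	for (;;)
		; /* noreturn: execution resumes inside the restored context */
}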