|
|
|
@ -2,8 +2,11 @@
|
|
|
|
|
#define ARCH_X86_64_CONTEXT_H
|
|
|
|
|
|
|
|
|
|
#include <assert.h>
|
|
|
|
|
#include <unistd.h>
|
|
|
|
|
#include <ucontext.h>
|
|
|
|
|
#include <unistd.h>
|
|
|
|
|
|
|
|
|
|
#define ARCH_NREGS (16 /* GP registers */ + 1 /* for IP */)
|
|
|
|
|
#define ARCH_SIG_JMP_OFF 8
|
|
|
|
|
|
|
|
|
|
/*
 * Registers in regs[] follow a strict ordering convention shared with the
 * context-switch assembly: index 5 holds the stack pointer and index 16
 * holds the instruction pointer.
 */
|
|
|
|
|
|
|
|
|
|
typedef uint64_t reg_t;
|
|
|
|
|
#define ARCH_NREGS (16 /* GP registers */ + 1 /* for IP */)
|
|
|
|
|
#define ARCH_SIG_JMP_OFF 8
|
|
|
|
|
|
|
|
|
|
/*
 * Slow-path switch to a preempted sandbox: raise SIGUSR1 on the current
 * thread and restore the saved mcontext from within the signal handler.
 */
|
|
|
|
|
extern void __attribute__((noreturn)) worker_thread__sandbox_switch_preempt(void);
|
|
|
|
|
|
|
|
|
|
struct arch_context {
|
|
|
|
|
reg_t regs[ARCH_NREGS];
|
|
|
|
@ -42,15 +42,35 @@ struct arch_context {
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
typedef struct arch_context arch_context_t;
|
|
|
|
|
|
|
|
|
|
extern void __attribute__((noreturn)) worker_thread__sandbox_switch_preempt(void);
|
|
|
|
|
extern __thread arch_context_t worker_thread__base_context;
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
arch_mcontext_save(arch_context_t *ctx, mcontext_t *mc)
|
|
|
|
|
static void __attribute__((noinline)) arch_context_init(arch_context_t *actx, reg_t ip, reg_t sp)
|
|
|
|
|
{
|
|
|
|
|
assert(ctx != &worker_thread__base_context);
|
|
|
|
|
memset(&actx->mctx, 0, sizeof(mcontext_t));
|
|
|
|
|
memset((void *)actx->regs, 0, sizeof(reg_t) * ARCH_NREGS);
|
|
|
|
|
|
|
|
|
|
ctx->regs[5] = 0;
|
|
|
|
|
memcpy(&ctx->mctx, mc, sizeof(mcontext_t));
|
|
|
|
|
if (sp) {
|
|
|
|
|
/*
|
|
|
|
|
* context_switch conventions: bp is expected to be on top of the stack
|
|
|
|
|
* when co-op context switching..
|
|
|
|
|
*
|
|
|
|
|
* so push sp on this new stack and use
|
|
|
|
|
* that new sp as sp for switching to sandbox!
|
|
|
|
|
*/
|
|
|
|
|
asm volatile("movq %%rsp, %%rbx\n\t"
|
|
|
|
|
"movq %%rax, %%rsp\n\t"
|
|
|
|
|
"pushq %%rax\n\t"
|
|
|
|
|
"movq %%rsp, %%rax\n\t"
|
|
|
|
|
"movq %%rbx, %%rsp\n\t"
|
|
|
|
|
: "=a"(sp)
|
|
|
|
|
: "a"(sp)
|
|
|
|
|
: "memory", "cc", "rbx");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
*(actx->regs + 5) = sp;
|
|
|
|
|
*(actx->regs + 16) = ip;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
@ -74,32 +94,15 @@ arch_mcontext_restore(mcontext_t *mc, arch_context_t *ctx)
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void __attribute__((noinline)) arch_context_init(arch_context_t *actx, reg_t ip, reg_t sp)
|
|
|
|
|
static void
|
|
|
|
|
arch_mcontext_save(arch_context_t *ctx, mcontext_t *mc)
|
|
|
|
|
{
|
|
|
|
|
memset(&actx->mctx, 0, sizeof(mcontext_t));
|
|
|
|
|
memset((void *)actx->regs, 0, sizeof(reg_t) * ARCH_NREGS);
|
|
|
|
|
assert(ctx != &worker_thread__base_context);
|
|
|
|
|
|
|
|
|
|
if (sp) {
|
|
|
|
|
/*
|
|
|
|
|
* context_switch conventions: bp is expected to be on top of the stack
|
|
|
|
|
* when co-op context switching..
|
|
|
|
|
*
|
|
|
|
|
* so push sp on this new stack and use
|
|
|
|
|
* that new sp as sp for switching to sandbox!
|
|
|
|
|
*/
|
|
|
|
|
asm volatile("movq %%rsp, %%rbx\n\t"
|
|
|
|
|
"movq %%rax, %%rsp\n\t"
|
|
|
|
|
"pushq %%rax\n\t"
|
|
|
|
|
"movq %%rsp, %%rax\n\t"
|
|
|
|
|
"movq %%rbx, %%rsp\n\t"
|
|
|
|
|
: "=a"(sp)
|
|
|
|
|
: "a"(sp)
|
|
|
|
|
: "memory", "cc", "rbx");
|
|
|
|
|
ctx->regs[5] = 0;
|
|
|
|
|
memcpy(&ctx->mctx, mc, sizeof(mcontext_t));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
*(actx->regs + 5) = sp;
|
|
|
|
|
*(actx->regs + 16) = ip;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
|
arch_context_switch(arch_context_t *ca, arch_context_t *na)
|
|
|
|
|