feat: Round robin for FIFO preemption

main
Sean McBride 4 years ago
parent 69b2a476d4
commit 7f8a11f2d5

@@ -1,3 +1,4 @@
 #pragma once

 void local_runqueue_list_initialize();
+void local_runqueue_list_rotate();

@@ -82,6 +82,18 @@ ps_list_ll_empty(struct ps_list *l)
 	return l->n == l;
 }

+static inline int
+ps_list_ll_one_node(struct ps_list *l)
+{
+	return l->n != l && l->n == l->p;
+}
+
+static inline int
+ps_list_head_one_node(struct ps_list_head *lh)
+{
+	return ps_list_ll_one_node(&lh->l);
+}
+
 static inline int
 ps_list_head_empty(struct ps_list_head *lh)
 {
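The new helpers rely on the invariants of the circular doubly-linked list: an empty list's head points to itself (l->n == l), while a list holding exactly one node has the head's next and prev both pointing at that same node (l->n != l && l->n == l->p). A minimal standalone sketch of the same check, using a simplified node struct rather than the real ps_list types and assuming nothing beyond the pointer layout visible in the diff:

#include <assert.h>

/* Simplified stand-in for a circular doubly-linked list node (not the real ps_list API) */
struct node {
	struct node *n; /* next */
	struct node *p; /* prev */
};

static inline int
one_node(struct node *head)
{
	/* Exactly one element: head's successor is not itself, and next == prev */
	return head->n != head && head->n == head->p;
}

int
main(void)
{
	struct node head = { &head, &head }; /* empty: head points to itself */
	assert(!one_node(&head));

	struct node a = { &head, &head }; /* link a single element after the head */
	head.n = &a;
	head.p = &a;
	assert(one_node(&head));

	struct node b = { &head, &a }; /* append a second element: next != prev at the head */
	a.n    = &b;
	head.p = &b;
	assert(!one_node(&head));

	return 0;
}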

@@ -67,19 +67,24 @@ err_allocate:
 static inline struct sandbox *
 scheduler_fifo_get_next()
 {
-	struct sandbox * sandbox = local_runqueue_get_next();
+	struct sandbox *sandbox = local_runqueue_get_next();
+	struct sandbox_request *sandbox_request = NULL;

-	/* If the local runqueue is empty, pull from global request scheduler */
 	if (sandbox == NULL) {
-		struct sandbox_request *sandbox_request;
+		/* If the local runqueue is empty, pull from global request scheduler */
 		if (global_request_scheduler_remove(&sandbox_request) < 0) goto err;

 		sandbox = sandbox_allocate(sandbox_request);
 		if (!sandbox) goto err_allocate;

 		sandbox_set_as_runnable(sandbox, SANDBOX_INITIALIZED);
-	};
+	} else if (sandbox == current_sandbox_get()) {
+		/* Execute Round Robin Scheduling Logic if the head is the current sandbox */
+		local_runqueue_list_rotate();
+		sandbox = local_runqueue_get_next();
+	}

 done:
 	return sandbox;
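The round-robin path only triggers when the runqueue head is the sandbox that is already executing: in that case the head is rotated to the tail and the new head is returned; otherwise FIFO order is preserved. A toy standalone simulation of that decision (hypothetical names, a plain array standing in for the runtime's list) behaves like this:

#include <stdio.h>

/* Toy model: a fixed-size circular queue of "sandbox ids" standing in for the local runqueue */
#define QUEUE_LEN 3
static int queue[QUEUE_LEN] = { 1, 2, 3 };
static int head             = 0;

/* Rotate: move the head to the tail, conceptually what local_runqueue_list_rotate does */
static void
toy_rotate(void)
{
	head = (head + 1) % QUEUE_LEN;
}

/* Mimic scheduler_fifo_get_next: if the head is already running, rotate before picking */
static int
toy_fifo_get_next(int currently_running)
{
	if (queue[head] == currently_running) toy_rotate();
	return queue[head];
}

int
main(void)
{
	int current = 1; /* sandbox 1 is executing when the preemption timer fires */
	for (int tick = 0; tick < 4; tick++) {
		current = toy_fifo_get_next(current);
		printf("tick %d: run sandbox %d\n", tick, current);
	}
	/* Prints sandboxes 2, 3, 1, 2: the head keeps cycling round robin */
	return 0;
}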
@@ -143,11 +148,6 @@ scheduler_runqueue_initialize()
 static inline void
 scheduler_preempt(ucontext_t *user_context)
 {
-	/* If FIFO, just return
-	 * TODO: Should this RR? */
-	if (scheduler == SCHEDULER_FIFO) return;
-
-	assert(scheduler == SCHEDULER_EDF);
 	assert(user_context != NULL);

 	/* Process epoll to make sure that all runnable jobs are considered for execution */
@@ -160,7 +160,7 @@ scheduler_preempt(ucontext_t *user_context)
 	struct sandbox *next = scheduler_get_next();
 	assert(next != NULL);

-	/* If current equals return, we are already running earliest deadline, so resume execution */
+	/* If current equals next, no switch is necessary, so resume execution */
 	if (current == next) return;

 #ifdef LOG_PREEMPTION
@@ -175,10 +175,37 @@ scheduler_preempt(ucontext_t *user_context)
 	assert(next->state == SANDBOX_RUNNABLE);
 	sandbox_set_as_running(next, SANDBOX_RUNNABLE);

-	/* A sandbox cannot be preempted by a slow context because this was in the
-	 * runqueue during the last scheduling decision. */
-	assert(next->ctxt.variant == ARCH_CONTEXT_VARIANT_FAST);
-	arch_context_restore_new(&user_context->uc_mcontext, &next->ctxt);
+	switch (next->ctxt.variant) {
+	case ARCH_CONTEXT_VARIANT_FAST: {
+		arch_context_restore_new(&user_context->uc_mcontext, &next->ctxt);
+		break;
+	}
+	case ARCH_CONTEXT_VARIANT_SLOW: {
+		/* Our scheduler restores a fast context when switching to a sandbox that cooperatively yielded
+		 * (probably by blocking) or when switching to a freshly allocated sandbox that hasn't yet run.
+		 * These conditions can occur in either EDF or FIFO.
+		 *
+		 * A scheduler restores a slow context when switching to a sandbox that was preempted previously.
+		 * Under EDF, a sandbox is only ever preempted by an earlier deadline that either had blocked and
+		 * since become runnable or was just freshly allocated. This means that such EDF preemption context
+		 * switches should always use a fast context.
+		 *
+		 * This is not true under FIFO, where there is no innate ordering between sandboxes. A runqueue
+		 * normally holds only a single sandbox, but it may hold multiple sandboxes when one blocks and the
+		 * worker pulls an additional request. When the blocked sandbox becomes runnable, the executing
+		 * sandbox can be preempted, yielding a slow context. This means that FIFO preemption context
+		 * switches might restore either a fast or a slow context during "round robin" execution.
+		 */
+		assert(scheduler != SCHEDULER_EDF);
+		arch_mcontext_restore(&user_context->uc_mcontext, &next->ctxt);
+		break;
+	}
+	default: {
+		panic("Unexpectedly tried to switch to a context in %s state\n",
+		      arch_context_variant_print(next->ctxt.variant));
+	}
+	}
 }

 static inline char *
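The comment in the SLOW case boils down to a simple rule: a sandbox that cooperatively yielded or has never run carries a fast context, while a sandbox that was previously preempted carries a slow (full mcontext) one, and under FIFO either kind can reach the head of the runqueue. A compilable toy sketch of that dispatch, with hypothetical names and printf stubs standing in for the real arch_context restore calls:

#include <stdio.h>

/* Toy stand-ins for the runtime's context variants (not the real arch_context API) */
enum ctxt_variant { VARIANT_FAST, VARIANT_SLOW };

struct toy_sandbox {
	const char       *name;
	enum ctxt_variant variant;
};

/* Dispatch modeled on the switch in scheduler_preempt: fast contexts use the cheap
 * register-only restore, slow contexts restore the full mcontext saved at preemption */
static void
toy_restore(const struct toy_sandbox *next)
{
	switch (next->variant) {
	case VARIANT_FAST:
		printf("%s: restore fast context (yielded or never ran)\n", next->name);
		break;
	case VARIANT_SLOW:
		printf("%s: restore slow context (was preempted earlier)\n", next->name);
		break;
	}
}

int
main(void)
{
	/* FIFO scenario: A blocked, the worker pulled B, A became runnable again, then B was
	 * preempted by the timer. Rotating the runqueue can now hand back either one. */
	struct toy_sandbox a = { "sandbox A", VARIANT_FAST }; /* blocked, so it yielded cooperatively */
	struct toy_sandbox b = { "sandbox B", VARIANT_SLOW }; /* preempted, so a full mcontext was saved */
	toy_restore(&a);
	toy_restore(&b);
	return 0;
}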

@@ -1,4 +1,5 @@
 #include "client_socket.h"
+#include "current_sandbox.h"
 #include "global_request_scheduler.h"
 #include "local_runqueue_list.h"
 #include "local_runqueue.h"
@@ -20,7 +21,7 @@ local_runqueue_list_get_head()
 }

 /**
- * Removes the thread from the thread-local runqueue
+ * Removes the sandbox from the thread-local runqueue
  * @param sandbox sandbox
  */
 void
@@ -38,32 +39,39 @@ local_runqueue_list_remove_and_return()
 }

 /**
- * Get the next sandbox and then insert at tail to "round robin"
- * @return the sandbox to execute or NULL if none are available
- */
-struct sandbox *
-local_runqueue_list_get_next()
-{
-	if (local_runqueue_list_is_empty()) return NULL;
-
-	/* Execute Round Robin Scheduling Logic */
-	struct sandbox *next_sandbox = local_runqueue_list_remove_and_return();
-	assert(next_sandbox->state == SANDBOX_RUNNABLE);
-
-	return next_sandbox;
-}
-
-/**
- * Append a sandbox to the runqueue
+ * Append a sandbox to the tail of the runqueue
  * @returns the appended sandbox
  */
 void
 local_runqueue_list_append(struct sandbox *sandbox_to_append)
 {
 	assert(sandbox_to_append != NULL);
 	assert(ps_list_singleton_d(sandbox_to_append));
 	ps_list_head_append_d(&local_runqueue_list, sandbox_to_append);
 }

+/* Remove sandbox from head of runqueue and add it to tail */
+void
+local_runqueue_list_rotate()
+{
+	/* If runqueue is size one, skip round robin logic since tail equals head */
+	if (ps_list_head_one_node(&local_runqueue_list)) return;
+
+	struct sandbox *sandbox_at_head = local_runqueue_list_remove_and_return();
+	assert(sandbox_at_head->state == SANDBOX_RUNNING || sandbox_at_head->state == SANDBOX_RUNNABLE);
+	local_runqueue_list_append(sandbox_at_head);
+}
+
+/**
+ * Get the next sandbox
+ * @return the sandbox to execute or NULL if none are available
+ */
+struct sandbox *
+local_runqueue_list_get_next()
+{
+	if (local_runqueue_list_is_empty()) return NULL;
+
+	return local_runqueue_list_get_head();
+}
+
 void
