feat: Round robin for FIFO preemption

main
Sean McBride 4 years ago
parent 69b2a476d4
commit 7f8a11f2d5

@@ -1,3 +1,4 @@
 #pragma once
 
 void local_runqueue_list_initialize();
+void local_runqueue_list_rotate();

@@ -82,6 +82,18 @@ ps_list_ll_empty(struct ps_list *l)
 	return l->n == l;
 }
 
+static inline int
+ps_list_ll_one_node(struct ps_list *l)
+{
+	return l->n != l && l->n == l->p;
+}
+
+static inline int
+ps_list_head_one_node(struct ps_list_head *lh)
+{
+	return ps_list_ll_one_node(&lh->l);
+}
+
 static inline int
 ps_list_head_empty(struct ps_list_head *lh)
 {

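The two new predicates read oddly until you picture ps_list's circular, sentinel-based layout. The standalone toy below (a hypothetical `ring` type mirroring ps_list's `n`/`p` naming, not the real ps_list code) checks the two invariants the patch relies on: an empty list's sentinel points at itself, and with exactly one node the sentinel's next and prev both point at that same node.

#include <assert.h>

/* Toy sentinel ring, not the real ps_list: n = next, p = prev */
struct ring { struct ring *n, *p; };

static int ring_empty(struct ring *s)    { return s->n == s; }
static int ring_one_node(struct ring *s) { return s->n != s && s->n == s->p; }

int main(void) {
	struct ring s, a, b;

	s.n = s.p = &s; /* empty list: sentinel is self-linked */
	assert(ring_empty(&s) && !ring_one_node(&s));

	/* one node: sentinel's n and p both point at it */
	s.n = s.p = &a; a.n = a.p = &s;
	assert(!ring_empty(&s) && ring_one_node(&s));

	/* two nodes: n and p now point at different nodes */
	s.n = &a; a.n = &b; b.n = &s;
	s.p = &b; b.p = &a; a.p = &s;
	assert(!ring_empty(&s) && !ring_one_node(&s));
	return 0;
}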
@@ -68,18 +68,23 @@ static inline struct sandbox *
 scheduler_fifo_get_next()
 {
 	struct sandbox *sandbox = local_runqueue_get_next();
-	struct sandbox_request *sandbox_request = NULL;
 
-	/* If the local runqueue is empty, pull from global request scheduler */
 	if (sandbox == NULL) {
+		struct sandbox_request *sandbox_request;
+		/* If the local runqueue is empty, pull from global request scheduler */
 		if (global_request_scheduler_remove(&sandbox_request) < 0) goto err;
 
 		sandbox = sandbox_allocate(sandbox_request);
 		if (!sandbox) goto err_allocate;
 
 		sandbox_set_as_runnable(sandbox, SANDBOX_INITIALIZED);
-	};
+	} else if (sandbox == current_sandbox_get()) {
+		/* Execute Round Robin Scheduling Logic if the head is the current sandbox */
+		local_runqueue_list_rotate();
+		sandbox = local_runqueue_get_next();
+	}
 
 done:
 	return sandbox;
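The decision the patched scheduler_fifo_get_next makes is: run the head of the local runqueue, unless the head is the sandbox already executing, in which case rotate so another runnable sandbox gets the core. A minimal toy model of just that decision (hypothetical rq_* and fifo_get_next names, integers standing in for sandboxes; the real code pulls from the global request scheduler rather than returning -1 on an empty queue):

#include <stdio.h>

/* Toy runqueue: a fixed array stands in for the linked list */
#define RQ_CAP 4
static int rq[RQ_CAP];
static int rq_len = 0;

static int  rq_head(void)    { return rq_len ? rq[0] : -1; }
static void rq_append(int s) { rq[rq_len++] = s; }

/* Move head to tail, as local_runqueue_list_rotate does */
static void rq_rotate(void) {
	if (rq_len < 2) return;
	int head = rq[0];
	for (int i = 1; i < rq_len; i++) rq[i - 1] = rq[i];
	rq[rq_len - 1] = head;
}

/* Mirrors the patched logic: rotate only when the head is current */
static int fifo_get_next(int current) {
	int next = rq_head();
	if (next == -1) return -1; /* real code pulls a global request here */
	if (next == current) {
		rq_rotate();
		next = rq_head();
	}
	return next;
}

int main(void) {
	rq_append(1); /* the currently executing sandbox sits at the head */
	rq_append(2); /* became runnable while 1 was running */
	printf("next while 1 runs: %d\n", fifo_get_next(1)); /* 2: rotated */
	printf("next after that:   %d\n", fifo_get_next(2)); /* 1: rotated back */
	return 0;
}

Each call while the head is the current sandbox yields the other runnable sandbox, which is the round robin behavior the commit title promises.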
@@ -143,11 +148,6 @@ scheduler_runqueue_initialize()
 static inline void
 scheduler_preempt(ucontext_t *user_context)
 {
-	/* If FIFO, just return
-	 * TODO: Should this RR? */
-	if (scheduler == SCHEDULER_FIFO) return;
-
-	assert(scheduler == SCHEDULER_EDF);
 	assert(user_context != NULL);
 
 	/* Process epoll to make sure that all runnable jobs are considered for execution */
@@ -160,7 +160,7 @@ scheduler_preempt(ucontext_t *user_context)
 	struct sandbox *next = scheduler_get_next();
 	assert(next != NULL);
 
-	/* If current equals return, we are already running earliest deadline, so resume execution */
+	/* If current equals next, no switch is necessary, so resume execution */
 	if (current == next) return;
 
 #ifdef LOG_PREEMPTION
@@ -175,10 +175,37 @@ scheduler_preempt(ucontext_t *user_context)
 	assert(next->state == SANDBOX_RUNNABLE);
 	sandbox_set_as_running(next, SANDBOX_RUNNABLE);
 
-	/* A sandbox cannot be preempted by a slow context because this was in the
-	 * runqueue during the last scheduling decision. */
-	assert(next->ctxt.variant == ARCH_CONTEXT_VARIANT_FAST);
-	arch_context_restore_new(&user_context->uc_mcontext, &next->ctxt);
+	switch (next->ctxt.variant) {
+	case ARCH_CONTEXT_VARIANT_FAST: {
+		arch_context_restore_new(&user_context->uc_mcontext, &next->ctxt);
+		break;
+	}
+	case ARCH_CONTEXT_VARIANT_SLOW: {
+		/* Our scheduler restores a fast context when switching to a sandbox that cooperatively yielded
+		 * (probably by blocking) or when switching to a freshly allocated sandbox that hasn't yet run.
+		 * These conditions can occur in either EDF or FIFO.
+		 *
+		 * A scheduler restores a slow context when switching to a sandbox that was preempted previously.
+		 * Under EDF, a sandbox is only ever preempted by an earlier deadline that either had blocked and
+		 * since become runnable or was just freshly allocated. This means that such EDF preemption context
+		 * switches should always use a fast context.
+		 *
+		 * This is not true under FIFO, where there is no innate ordering between sandboxes. A runqueue
+		 * normally holds only a single sandbox, but it may hold multiple sandboxes when one blocks and the
+		 * worker pulls an additional request. When the blocked sandbox becomes runnable, the executing
+		 * sandbox can be preempted, yielding a slow context. This means that FIFO preemption context
+		 * switches might restore either a fast or a slow context during "round robin" execution.
+		 */
+		assert(scheduler != SCHEDULER_EDF);
+		arch_mcontext_restore(&user_context->uc_mcontext, &next->ctxt);
+		break;
+	}
+	default: {
+		panic("Unexpectedly tried to switch to a context in %s state\n",
+		      arch_context_variant_print(next->ctxt.variant));
+	}
+	}
+}
 
 static inline char *

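The comment block above carries the core argument of the commit: fast contexts come from cooperative yields or freshly allocated sandboxes, slow contexts only from preemption, and only FIFO can legally switch into a slow context. A toy model of that argument (hypothetical names, not SLEdge's actual arch_context API):

#include <assert.h>
#include <stdio.h>

/* Toy stand-in for the two context variants discussed above */
enum ctx_variant { CTX_FAST, CTX_SLOW };

struct toy_sandbox {
	const char      *name;
	enum ctx_variant variant;
};

/* A cooperative yield (e.g., blocking) saves only callee-saved state,
 * leaving a fast context; a preemption signal must capture the full
 * mcontext, leaving a slow context. */
static void block_cooperatively(struct toy_sandbox *s) { s->variant = CTX_FAST; }
static void preempt_via_signal(struct toy_sandbox *s)  { s->variant = CTX_SLOW; }

static void restore(struct toy_sandbox *next, int is_edf) {
	switch (next->variant) {
	case CTX_FAST:
		printf("%s: fast restore (yielded or fresh)\n", next->name);
		break;
	case CTX_SLOW:
		/* As argued above, EDF never preempts into a slow context;
		 * only FIFO round robin reaches this path. */
		assert(!is_edf);
		printf("%s: slow restore (was preempted)\n", next->name);
		break;
	}
}

int main(void) {
	struct toy_sandbox a = { "A", CTX_FAST }, b = { "B", CTX_FAST };

	/* The FIFO scenario from the comment: A blocks, the worker pulls
	 * B, then A becomes runnable and the timer preempts B. */
	block_cooperatively(&a); /* A now holds a fast context */
	preempt_via_signal(&b);  /* B now holds a slow context */

	restore(&a, /* is_edf */ 0); /* fast path */
	restore(&b, /* is_edf */ 0); /* slow path, legal only outside EDF */
	return 0;
}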
@@ -1,4 +1,5 @@
 #include "client_socket.h"
+#include "current_sandbox.h"
 #include "global_request_scheduler.h"
 #include "local_runqueue_list.h"
 #include "local_runqueue.h"
@@ -20,7 +21,7 @@ local_runqueue_list_get_head()
 }
 
 /**
- * Removes the thread from the thread-local runqueue
+ * Removes the sandbox from the thread-local runqueue
  * @param sandbox sandbox
  */
 void
@@ -38,32 +39,39 @@ local_runqueue_list_remove_and_return()
 }
 
 /**
- * Get the next sandbox and then insert at tail to "round robin"
- * @return the sandbox to execute or NULL if none are available
+ * Append a sandbox to the tail of the runqueue
+ * @returns the appended sandbox
  */
-struct sandbox *
-local_runqueue_list_get_next()
+void
+local_runqueue_list_append(struct sandbox *sandbox_to_append)
 {
-	if (local_runqueue_list_is_empty()) return NULL;
+	assert(sandbox_to_append != NULL);
+	assert(ps_list_singleton_d(sandbox_to_append));
+	ps_list_head_append_d(&local_runqueue_list, sandbox_to_append);
+}
 
-	/* Execute Round Robin Scheduling Logic */
-	struct sandbox *next_sandbox = local_runqueue_list_remove_and_return();
-	assert(next_sandbox->state == SANDBOX_RUNNABLE);
-	local_runqueue_list_append(next_sandbox);
+/* Remove sandbox from head of runqueue and add it to tail */
+void
+local_runqueue_list_rotate()
+{
+	/* If runqueue is size one, skip round robin logic since tail equals head */
+	if (ps_list_head_one_node(&local_runqueue_list)) return;
 
-	return next_sandbox;
+	struct sandbox *sandbox_at_head = local_runqueue_list_remove_and_return();
+	assert(sandbox_at_head->state == SANDBOX_RUNNING || sandbox_at_head->state == SANDBOX_RUNNABLE);
+	local_runqueue_list_append(sandbox_at_head);
 }
 
 /**
- * Append a sandbox to the runqueue
- * @returns the appended sandbox
+ * Get the next sandbox
+ * @return the sandbox to execute or NULL if none are available
  */
-void
-local_runqueue_list_append(struct sandbox *sandbox_to_append)
+struct sandbox *
+local_runqueue_list_get_next()
 {
-	assert(sandbox_to_append != NULL);
-	assert(ps_list_singleton_d(sandbox_to_append));
-	ps_list_head_append_d(&local_runqueue_list, sandbox_to_append);
+	if (local_runqueue_list_is_empty()) return NULL;
+	return local_runqueue_list_get_head();
 }
 
 void

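Putting the list changes together, rotate depends on two invariants: the one-node early return (rotating a single-element queue is a no-op, since tail equals head), and the fact that remove_and_return leaves the removed node self-linked, which is exactly the singleton precondition that append asserts. A standalone sketch of both (a toy ring type under assumed semantics, not the real ps_list):

#include <assert.h>
#include <stdio.h>
#include <stddef.h>

struct node { struct node *n, *p; };

static void ring_init(struct node *x) { x->n = x->p = x; }

static int singleton(struct node *x) { return x->n == x && x->p == x; }

static void append(struct node *s, struct node *x) {
	assert(singleton(x)); /* mirrors the ps_list_singleton_d assert */
	x->p = s->p; x->n = s;
	s->p->n = x; s->p = x;
}

static struct node *remove_head(struct node *s) {
	struct node *head = s->n;
	if (head == s) return NULL;     /* empty */
	s->n = head->n; head->n->p = s; /* unlink */
	head->n = head->p = head;       /* re-establish the singleton */
	return head;
}

static int one_node(struct node *s) { return s->n != s && s->n == s->p; }

/* Mirrors local_runqueue_list_rotate: skip when tail equals head */
static void rotate(struct node *s) {
	if (one_node(s)) return;
	struct node *head = remove_head(s);
	if (head) append(s, head);
}

int main(void) {
	struct node s, a, b;
	ring_init(&s); ring_init(&a); ring_init(&b);
	append(&s, &a); append(&s, &b); /* ring: s -> a -> b */
	rotate(&s);                     /* ring: s -> b -> a */
	assert(s.n == &b && s.p == &a);
	rotate(&s);                     /* back to s -> a -> b */
	assert(s.n == &a && s.p == &b);
	puts("rotate round-trips as expected");
	return 0;
}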