feat: complete lock refactor

master
Sean McBride 5 years ago
parent b3215c6dbc
commit f8f1db7eaf

@@ -4,10 +4,10 @@
 #include "runtime.h"

-#define LOCK_T ck_spinlock_mcs_t
+typedef ck_spinlock_mcs_t lock_t;

 /**
- * Initializes a lock of type LOCK_T
+ * Initializes a lock of type lock_t
  * @param lock - the address of the lock
  */
 #define LOCK_INIT(lock) ck_spinlock_mcs_init((lock))
@@ -23,33 +23,37 @@
 /**
  * Locks a lock, keeping track of overhead
  * @param lock - the address of the lock
- * @param node_name - a unique name to identify the lock node, which is prefixed by NODE_
+ * @param hygienic_prefix - a unique prefix to hygienically namespace an associated lock/unlock pair
  */
-#define LOCK_LOCK_VERBOSE(lock, node_name) \
-	struct ck_spinlock_mcs(NODE_##node_name); \
-	uint64_t PRE_##node_name = __getcycles(); \
-	ck_spinlock_mcs_lock((lock), &(NODE_##node_name)); \
-	worker_thread_lock_duration += (__getcycles() - PRE_##node_name)
+#define LOCK_LOCK_WITH_BOOKKEEPING(lock, hygienic_prefix) \
+	struct ck_spinlock_mcs hygienic_prefix##_node; \
+	uint64_t hygienic_prefix##_pre = __getcycles(); \
+	ck_spinlock_mcs_lock((lock), &(hygienic_prefix##_node)); \
+	worker_thread_lock_duration += (__getcycles() - hygienic_prefix##_pre)

 /**
  * Unlocks a lock
  * @param lock - the address of the lock
- * @param node_name - a unique name to identify the lock node, which is prefixed by NODE_
+ * @param hygienic_prefix - a unique prefix to hygienically namespace an associated lock/unlock pair
  */
-#define LOCK_UNLOCK_VERBOSE(lock, node_name) ck_spinlock_mcs_unlock(lock, &(NODE_##node_name))
+#define LOCK_UNLOCK_WITH_BOOKKEEPING(lock, hygienic_prefix) ck_spinlock_mcs_unlock(lock, &(hygienic_prefix##_node))

 /**
  * Locks a lock, keeping track of overhead
- * Automatically assigns a lock node NODE_DEFAULT and a timestamp PRE_DEFAULT
- * This API can only be used once in a lexical scope. If this isn't true, use LOCK_LOCK_VERBOSE and LOCK_UNLOCK_VERBOSE
+ * Assumes the availability of DEFAULT as a hygienic prefix for DEFAULT_node and DEFAULT_pre
+ *
+ * As such, this API can only be used once in a lexical scope.
+ *
+ * Use LOCK_LOCK_WITH_BOOKKEEPING and LOCK_UNLOCK_WITH_BOOKKEEPING if multiple locks are required
  * @param lock - the address of the lock
  */
-#define LOCK_LOCK(lock) LOCK_LOCK_VERBOSE(lock, DEFAULT)
+#define LOCK_LOCK(lock) LOCK_LOCK_WITH_BOOKKEEPING(lock, DEFAULT)

 /**
  * Unlocks a lock
  * Uses lock node NODE_DEFAULT and timestamp PRE_DEFAULT, so this assumes use of LOCK_LOCK
- * This API can only be used once in a lexical scope. If this isn't true, use LOCK_LOCK_VERBOSE and LOCK_UNLOCK_VERBOSE
+ * This API can only be used once in a lexical scope. If this isn't true, use LOCK_LOCK_WITH_BOOKKEEPING and
+ * LOCK_UNLOCK_WITH_BOOKKEEPING
  * @param lock - the address of the lock
  */
-#define LOCK_UNLOCK(lock) LOCK_UNLOCK_VERBOSE(lock, DEFAULT)
+#define LOCK_UNLOCK(lock) LOCK_UNLOCK_WITH_BOOKKEEPING(lock, DEFAULT)
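To illustrate how the renamed macros compose, here is a minimal usage sketch. It is not part of this commit, and the function and prefix names (two_locks, single_lock, FIRST, SECOND) are hypothetical; it only assumes the refactored lock header above.

/* Sketch only (not in this commit): taking two locks in one lexical scope
 * requires the *_WITH_BOOKKEEPING variants with distinct hygienic prefixes,
 * because LOCK_LOCK/LOCK_UNLOCK hard-code the DEFAULT prefix and a second use
 * would redeclare DEFAULT_node and DEFAULT_pre. */
static void
two_locks(lock_t *a, lock_t *b)
{
	LOCK_LOCK_WITH_BOOKKEEPING(a, FIRST);  /* declares FIRST_node and FIRST_pre */
	LOCK_LOCK_WITH_BOOKKEEPING(b, SECOND); /* declares SECOND_node and SECOND_pre */
	/* ... critical section ... */
	LOCK_UNLOCK_WITH_BOOKKEEPING(b, SECOND);
	LOCK_UNLOCK_WITH_BOOKKEEPING(a, FIRST);
}

/* A scope that takes a single lock can use the DEFAULT-prefixed shorthand */
static void
single_lock(lock_t *l)
{
	LOCK_LOCK(l);
	/* ... critical section ... */
	LOCK_UNLOCK(l);
}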

@@ -16,7 +16,7 @@
 struct perf_window {
 	uint64_t buffer[PERF_WINDOW_BUFFER_SIZE];
 	uint64_t count;
-	LOCK_T lock;
+	lock_t lock;
 	double mean;
 };
@@ -29,7 +29,7 @@ static inline void
 perf_window_update_mean(struct perf_window *self)
 {
 	assert(self != NULL);
-	LOCK_IS_LOCKED(&self->lock);
+	assert(LOCK_IS_LOCKED(&self->lock));

 	uint64_t limit = self->count;
 	if (limit > PERF_WINDOW_BUFFER_SIZE) { limit = PERF_WINDOW_BUFFER_SIZE; }

@@ -19,7 +19,7 @@ typedef uint64_t (*priority_queue_get_priority_fn_t)(void *element);
 /* We assume that priority is expressed in terms of a 64 bit unsigned integral */
 struct priority_queue {
-	LOCK_T queue;
+	lock_t lock;
 	uint64_t highest_priority;
 	void * items[MAX];
 	int first_free;

@@ -28,7 +28,7 @@ global_request_scheduler_minheap_add(void *sandbox_request)
 /**
  * @param pointer to the pointer that we want to set to the address of the removed sandbox request
- * @returns 0 if successful, -ENOENT if empty, -EAGAIN if unable to take lock
+ * @returns 0 if successful, -ENOENT if empty
  */
 int
 global_request_scheduler_minheap_remove(struct sandbox_request **removed_sandbox_request)

@@ -45,10 +45,7 @@ static int
 local_runqueue_minheap_remove(struct sandbox **to_remove)
 {
 	assert(!software_interrupt_is_enabled());
-	int rc = priority_queue_dequeue(&local_runqueue_minheap, (void **)to_remove);
-	if (rc == -EAGAIN) panic("Worker unexpectedly unable to take lock on own runqueue\n");
-	return rc;
+	return priority_queue_dequeue(&local_runqueue_minheap, (void **)to_remove);
 }

 /**
@@ -85,9 +82,7 @@ local_runqueue_minheap_get_next()
 	struct sandbox_request *sandbox_request;

 	int sandbox_rc = priority_queue_top(&local_runqueue_minheap, (void **)&sandbox);
-	if (sandbox_rc == -EAGAIN) {
-		panic("Worker unexpectedly unable to take lock on own runqueue\n");
-	} else if (sandbox_rc == -ENOENT) {
+	if (sandbox_rc == -ENOENT) {
 		/* local runqueue empty, try to pull a sandbox request */
 		if (global_request_scheduler_remove(&sandbox_request) < 0) goto done;

@@ -22,7 +22,7 @@ static inline int
 priority_queue_append(struct priority_queue *self, void *new_item)
 {
 	assert(self != NULL);
-	assert(LOCK_IS_LOCKED(&self->queue));
+	assert(LOCK_IS_LOCKED(&self->lock));

 	if (self->first_free >= MAX) return -ENOSPC;
@@ -39,7 +39,7 @@ priority_queue_percolate_up(struct priority_queue *self)
 {
 	assert(self != NULL);
 	assert(self->get_priority_fn != NULL);
-	assert(LOCK_IS_LOCKED(&self->queue));
+	assert(LOCK_IS_LOCKED(&self->lock));

 	for (int i = self->first_free - 1;
 	     i / 2 != 0 && self->get_priority_fn(self->items[i]) < self->get_priority_fn(self->items[i / 2]); i /= 2) {
@@ -64,7 +64,7 @@ priority_queue_find_smallest_child(struct priority_queue *self, int parent_index
 	assert(self != NULL);
 	assert(parent_index >= 1 && parent_index < self->first_free);
 	assert(self->get_priority_fn != NULL);
-	assert(LOCK_IS_LOCKED(&self->queue));
+	assert(LOCK_IS_LOCKED(&self->lock));

 	int left_child_index = 2 * parent_index;
 	int right_child_index = 2 * parent_index + 1;
@@ -92,7 +92,7 @@ priority_queue_percolate_down(struct priority_queue *self, int parent_index)
 {
 	assert(self != NULL);
 	assert(self->get_priority_fn != NULL);
-	assert(LOCK_IS_LOCKED(&self->queue));
+	assert(LOCK_IS_LOCKED(&self->lock));

 	int left_child_index = 2 * parent_index;
 	while (left_child_index >= 2 && left_child_index < self->first_free) {
@@ -120,7 +120,7 @@ static inline bool
 priority_queue_is_empty_locked(struct priority_queue *self)
 {
 	assert(self != NULL);
-	assert(LOCK_IS_LOCKED(&self->queue));
+	assert(LOCK_IS_LOCKED(&self->lock));

 	return self->first_free == 1;
 }
@@ -141,7 +141,7 @@ priority_queue_initialize(struct priority_queue *self, priority_queue_get_priori
 	memset(self->items, 0, sizeof(void *) * MAX);

-	LOCK_INIT(&self->queue);
+	LOCK_INIT(&self->lock);
 	self->first_free = 1;
 	self->get_priority_fn = get_priority_fn;
@@ -158,9 +158,9 @@ priority_queue_length(struct priority_queue *self)
 {
 	assert(self != NULL);

-	LOCK_LOCK(&self->queue);
+	LOCK_LOCK(&self->lock);
 	int length = self->first_free - 1;
-	LOCK_UNLOCK(&self->queue);
+	LOCK_UNLOCK(&self->lock);
 	return length;
 }
@@ -174,7 +174,7 @@ priority_queue_enqueue(struct priority_queue *self, void *value)
 {
 	assert(self != NULL);

-	LOCK_LOCK(&self->queue);
+	LOCK_LOCK(&self->lock);

 	if (priority_queue_append(self, value) == -ENOSPC) return -ENOSPC;
@@ -185,7 +185,7 @@ priority_queue_enqueue(struct priority_queue *self, void *value)
 		priority_queue_percolate_up(self);
 	}

-	LOCK_UNLOCK(&self->queue);
+	LOCK_UNLOCK(&self->lock);

 	return 0;
 }
@@ -199,7 +199,7 @@ priority_queue_delete(struct priority_queue *self, void *value)
 {
 	assert(self != NULL);

-	LOCK_LOCK(&self->queue);
+	LOCK_LOCK(&self->lock);

 	bool did_delete = false;
 	for (int i = 1; i < self->first_free; i++) {
@@ -211,7 +211,7 @@ priority_queue_delete(struct priority_queue *self, void *value)
 		}
 	}

-	LOCK_UNLOCK(&self->queue);
+	LOCK_UNLOCK(&self->lock);

 	if (!did_delete) return -1;
 	return 0;
@@ -220,7 +220,7 @@ priority_queue_delete(struct priority_queue *self, void *value)
 /**
  * @param self - the priority queue we want to add to
  * @param dequeued_element a pointer to set to the dequeued element
- * @returns RC 0 if successfully set dequeued_element, -ENOENT if empty, -EAGAIN if unable to take lock
+ * @returns RC 0 if successfully set dequeued_element, -ENOENT if empty
  */
 int
 priority_queue_dequeue(struct priority_queue *self, void **dequeued_element)
@@ -231,14 +231,7 @@ priority_queue_dequeue(struct priority_queue *self, void **dequeued_element)
 	int return_code;

-	struct ck_spinlock_mcs lock;
-	uint64_t pre = __getcycles();
-	if (ck_spinlock_mcs_trylock(&self->queue, &lock) == false) {
-		worker_thread_lock_duration += (__getcycles() - pre);
-		return_code = -EAGAIN;
-		goto done;
-	};
-	worker_thread_lock_duration += (__getcycles() - pre);
+	LOCK_LOCK(&self->lock);

 	if (priority_queue_is_empty_locked(self)) {
 		return_code = -ENOENT;
@@ -260,7 +253,7 @@ priority_queue_dequeue(struct priority_queue *self, void **dequeued_element)
 	return_code = 0;

 release_lock:
-	ck_spinlock_mcs_unlock(&self->queue, &lock);
+	LOCK_UNLOCK(&self->lock);

 done:
 	return return_code;
 }
@@ -269,7 +262,7 @@ done:
  * Returns the top of the priority queue without removing it
  * @param self - the priority queue we want to add to
  * @param dequeued_element a pointer to set to the top element
- * @returns RC 0 if successfully set dequeued_element, -ENOENT if empty, -EAGAIN if unable to take lock
+ * @returns RC 0 if successfully set dequeued_element, -ENOENT if empty
  */
 int
 priority_queue_top(struct priority_queue *self, void **dequeued_element)
@@ -280,14 +273,7 @@ priority_queue_top(struct priority_queue *self, void **dequeued_element)
 	int return_code;

-	struct ck_spinlock_mcs lock;
-	uint64_t pre = __getcycles();
-	if (ck_spinlock_mcs_trylock(&self->queue, &lock) == false) {
-		worker_thread_lock_duration += (__getcycles() - pre);
-		return_code = -EAGAIN;
-		goto done;
-	};
-	worker_thread_lock_duration += (__getcycles() - pre);
+	LOCK_LOCK(&self->lock);

 	if (priority_queue_is_empty_locked(self)) {
 		return_code = -ENOENT;
@@ -298,7 +284,7 @@ priority_queue_top(struct priority_queue *self, void **dequeued_element)
 	return_code = 0;

 release_lock:
-	ck_spinlock_mcs_unlock(&self->queue, &lock);
+	LOCK_UNLOCK(&self->lock);

 done:
 	return return_code;
 }
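For reference (editorial note, not part of the commit), the single LOCK_LOCK(&self->lock) call that replaces the hand-rolled trylock block in priority_queue_dequeue and priority_queue_top expands, per the macros defined above, roughly to:

	/* Approximate expansion of LOCK_LOCK(&self->lock), using the DEFAULT prefix */
	struct ck_spinlock_mcs DEFAULT_node;
	uint64_t DEFAULT_pre = __getcycles();
	ck_spinlock_mcs_lock((&self->lock), &(DEFAULT_node));
	worker_thread_lock_duration += (__getcycles() - DEFAULT_pre);

Because ck_spinlock_mcs_lock spins until the lock is acquired, rather than failing the way the removed ck_spinlock_mcs_trylock path did, these functions can no longer return -EAGAIN. That is why their doc comments drop the -EAGAIN return code and why the local_runqueue_minheap callers above no longer need an "unable to take lock" panic branch.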
