feat: all but trylock

main
Sean McBride 4 years ago
parent 6673734857
commit b3215c6dbc

@ -0,0 +1,55 @@
#pragma once
#include <spinlock/mcs.h>
#include "runtime.h"
/* Lock abstraction: currently backed by a Concurrency Kit MCS queue spinlock */
#define LOCK_T ck_spinlock_mcs_t
/**
 * Initializes a lock of type LOCK_T
 * Thin wrapper around ck_spinlock_mcs_init
 * @param lock - the address of the lock
 */
#define LOCK_INIT(lock) ck_spinlock_mcs_init((lock))
/**
 * Checks if a lock is locked
 * NOTE: this is an instantaneous sample; the answer may be stale by the time
 * the caller acts on it, so it is primarily useful inside assert()s
 * @param lock - the address of the lock
 * @returns true if the lock is currently held, false otherwise
 */
#define LOCK_IS_LOCKED(lock) ck_spinlock_mcs_locked((lock))
/**
 * Locks a lock, keeping track of overhead
 * Declares a local MCS queue node NODE_<node_name> and a timestamp
 * PRE_<node_name>, then accumulates the cycles spent waiting for the lock
 * into worker_thread_lock_duration. The node must stay in scope until the
 * matching LOCK_UNLOCK_VERBOSE call, which is why this macro cannot be
 * wrapped in the usual do { ... } while (0).
 * WARNING: expands to multiple statements including declarations — use it
 * only as a standalone statement inside a braced block, never as the
 * unbraced body of an if/else/loop.
 * @param lock - the address of the lock
 * @param node_name - a unique name to identify the lock node, which is prefixed by NODE_
 */
#define LOCK_LOCK_VERBOSE(lock, node_name) \
struct ck_spinlock_mcs(NODE_##node_name); \
uint64_t PRE_##node_name = __getcycles(); \
ck_spinlock_mcs_lock((lock), &(NODE_##node_name)); \
worker_thread_lock_duration += (__getcycles() - PRE_##node_name)
/**
 * Unlocks a lock
 * Must be called in the same lexical scope as the LOCK_LOCK_VERBOSE that
 * created NODE_<node_name>, since it releases via that local queue node
 * @param lock - the address of the lock
 * @param node_name - a unique name to identify the lock node, which is prefixed by NODE_
 */
#define LOCK_UNLOCK_VERBOSE(lock, node_name) ck_spinlock_mcs_unlock((lock), &(NODE_##node_name))
/**
 * Locks a lock, keeping track of overhead
 * Automatically assigns a lock node NODE_DEFAULT and a timestamp PRE_DEFAULT
 * This API can only be used once in a lexical scope. If this isn't true, use LOCK_LOCK_VERBOSE and LOCK_UNLOCK_VERBOSE
 * WARNING: expands to multiple statements including declarations — use it
 * only as a standalone statement inside a braced block, never as the
 * unbraced body of an if/else/loop.
 * @param lock - the address of the lock
 */
#define LOCK_LOCK(lock) LOCK_LOCK_VERBOSE(lock, DEFAULT)
/**
 * Unlocks a lock
 * Uses lock node NODE_DEFAULT and timestamp PRE_DEFAULT, so this assumes use of LOCK_LOCK
 * and must be called in the same lexical scope (NODE_DEFAULT is a local there)
 * This API can only be used once in a lexical scope. If this isn't true, use LOCK_LOCK_VERBOSE and LOCK_UNLOCK_VERBOSE
 * @param lock - the address of the lock
 */
#define LOCK_UNLOCK(lock) LOCK_UNLOCK_VERBOSE(lock, DEFAULT)

@ -1,8 +1,8 @@
#pragma once
#include <spinlock/mcs.h>
#include <stdint.h>
#include "lock.h"
#include "runtime.h"
#include "worker_thread.h"
@ -14,10 +14,10 @@
#endif
struct perf_window {
uint64_t buffer[PERF_WINDOW_BUFFER_SIZE];
uint64_t count;
ck_spinlock_mcs_t queue;
double mean;
uint64_t buffer[PERF_WINDOW_BUFFER_SIZE];
uint64_t count;
LOCK_T lock;
double mean;
};
/**
@ -29,7 +29,7 @@ static inline void
perf_window_update_mean(struct perf_window *self)
{
assert(self != NULL);
assert(ck_spinlock_mcs_locked(&self->queue));
LOCK_IS_LOCKED(&self->lock);
uint64_t limit = self->count;
if (limit > PERF_WINDOW_BUFFER_SIZE) { limit = PERF_WINDOW_BUFFER_SIZE; }
@ -50,7 +50,7 @@ perf_window_initialize(struct perf_window *self)
{
assert(self != NULL);
ck_spinlock_mcs_init(&self->queue);
LOCK_INIT(&self->lock);
self->count = 0;
self->mean = 0;
memset(&self->buffer, 0, sizeof(uint64_t) * PERF_WINDOW_BUFFER_SIZE);
@ -70,15 +70,10 @@ perf_window_add(struct perf_window *self, uint64_t value)
/* A successful invocation should run for a non-zero amount of time */
assert(value > 0);
struct ck_spinlock_mcs lock;
uint64_t pre = __getcycles();
ck_spinlock_mcs_lock(&self->queue, &lock);
worker_thread_lock_duration += (__getcycles() - pre);
LOCK_LOCK(&self->lock);
self->buffer[self->count++ % PERF_WINDOW_BUFFER_SIZE] = value;
perf_window_update_mean(self);
ck_spinlock_mcs_unlock(&self->queue, &lock);
LOCK_UNLOCK(&self->lock);
}
/**

@ -1,8 +1,7 @@
#ifndef PRIORITY_QUEUE_H
#define PRIORITY_QUEUE_H
#include <spinlock/mcs.h>
#include "lock.h"
#include "runtime.h"
#include "worker_thread.h"
@ -20,7 +19,7 @@ typedef uint64_t (*priority_queue_get_priority_fn_t)(void *element);
/* We assume that priority is expressed in terms of a 64 bit unsigned integral */
struct priority_queue {
ck_spinlock_mcs_t queue;
LOCK_T queue;
uint64_t highest_priority;
void * items[MAX];
int first_free;

@ -22,7 +22,7 @@ static inline int
priority_queue_append(struct priority_queue *self, void *new_item)
{
assert(self != NULL);
assert(ck_spinlock_mcs_locked(&self->queue));
assert(LOCK_IS_LOCKED(&self->queue));
if (self->first_free >= MAX) return -ENOSPC;
@ -39,7 +39,7 @@ priority_queue_percolate_up(struct priority_queue *self)
{
assert(self != NULL);
assert(self->get_priority_fn != NULL);
assert(ck_spinlock_mcs_locked(&self->queue));
assert(LOCK_IS_LOCKED(&self->queue));
for (int i = self->first_free - 1;
i / 2 != 0 && self->get_priority_fn(self->items[i]) < self->get_priority_fn(self->items[i / 2]); i /= 2) {
@ -64,7 +64,7 @@ priority_queue_find_smallest_child(struct priority_queue *self, int parent_index
assert(self != NULL);
assert(parent_index >= 1 && parent_index < self->first_free);
assert(self->get_priority_fn != NULL);
assert(ck_spinlock_mcs_locked(&self->queue));
assert(LOCK_IS_LOCKED(&self->queue));
int left_child_index = 2 * parent_index;
int right_child_index = 2 * parent_index + 1;
@ -92,7 +92,7 @@ priority_queue_percolate_down(struct priority_queue *self, int parent_index)
{
assert(self != NULL);
assert(self->get_priority_fn != NULL);
assert(ck_spinlock_mcs_locked(&self->queue));
assert(LOCK_IS_LOCKED(&self->queue));
int left_child_index = 2 * parent_index;
while (left_child_index >= 2 && left_child_index < self->first_free) {
@ -120,7 +120,7 @@ static inline bool
priority_queue_is_empty_locked(struct priority_queue *self)
{
assert(self != NULL);
assert(ck_spinlock_mcs_locked(&self->queue));
assert(LOCK_IS_LOCKED(&self->queue));
return self->first_free == 1;
}
@ -141,7 +141,7 @@ priority_queue_initialize(struct priority_queue *self, priority_queue_get_priori
memset(self->items, 0, sizeof(void *) * MAX);
ck_spinlock_mcs_init(&self->queue);
LOCK_INIT(&self->queue);
self->first_free = 1;
self->get_priority_fn = get_priority_fn;
@ -158,14 +158,9 @@ priority_queue_length(struct priority_queue *self)
{
assert(self != NULL);
struct ck_spinlock_mcs lock;
uint64_t pre = __getcycles();
ck_spinlock_mcs_lock(&self->queue, &lock);
worker_thread_lock_duration += (__getcycles() - pre);
LOCK_LOCK(&self->queue);
int length = self->first_free - 1;
ck_spinlock_mcs_unlock(&self->queue, &lock);
LOCK_UNLOCK(&self->queue);
return length;
}
@ -179,10 +174,7 @@ priority_queue_enqueue(struct priority_queue *self, void *value)
{
assert(self != NULL);
struct ck_spinlock_mcs lock;
uint64_t pre = __getcycles();
ck_spinlock_mcs_lock(&self->queue, &lock);
worker_thread_lock_duration += (__getcycles() - pre);
LOCK_LOCK(&self->queue);
if (priority_queue_append(self, value) == -ENOSPC) return -ENOSPC;
@ -193,7 +185,7 @@ priority_queue_enqueue(struct priority_queue *self, void *value)
priority_queue_percolate_up(self);
}
ck_spinlock_mcs_unlock(&self->queue, &lock);
LOCK_UNLOCK(&self->queue);
return 0;
}
@ -207,10 +199,7 @@ priority_queue_delete(struct priority_queue *self, void *value)
{
assert(self != NULL);
struct ck_spinlock_mcs lock;
uint64_t pre = __getcycles();
ck_spinlock_mcs_lock(&self->queue, &lock);
worker_thread_lock_duration += (__getcycles() - pre);
LOCK_LOCK(&self->queue);
bool did_delete = false;
for (int i = 1; i < self->first_free; i++) {
@ -222,7 +211,7 @@ priority_queue_delete(struct priority_queue *self, void *value)
}
}
ck_spinlock_mcs_unlock(&self->queue, &lock);
LOCK_UNLOCK(&self->queue);
if (!did_delete) return -1;
return 0;

Loading…
Cancel
Save