ck_cohort_rw: Initial implementation with validation test.

I still need to implement benchmarks and write documentation. The reader-writer cohort locks also required adding a method to the existing ck_cohort framework that reports whether a cohort lock is currently in a locked state.
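
For reference, here is a minimal usage sketch of the new API, pieced together from the macros this commit adds. The wrapper and instance names (fas_lock, rw, and so on) are illustrative only; the validation test included below sets up the same thing under longer names.

#include <stdbool.h>

#include <ck_cohort.h>
#include <ck_cohort_rw.h>
#include <ck_spinlock.h>

/* Context-taking wrappers so plain fas spinlock operations match the
 * signatures that CK_COHORT_PROTOTYPE expects. */
static void
fas_lock(ck_spinlock_fas_t *lock, void *context)
{
        (void)context;
        ck_spinlock_fas_lock(lock);
}

static void
fas_unlock(ck_spinlock_fas_t *lock, void *context)
{
        (void)context;
        ck_spinlock_fas_unlock(lock);
}

static bool
fas_locked(ck_spinlock_fas_t *lock, void *context)
{
        (void)context;
        return ck_spinlock_fas_locked(lock);
}

/* Note the two new "is locked" parameters (GI, LI) in the prototype. */
CK_COHORT_PROTOTYPE(fas_fas, fas_lock, fas_unlock, fas_locked,
    fas_lock, fas_unlock, fas_locked)
CK_COHORT_RW_PROTOTYPE(fas_fas)

static ck_spinlock_fas_t global_lock = CK_SPINLOCK_FAS_INITIALIZER;
static ck_spinlock_fas_t local_lock = CK_SPINLOCK_FAS_INITIALIZER;
static CK_COHORT_INSTANCE(fas_fas) cohort = CK_COHORT_INITIALIZER;
static CK_COHORT_RW_INSTANCE(fas_fas) rw = CK_COHORT_RW_INITIALIZER;

static void
example(void)
{
        CK_COHORT_INIT(fas_fas, &cohort, &global_lock, &local_lock,
            CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
        CK_COHORT_RW_INIT(fas_fas, &rw, CK_COHORT_RW_DEFAULT_WAIT_LIMIT);

        /* A writer acquires the underlying cohort lock, then waits for
         * active readers to drain. */
        CK_COHORT_RW_WRITE_LOCK(fas_fas, &rw, &cohort, NULL, NULL);
        CK_COHORT_RW_WRITE_UNLOCK(fas_fas, &rw, &cohort, NULL, NULL);

        /* A reader only increments a shared counter, provided no writer
         * holds the cohort lock. */
        CK_COHORT_RW_READ_LOCK(fas_fas, &rw, &cohort, NULL, NULL);
        CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw);
}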
Brendon Scheinman 12 years ago
parent 91ef0220c7
commit 5e1b614108

@@ -51,8 +51,9 @@ enum ck_cohort_state {
 #define CK_COHORT_LOCK(N, C, GC, LC) ck_cohort_##N##_lock(C, GC, LC)
 #define CK_COHORT_UNLOCK(N, C, GC, LC) ck_cohort_##N##_unlock(C, GC, LC)
 #define CK_COHORT_TRYLOCK(N, C, GLC, LLC, LUC) ck_cohort_##N##_trylock(C, GLC, LLC, LUC)
+#define CK_COHORT_LOCKED(N, C, GC, LC) ck_cohort_##N##_locked(C, GC, LC)
 
-#define CK_COHORT_PROTOTYPE(N, GL, GU, LL, LU) \
+#define CK_COHORT_PROTOTYPE(N, GL, GU, GI, LL, LU, LI) \
 CK_COHORT_INSTANCE(N) { \
 void *global_lock; \
 void *local_lock; \
@@ -111,10 +112,18 @@ enum ck_cohort_state {
 LU(cohort->local_lock, local_context); \
 \
 return; \
-}
+} \
+\
+CK_CC_INLINE static bool \
+ck_cohort_##N##_locked(CK_COHORT_INSTANCE(N) *cohort, \
+void *global_context, void *local_context) \
+{ \
+return GI(cohort->global_lock, global_context) || \
+LI(cohort->local_lock, local_context); \
+}
 
-#define CK_COHORT_TRYLOCK_PROTOTYPE(N, GL, GU, GTL, LL, LU, LTL) \
-CK_COHORT_PROTOTYPE(N, GL, GU, LL, LU) \
+#define CK_COHORT_TRYLOCK_PROTOTYPE(N, GL, GU, GI, GTL, LL, LU, LI, LTL) \
+CK_COHORT_PROTOTYPE(N, GL, GU, GI, LL, LU, LI) \
 CK_CC_INLINE static bool \
 ck_cohort_##N##_trylock(CK_COHORT_INSTANCE(N) *cohort, \
 void *global_context, void *local_context, \
@@ -132,7 +141,7 @@ enum ck_cohort_state {
 \
 if (cohort->release_state == CK_COHORT_STATE_GLOBAL && \
 GTL(cohort->global_lock, global_context) == false) { \
-LU(cohort->local_lock, local_unlock_context); \
+LU(cohort->local_lock, local_unlock_context); \
 return false; \
 } \
 \

@@ -0,0 +1,142 @@
/*
* Copyright 2013 Samy Al Bahra.
* Copyright 2013 Brendon Scheinman.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _CK_COHORT_RW_H
#define _CK_COHORT_RW_H
/*
* This is an implementation of NUMA-aware reader-writer locks as described in:
* Calciu, I.; Dice, D.; Lev, Y.; Luchangco, V.; Marathe, V.; and Shavit, N. 2013.
* NUMA-Aware Reader-Writer Locks
*/
#include <ck_cc.h>
#include <ck_pr.h>
#include <stddef.h>
#include <ck_cohort.h>
#define CK_COHORT_RW_NAME(N) ck_cohort_rw_##N
#define CK_COHORT_RW_INSTANCE(N) struct CK_COHORT_RW_NAME(N)
#define CK_COHORT_RW_INIT(N, RW, WL) ck_cohort_rw_##N##_init(RW, WL)
#define CK_COHORT_RW_READ_LOCK(N, RW, C, GC, LC) ck_cohort_rw_##N##_read_lock(RW, C, GC, LC)
#define CK_COHORT_RW_READ_UNLOCK(N, RW) ck_cohort_rw_##N##_read_unlock(RW)
#define CK_COHORT_RW_WRITE_LOCK(N, RW, C, GC, LC) ck_cohort_rw_##N##_write_lock(RW, C, GC, LC)
#define CK_COHORT_RW_WRITE_UNLOCK(N, RW, C, GC, LC) ck_cohort_rw_##N##_write_unlock(RW, C, GC, LC)
#define CK_COHORT_RW_DEFAULT_WAIT_LIMIT 1000
#define CK_COHORT_RW_PROTOTYPE(N) \
CK_COHORT_RW_INSTANCE(N) { \
CK_COHORT_INSTANCE(N) *cohort; \
unsigned int read_counter; \
unsigned int write_barrier; \
unsigned int wait_limit; \
}; \
\
CK_CC_INLINE static void \
ck_cohort_rw_##N##_init(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \
unsigned int wait_limit) \
{ \
rw_cohort->read_counter = 0; \
rw_cohort->write_barrier = 0; \
rw_cohort->wait_limit = wait_limit; \
ck_pr_barrier(); \
return; \
} \
\
CK_CC_INLINE static void \
ck_cohort_rw_##N##_write_lock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \
CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
void *local_context) \
{ \
while (ck_pr_load_uint(&rw_cohort->write_barrier) > 0) { \
ck_pr_stall(); \
} \
\
CK_COHORT_LOCK(N, cohort, global_context, local_context); \
\
while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \
ck_pr_stall(); \
} \
\
return; \
} \
\
CK_CC_INLINE static void \
ck_cohort_rw_##N##_write_unlock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \
CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
void *local_context) \
{ \
(void)rw_cohort; \
CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
} \
\
CK_CC_INLINE static void \
ck_cohort_rw_##N##_read_lock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \
CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
void *local_context) \
{ \
unsigned int wait_count = 0; \
bool raised = false; \
start: \
ck_pr_inc_uint(&rw_cohort->read_counter); \
if (CK_COHORT_LOCKED(N, cohort, global_context, local_context) \
== true) { \
ck_pr_dec_uint(&rw_cohort->read_counter); \
while (CK_COHORT_LOCKED(N, cohort, global_context, \
local_context) == true) { \
ck_pr_stall(); \
if (++wait_count > rw_cohort->wait_limit \
&& raised == false) { \
ck_pr_inc_uint( \
&rw_cohort->write_barrier); \
raised = true; \
} \
} \
goto start; \
} \
\
if (raised == true) { \
ck_pr_dec_uint(&rw_cohort->write_barrier); \
} \
\
return; \
} \
\
CK_CC_INLINE static void \
ck_cohort_rw_##N##_read_unlock(CK_COHORT_RW_INSTANCE(N) *rw_cohort) \
{ \
ck_pr_dec_uint(&rw_cohort->read_counter); \
}
#define CK_COHORT_RW_INITIALIZER { \
.cohort = NULL, \
.read_counter = 0, \
.write_barrier = 0, \
.wait_limit = 0 \
}
#endif /* _CK_COHORT_RW_H */
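
A note on the read-lock slow path above: once a reader has spun past wait_limit, it raises write_barrier, which holds any new writer at the top of write_lock until the starving reader gets through. Below is a self-contained model of that handshake using C11 atomics in place of ck_pr. Every name in it (cohort_is_locked, WAIT_LIMIT, and so on) is an illustrative stand-in rather than part of ck, and the two-level cohort lock is reduced to a single test-and-set lock.

#include <stdatomic.h>
#include <stdbool.h>

#define WAIT_LIMIT 1000U

/* Stand-in for the cohort lock: a plain test-and-set spinlock. */
static atomic_bool cohort_lock_state;

static bool cohort_is_locked(void) { return atomic_load(&cohort_lock_state); }
static void cohort_lock(void) { while (atomic_exchange(&cohort_lock_state, true)) ; }
static void cohort_unlock(void) { atomic_store(&cohort_lock_state, false); }

static atomic_uint read_counter;   /* readers currently inside */
static atomic_uint write_barrier;  /* starving readers holding writers back */

static void
write_lock(void)
{
        /* New writers yield while a starving reader holds the barrier up. */
        while (atomic_load(&write_barrier) > 0)
                ;
        cohort_lock();
        /* Wait for in-flight readers to drain before entering. */
        while (atomic_load(&read_counter) > 0)
                ;
}

static void
write_unlock(void)
{
        cohort_unlock();
}

static void
read_lock(void)
{
        unsigned int waits = 0;
        bool raised = false;

        for (;;) {
                /* Optimistically announce this reader... */
                atomic_fetch_add(&read_counter, 1);
                if (!cohort_is_locked())
                        break;  /* fast path: no writer active */

                /* ...otherwise back out and spin until the writer leaves,
                 * raising the barrier once we have waited long enough. */
                atomic_fetch_sub(&read_counter, 1);
                while (cohort_is_locked()) {
                        if (++waits > WAIT_LIMIT && !raised) {
                                atomic_fetch_add(&write_barrier, 1);
                                raised = true;
                        }
                }
        }

        if (raised)
                atomic_fetch_sub(&write_barrier, 1);
}

static void
read_unlock(void)
{
        atomic_fetch_sub(&read_counter, 1);
}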

@@ -0,0 +1,33 @@
#define LOCK_NAME "ck_cohort"
#define LOCK_DEFINE\
static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;\
static ck_spinlock_fas_t local_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;\
static void\
ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)\
{\
(void)context;\
ck_spinlock_fas_lock(lock);\
}\
\
static void\
ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)\
{\
(void)context;\
ck_spinlock_fas_unlock(lock);\
}\
static bool\
ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context)\
{\
(void)context;\
return ck_spinlock_fas_locked(lock);\
}\
CK_COHORT_PROTOTYPE(fas_fas,\
ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context,\
ck_spinlock_fas_locked_with_context,\
ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context,\
ck_spinlock_fas_locked_with_context)\
static CK_COHORT_INSTANCE(fas_fas) CK_CC_CACHELINE cohort = CK_COHORT_INITIALIZER
#define LOCK_INIT CK_COHORT_INIT(fas_fas, &cohort, &global_fas_lock, &local_fas_lock,\
CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT)
#define LOCK CK_COHORT_LOCK(fas_fas, &cohort, NULL, NULL)
#define UNLOCK CK_COHORT_UNLOCK(fas_fas, &cohort, NULL, NULL)
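
This header plugs the cohort lock into the shared lock benchmark harness, which presumably consumes the LOCK_NAME, LOCK_DEFINE, LOCK_INIT, LOCK, and UNLOCK macros defined here; per the commit message above, benchmarks for the reader-writer variant itself are still to come. Note that the CK_COHORT_PROTOTYPE call supplies the new "is locked" arguments via the same wrapper shape used in the validation test.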

@ -0,0 +1,17 @@
.PHONY: check clean distribution
OBJECTS=validate
all: $(OBJECTS)
validate: validate.c ../../../include/ck_cohort_rw.h
$(CC) $(CFLAGS) -o validate validate.c -g
check: all
./validate $(CORES) 1 1
clean:
rm -rf *.dSYM *~ *.o $(OBJECTS)
include ../../../build/regressions.build
CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
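
Note that validate expects three arguments: the number of cohorts, the number of threads per cohort, and the affinity delta. For example, ./validate 2 2 1 (values chosen purely for illustration) exercises two cohorts of two threads each with an affinity delta of 1.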

@@ -0,0 +1,212 @@
/*
* Copyright 2013 Samy Al Bahra.
* Copyright 2013 Brendon Scheinman.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <sys/time.h>
#include <ck_pr.h>
#include <ck_cohort_rw.h>
#include <ck_spinlock.h>
#include "../../common.h"
#ifndef ITERATE
#define ITERATE 1000000
#endif
static struct affinity a;
static unsigned int locked;
static int nthr;
static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;
static void
ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)
{
(void)context;
ck_spinlock_fas_lock(lock);
}
static void
ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)
{
(void)context;
ck_spinlock_fas_unlock(lock);
}
static bool
ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context)
{
(void)context;
return ck_spinlock_fas_locked(lock);
}
CK_COHORT_PROTOTYPE(fas_fas,
ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context,
ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context)
CK_COHORT_RW_PROTOTYPE(fas_fas)
static CK_COHORT_INSTANCE(fas_fas) *cohorts;
static CK_COHORT_RW_INSTANCE(fas_fas) rw_cohort = CK_COHORT_RW_INITIALIZER;
static int n_cohorts;
static void *
thread(void *null CK_CC_UNUSED)
{
int i = ITERATE;
unsigned int l;
unsigned int core;
CK_COHORT_INSTANCE(fas_fas) *cohort;
if (aff_iterate_core(&a, &core)) {
perror("ERROR: Could not affine thread");
exit(EXIT_FAILURE);
}
cohort = cohorts + (core / (int)(a.delta)) % n_cohorts;
while (i--) {
CK_COHORT_RW_WRITE_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
{
l = ck_pr_load_uint(&locked);
if (l != 0) {
ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
}
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
l = ck_pr_load_uint(&locked);
if (l != 8) {
ck_error("ERROR [WR:%d]: %u != 2\n", __LINE__, l);
}
ck_pr_dec_uint(&locked);
ck_pr_dec_uint(&locked);
ck_pr_dec_uint(&locked);
ck_pr_dec_uint(&locked);
ck_pr_dec_uint(&locked);
ck_pr_dec_uint(&locked);
ck_pr_dec_uint(&locked);
ck_pr_dec_uint(&locked);
l = ck_pr_load_uint(&locked);
if (l != 0) {
ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
}
}
CK_COHORT_RW_WRITE_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
{
l = ck_pr_load_uint(&locked);
if (l != 0) {
ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
}
}
CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort);
}
return (NULL);
}
int
main(int argc, char *argv[])
{
pthread_t *threads;
int threads_per_cohort;
ck_spinlock_fas_t *local_lock;
int i;
if (argc != 4) {
ck_error("Usage: validate <number of cohorts> <threads per cohort> <affinity delta>\n");
}
n_cohorts = atoi(argv[1]);
if (n_cohorts <= 0) {
ck_error("ERROR: Number of cohorts must be greater than 0\n");
}
threads_per_cohort = atoi(argv[2]);
if (threads_per_cohort <= 0) {
ck_error("ERROR: Threads per cohort must be greater than 0\n");
}
nthr = n_cohorts * threads_per_cohort;
threads = malloc(sizeof(pthread_t) * nthr);
if (threads == NULL) {
ck_error("ERROR: Could not allocate thread structures\n");
}
a.delta = atoi(argv[3]);
fprintf(stderr, "Creating cohorts...");
cohorts = malloc(sizeof(CK_COHORT_INSTANCE(fas_fas)) * n_cohorts);
if (cohorts == NULL) {
ck_error("ERROR: Could not allocate base cohort structures\n");
}
for (i = 0 ; i < n_cohorts ; i++) {
local_lock = malloc(sizeof(ck_spinlock_fas_t));
if (local_lock == NULL) {
ck_error("ERROR: Could not allocate local lock\n");
}
ck_spinlock_fas_init(local_lock);
CK_COHORT_INIT(fas_fas, cohorts + i, &global_fas_lock, local_lock,
CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
}
fprintf(stderr, "done\n");
fprintf(stderr, "Creating threads...");
for (i = 0; i < nthr; i++) {
if (pthread_create(&threads[i], NULL, thread, NULL)) {
ck_error("ERROR: Could not create thread %d\n", i);
}
}
fprintf(stderr, "done\n");
fprintf(stderr, "Waiting for threads to finish correctness regression...");
for (i = 0; i < nthr; i++)
pthread_join(threads[i], NULL);
fprintf(stderr, "done (passed)\n");
return (0);
}