ck_swlock: A single writer rwlock.

This lock is copy-safe when the latch operations are used.

Simplified write side operations lead to lower latencies than ck_rwlock
for single writer workloads.
ck_pring
Jaidev Sridhar 11 years ago
parent c3097a4a7b
commit 9732e2bdb3

17
.gitignore vendored

@ -15,6 +15,10 @@ build/Makefile
*.so
*.dSYM
.*.sw[op]
GPATH
GRTAGS
GTAGS
ID
regressions/ck_array/validate/serial
regressions/ck_cohort/benchmark/ck_cohort.LATENCY
regressions/ck_cohort/benchmark/ck_cohort.THROUGHPUT
@ -156,3 +160,16 @@ regressions/ck_rwcohort/benchmark/ck_rp.LATENCY
regressions/ck_rwcohort/benchmark/ck_rp.THROUGHPUT
regressions/ck_rwcohort/benchmark/ck_wp.LATENCY
regressions/ck_rwcohort/benchmark/ck_wp.THROUGHPUT
regressions/ck_hs/benchmark/parallel_bytestring.delete
regressions/ck_ht/benchmark/parallel_bytestring.delete
regressions/ck_ht/benchmark/serial.delete
regressions/ck_ht/validate/serial.delete
regressions/ck_rhs/benchmark/parallel_bytestring
regressions/ck_rhs/benchmark/serial
regressions/ck_rhs/validate/serial
regressions/ck_spinlock/benchmark/ck_hclh.LATENCY
regressions/ck_spinlock/benchmark/ck_hclh.THROUGHPUT
regressions/ck_spinlock/validate/ck_hclh
regressions/ck_swlock/benchmark/latency
regressions/ck_swlock/benchmark/throughput
regressions/ck_swlock/validate/validate

@ -0,0 +1,374 @@
/*
* Copyright 2011-2014 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _CK_SWLOCK_H
#define _CK_SWLOCK_H
#include <ck_elide.h>
#include <ck_pr.h>
#include <stdbool.h>
#include <stddef.h>
/*
 * Single-writer rwlock state: `writer` is non-zero while the (single)
 * writer holds or is acquiring the lock; `n_readers` counts active
 * readers.  The top bit of `n_readers` is reserved as the latch bit.
 */
struct ck_swlock {
	uint32_t writer;
	uint32_t n_readers;
};
typedef struct ck_swlock ck_swlock_t;

#define CK_SWLOCK_INITIALIZER	{0, 0}

/*
 * Must be an unsigned constant: (1 << 31) left-shifts into the sign
 * bit of a signed int, which is undefined behavior in C.
 */
#define CK_SWLOCK_LATCH_BIT	(1U << 31)
#define CK_SWLOCK_READER_BITS	(UINT32_MAX ^ CK_SWLOCK_LATCH_BIT)
/*
 * Initialize the lock to the unlocked state (no writer, no readers).
 * The store fence publishes the zeroed fields before the caller can
 * hand the lock to other threads.
 */
CK_CC_INLINE static void
ck_swlock_init(struct ck_swlock *rw)
{

	rw->writer = 0;
	rw->n_readers = 0;
	ck_pr_fence_store();
	return;
}
/*
 * Release the write lock.  A release fence is required so that stores
 * made inside the critical section are visible before the writer flag
 * is cleared; without it, a newly admitted reader on a weakly ordered
 * architecture could observe stale data.  The recursive variant below
 * already fences before clearing the flag — this makes the plain
 * variant consistent with it.
 */
CK_CC_INLINE static void
ck_swlock_write_unlock(ck_swlock_t *rw)
{

	ck_pr_fence_release();
	ck_pr_store_32(&rw->writer, 0);
	return;
}
/*
 * Return true if a writer currently holds (or is acquiring) the lock.
 * Note this also reports true while the lock is latched, since the
 * latch path raises the writer flag first.
 */
CK_CC_INLINE static bool
ck_swlock_locked_writer(ck_swlock_t *rw)
{

	return ck_pr_load_32(&rw->writer);
}
/*
 * Convert a held write lock into a read lock with no window for
 * interference: the reader count is incremented before the writer
 * flag is dropped, so the caller transitions atomically from writer
 * to reader.
 */
CK_CC_INLINE static void
ck_swlock_write_downgrade(ck_swlock_t *rw)
{

	ck_pr_inc_32(&rw->n_readers);
	ck_swlock_write_unlock(rw);
	return;
}
/*
 * Return true if the lock is held in any mode: by the writer, by one
 * or more readers, or latched (the latch bit lives in n_readers).
 */
CK_CC_INLINE static bool
ck_swlock_locked(ck_swlock_t *rw)
{
	uint32_t r;

	r = ck_pr_load_32(&rw->writer);
	return ck_pr_load_32(&rw->n_readers) | r;
}
/*
 * Attempt the write lock without blocking.  The writer flag is raised
 * first and the reader count inspected second (Dekker-style
 * store-then-load, ordered by the atomic/load fence); if readers are
 * active the flag is dropped and the attempt abandoned.  Only a
 * single writer thread may ever use the write side of this lock.
 */
CK_CC_INLINE static bool
ck_swlock_write_trylock(ck_swlock_t *rw)
{

	ck_pr_store_32(&rw->writer, 1);
	ck_pr_fence_atomic_load();

	if (ck_pr_load_32(&rw->n_readers) != 0) {
		ck_swlock_write_unlock(rw);
		return false;
	}

	return true;
}

CK_ELIDE_TRYLOCK_PROTOTYPE(ck_swlock_write, ck_swlock_t,
    ck_swlock_locked, ck_swlock_write_trylock)
/*
 * Acquire the write lock: raise the writer flag to block new readers,
 * then spin until existing readers drain.  The fence orders the flag
 * store before the reader-count load (same protocol as trylock).
 */
CK_CC_INLINE static void
ck_swlock_write_lock(ck_swlock_t *rw)
{

	ck_pr_store_32(&rw->writer, 1);
	ck_pr_fence_atomic_load();

	while (ck_pr_load_32(&rw->n_readers) != 0)
		ck_pr_stall();

	return;
}
/*
 * Acquire the write lock and latch the reader counter: the CAS
 * succeeds only once the counter has drained to zero, installing the
 * latch bit so that readers using read_latchlock spin rather than
 * increment.  While latched the counter stays stable, which is what
 * makes latch-protected sections copy-safe.
 */
CK_CC_INLINE static void
ck_swlock_write_latch(ck_swlock_t *rw)
{

	ck_pr_store_32(&rw->writer, 1);
	ck_pr_fence_atomic_load();

	while (ck_pr_cas_32(&rw->n_readers, 0, CK_SWLOCK_LATCH_BIT) == false) {
		/* Stall until readers have seen the latch and cleared */
		ck_pr_stall();
	}

	return;
}
/*
 * Release a latched write lock.  Storing 0 over the whole counter
 * clears the latch bit and also discards any stray increment a
 * failed read_trylock may have left behind, then the writer flag is
 * dropped.
 */
CK_CC_INLINE static void
ck_swlock_write_unlatch(ck_swlock_t *rw)
{

	ck_pr_store_32(&rw->n_readers, 0);
	ck_swlock_write_unlock(rw);
	return;
}

CK_ELIDE_PROTOTYPE(ck_swlock_write, ck_swlock_t,
    ck_swlock_locked, ck_swlock_write_lock,
    ck_swlock_locked_writer, ck_swlock_write_unlock)
/*
 * Attempt a read acquisition without blocking.  The reader count is
 * optimistically incremented; if the fetched value carries the latch
 * bit the increment is deliberately NOT rolled back — the latching
 * writer's unlatch path stores 0 over the entire counter, wiping it.
 * On a plain writer conflict the increment is rolled back.
 */
CK_CC_INLINE static bool
ck_swlock_read_trylock(ck_swlock_t *rw)
{

	if (ck_pr_load_32(&rw->writer) != 0)
		return false;

	if (ck_pr_faa_32(&rw->n_readers, 1) & CK_SWLOCK_LATCH_BIT) {
		return false;
	}

	/*
	 * Serialize with respect to concurrent write
	 * lock operation.
	 */
	ck_pr_fence_atomic_load();

	if (ck_pr_load_32(&rw->writer) == 0) {
		ck_pr_fence_load();
		return true;
	}

	ck_pr_dec_32(&rw->n_readers);
	return false;
}

CK_ELIDE_TRYLOCK_PROTOTYPE(ck_swlock_read, ck_swlock_t,
    ck_swlock_locked_writer, ck_swlock_read_trylock)
/*
 * Acquire a read lock.  The reader announces itself by incrementing
 * n_readers, then re-checks the writer flag: if a writer raced in,
 * the increment is undone and the reader waits again.  This variant
 * does not honor the latch bit; pair it with write_lock/write_unlock
 * only (use read_latchlock against latching writers).
 */
CK_CC_INLINE static void
ck_swlock_read_lock(ck_swlock_t *rw)
{

	for (;;) {
		while (ck_pr_load_32(&rw->writer) != 0)
			ck_pr_stall();

		ck_pr_inc_32(&rw->n_readers);

		/*
		 * Serialize with respect to concurrent write
		 * lock operation.
		 */
		ck_pr_fence_atomic_load();

		if (ck_pr_load_32(&rw->writer) == 0)
			break;

		ck_pr_dec_32(&rw->n_readers);
	}

	/* Acquire semantics are necessary. */
	ck_pr_fence_load();
	return;
}
/*
 * Acquire a read lock against a writer that may latch.  Identical to
 * read_lock except the increment is performed with fetch-and-add so
 * the latch bit can be observed: if the counter is latched, the
 * reader retries without decrementing (the writer's unlatch resets
 * the counter to zero).
 */
CK_CC_INLINE static void
ck_swlock_read_latchlock(ck_swlock_t *rw)
{

	for (;;) {
		while (ck_pr_load_32(&rw->writer) != 0)
			ck_pr_stall();

		if (ck_pr_faa_32(&rw->n_readers, 1) & CK_SWLOCK_LATCH_BIT) {
			/* Writer has latched, stall the reader */
			continue;
		}

		/*
		 * Serialize with respect to concurrent write
		 * lock operation.
		 */
		ck_pr_fence_atomic_load();

		if (ck_pr_load_32(&rw->writer) == 0)
			break;

		ck_pr_dec_32(&rw->n_readers);
	}

	/* Acquire semantics are necessary. */
	ck_pr_fence_load();
	return;
}
/*
 * Return true if at least one reader holds the lock.  The latch bit
 * is masked out so a latched-but-readerless lock reports false.
 * NOTE(review): the leading load fence appears intended to order this
 * load after prior loads in the caller — confirm against ck_pr fence
 * semantics.
 */
CK_CC_INLINE static bool
ck_swlock_locked_reader(ck_swlock_t *rw)
{

	ck_pr_fence_load();
	return (ck_pr_load_32(&rw->n_readers) & CK_SWLOCK_READER_BITS);
}
/*
 * Release a read lock acquired via read_lock, read_trylock or
 * read_latchlock.  The fence orders the critical section's loads
 * before the decrement (release semantics for the read side).
 */
CK_CC_INLINE static void
ck_swlock_read_unlock(ck_swlock_t *rw)
{

	ck_pr_fence_load_atomic();
	ck_pr_dec_32(&rw->n_readers);
	return;
}

CK_ELIDE_PROTOTYPE(ck_swlock_read, ck_swlock_t,
    ck_swlock_locked_writer, ck_swlock_read_lock,
    ck_swlock_locked_reader, ck_swlock_read_unlock)
/*
 * Recursive writer reader-writer lock implementation.
 * Wraps ck_swlock with a write recursion count.  Only the single
 * writer thread nests acquisitions, so `wc` needs no atomic access.
 */
struct ck_swlock_recursive {
	struct ck_swlock rw;
	uint32_t wc;	/* write recursion depth; unlock releases at 0 */
};
typedef struct ck_swlock_recursive ck_swlock_recursive_t;

#define CK_SWLOCK_RECURSIVE_INITIALIZER {CK_SWLOCK_INITIALIZER, 0}
/*
 * Acquire (or re-acquire) the write lock recursively.  Same protocol
 * as ck_swlock_write_lock; on re-entry the writer flag store is a
 * no-op and only the recursion count grows.
 */
CK_CC_INLINE static void
ck_swlock_recursive_write_lock(ck_swlock_recursive_t *rw)
{

	ck_pr_store_32(&rw->rw.writer, 1);
	ck_pr_fence_atomic_load();

	while (ck_pr_load_32(&rw->rw.n_readers) != 0)
		ck_pr_stall();

	rw->wc++;
	return;
}
/*
 * Recursively acquire the write lock and latch the reader counter
 * (see ck_swlock_write_latch).  NOTE(review): if the counter is
 * already latched by an outer acquisition, the CAS from 0 spins
 * forever — nesting latch inside latch is presumably unsupported;
 * confirm intended usage.
 */
CK_CC_INLINE static void
ck_swlock_recursive_write_latch(ck_swlock_recursive_t *rw)
{

	ck_pr_store_32(&rw->rw.writer, 1);
	ck_pr_fence_atomic_load();

	while (ck_pr_cas_32(&rw->rw.n_readers, 0, CK_SWLOCK_LATCH_BIT) == false)
		ck_pr_stall();

	rw->wc++;
	return;
}
/*
 * Non-blocking recursive write acquisition.  Mirrors
 * ck_swlock_write_trylock, bumping the recursion count on success.
 */
CK_CC_INLINE static bool
ck_swlock_recursive_write_trylock(ck_swlock_recursive_t *rw)
{

	ck_pr_store_32(&rw->rw.writer, 1);
	ck_pr_fence_atomic_load();

	if (ck_pr_load_32(&rw->rw.n_readers) != 0) {
		ck_pr_store_32(&rw->rw.writer, 0);
		return false;
	}

	rw->wc++;
	return true;
}
/*
 * Drop one level of write recursion; the lock is actually released
 * (with release semantics) only when the count reaches zero.
 */
CK_CC_INLINE static void
ck_swlock_recursive_write_unlock(ck_swlock_recursive_t *rw)
{

	if (--rw->wc == 0) {
		ck_pr_fence_release();
		ck_pr_store_32(&rw->rw.writer, 0);
	}

	return;
}
/*
 * Release one latched recursive acquisition: clear the latch bit
 * (resetting the reader counter to zero) and drop a recursion level.
 */
CK_CC_INLINE static void
ck_swlock_recursive_write_unlatch(ck_swlock_recursive_t *rw)
{

	ck_pr_store_32(&rw->rw.n_readers, 0);
	ck_swlock_recursive_write_unlock(rw);
	return;
}
/* Read side of the recursive lock: thin wrappers over the base lock
 * (readers do not recurse; only the write side tracks depth). */

CK_CC_INLINE static void
ck_swlock_recursive_read_lock(ck_swlock_recursive_t *rw)
{

	ck_swlock_read_lock(&rw->rw);
	return;
}

CK_CC_INLINE static void
ck_swlock_recursive_read_latchlock(ck_swlock_recursive_t *rw)
{

	ck_swlock_read_latchlock(&rw->rw);
	return;
}

CK_CC_INLINE static bool
ck_swlock_recursive_read_trylock(ck_swlock_recursive_t *rw)
{

	return ck_swlock_read_trylock(&rw->rw);
}

CK_CC_INLINE static void
ck_swlock_recursive_read_unlock(ck_swlock_recursive_t *rw)
{

	ck_swlock_read_unlock(&rw->rw);
	return;
}
#endif /* _CK_SWLOCK_H */

@ -16,6 +16,7 @@ DIR=array \
queue \
ring \
rwlock \
swlock \
sequence \
spinlock \
stack
@ -58,6 +59,8 @@ all:
$(MAKE) -C ./ck_ring/benchmark all
$(MAKE) -C ./ck_rwlock/validate all
$(MAKE) -C ./ck_rwlock/benchmark all
$(MAKE) -C ./ck_swlock/validate all
$(MAKE) -C ./ck_swlock/benchmark all
$(MAKE) -C ./ck_pflock/validate all
$(MAKE) -C ./ck_pflock/benchmark all
$(MAKE) -C ./ck_hp/validate all
@ -101,6 +104,8 @@ clean:
$(MAKE) -C ./ck_ring/benchmark clean
$(MAKE) -C ./ck_rwlock/validate clean
$(MAKE) -C ./ck_rwlock/benchmark clean
$(MAKE) -C ./ck_swlock/validate clean
$(MAKE) -C ./ck_swlock/benchmark clean
$(MAKE) -C ./ck_pflock/validate clean
$(MAKE) -C ./ck_pflock/benchmark clean
$(MAKE) -C ./ck_hp/validate clean

@ -0,0 +1,17 @@
# Build the ck_swlock latency and throughput micro-benchmarks.
.PHONY: clean distribution

OBJECTS=latency throughput

all: $(OBJECTS)

# Rebuild whenever the lock implementation itself changes.
latency: latency.c ../../../include/ck_swlock.h
	$(CC) $(CFLAGS) -o latency latency.c

throughput: throughput.c ../../../include/ck_swlock.h
	$(CC) $(CFLAGS) -o throughput throughput.c

clean:
	rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)

# Pulled in last; variables are expanded lazily at recipe time.
include ../../../build/regressions.build
CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE

@ -0,0 +1,100 @@
/*
* Copyright 2011-2014 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_swlock.h>
#include <inttypes.h>
#include <stdio.h>
#include "../../common.h"
#define CK_F_PR_RTM
#ifndef STEPS
#define STEPS 2000000
#endif
/*
 * Single-threaded latency benchmark: for each acquire/release pair
 * (write, read, latch, latched read) run STEPS warm-up iterations,
 * then time STEPS iterations with rdtsc and report the mean cost.
 */
int
main(void)
{
	ck_swlock_t swlock = CK_SWLOCK_INITIALIZER;
	uint64_t begin, end, iter;

	/* Write lock/unlock. */
	for (iter = 0; iter < STEPS; iter++) {
		ck_swlock_write_lock(&swlock);
		ck_swlock_write_unlock(&swlock);
	}

	begin = rdtsc();
	for (iter = 0; iter < STEPS; iter++) {
		ck_swlock_write_lock(&swlock);
		ck_swlock_write_unlock(&swlock);
	}
	end = rdtsc();
	printf("                WRITE: swlock   %15" PRIu64 "\n", (end - begin) / STEPS);

	/* Read lock/unlock. */
	for (iter = 0; iter < STEPS; iter++) {
		ck_swlock_read_lock(&swlock);
		ck_swlock_read_unlock(&swlock);
	}

	begin = rdtsc();
	for (iter = 0; iter < STEPS; iter++) {
		ck_swlock_read_lock(&swlock);
		ck_swlock_read_unlock(&swlock);
	}
	end = rdtsc();
	printf("                READ:  swlock   %15" PRIu64 "\n", (end - begin) / STEPS);

	/* Write latch/unlatch. */
	for (iter = 0; iter < STEPS; iter++) {
		ck_swlock_write_latch(&swlock);
		ck_swlock_write_unlatch(&swlock);
	}

	begin = rdtsc();
	for (iter = 0; iter < STEPS; iter++) {
		ck_swlock_write_latch(&swlock);
		ck_swlock_write_unlatch(&swlock);
	}
	end = rdtsc();
	printf("                LATCH: swlock   %15" PRIu64 "\n", (end - begin) / STEPS);

	/* Latch-aware read lock/unlock. */
	for (iter = 0; iter < STEPS; iter++) {
		ck_swlock_read_latchlock(&swlock);
		ck_swlock_read_unlock(&swlock);
	}

	begin = rdtsc();
	for (iter = 0; iter < STEPS; iter++) {
		ck_swlock_read_latchlock(&swlock);
		ck_swlock_read_unlock(&swlock);
	}
	end = rdtsc();
	printf("                READ_LATCHLOCK: swlock   %15" PRIu64 "\n", (end - begin) / STEPS);

	return 0;
}

@ -0,0 +1,249 @@
/*
* Copyright 2011-2014 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_swlock.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include "../../common.h"
#ifndef STEPS
#define STEPS 1000000
#endif
static int barrier;
static int threads;
static unsigned int flag CK_CC_CACHELINE;
static struct {
ck_swlock_t lock;
} rw CK_CC_CACHELINE = {
.lock = CK_SWLOCK_INITIALIZER
};
static struct affinity affinity;
/*
 * Reader-throughput worker: after a barrier rendezvous, repeatedly
 * perform 16 manually unrolled read lock/unlock pairs, accumulating
 * per-pair cycle cost (the >> 4 divides by the unroll factor), until
 * the main thread raises `flag`.  The mean latency is written to the
 * caller-provided slot.  The unrolling is deliberate — do not fold
 * it into a loop, that would change what is measured.
 */
static void *
thread_lock(void *pun)
{
	uint64_t s_b, e_b, a, i;
	uint64_t *value = pun;

	if (aff_iterate(&affinity) != 0) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads)
		ck_pr_stall();

	for (i = 1, a = 0;; i++) {
		s_b = rdtsc();
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_lock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		e_b = rdtsc();

		/* 16 pairs per sample. */
		a += (e_b - s_b) >> 4;

		if (ck_pr_load_uint(&flag) == 1)
			break;
	}

	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads * 2)
		ck_pr_stall();

	*value = (a / i);
	return NULL;
}
/*
 * Same as thread_lock but exercising the latch-aware reader path
 * (ck_swlock_read_latchlock) so the two variants can be compared.
 * The 16x unrolling and >> 4 averaging are deliberate.
 */
static void *
thread_latchlock(void *pun)
{
	uint64_t s_b, e_b, a, i;
	uint64_t *value = pun;

	if (aff_iterate(&affinity) != 0) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads)
		ck_pr_stall();

	for (i = 1, a = 0;; i++) {
		s_b = rdtsc();
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		ck_swlock_read_latchlock(&rw.lock);
		ck_swlock_read_unlock(&rw.lock);
		e_b = rdtsc();

		/* 16 pairs per sample. */
		a += (e_b - s_b) >> 4;

		if (ck_pr_load_uint(&flag) == 1)
			break;
	}

	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads * 2)
		ck_pr_stall();

	*value = (a / i);
	return NULL;
}
/*
 * Run one benchmark configuration: reset the barrier and stop flag,
 * spawn `threads` workers running f (each writing its mean latency
 * into latency[t]), let them run for 10 seconds, signal shutdown via
 * `flag`, join, and print per-thread results.
 */
static void
swlock_test(pthread_t *p, int d, uint64_t *latency, void *(*f)(void *), const char *label)
{
	int t;

	ck_pr_store_int(&barrier, 0);
	ck_pr_store_uint(&flag, 0);

	affinity.delta = d;
	affinity.request = 0;

	fprintf(stderr, "Creating threads (%s)...", label);
	for (t = 0; t < threads; t++) {
		if (pthread_create(&p[t], NULL, f, latency + t) != 0) {
			ck_error("ERROR: Could not create thread %d\n", t);
		}
	}
	fprintf(stderr, "done\n");

	/* Measurement window. */
	common_sleep(10);
	ck_pr_store_uint(&flag, 1);

	fprintf(stderr, "Waiting for threads to finish acquisition regression...");
	for (t = 0; t < threads; t++)
		pthread_join(p[t], NULL);
	fprintf(stderr, "done\n\n");

	for (t = 1; t <= threads; t++)
		printf("%10u %20" PRIu64 "\n", t, latency[t - 1]);

	fprintf(stderr, "\n");
	return;
}
/*
 * Entry point: throughput <affinity delta> <threads>.  Runs the
 * plain-read and latch-aware-read benchmarks and prints per-thread
 * mean latencies.
 */
int
main(int argc, char *argv[])
{
	int d;
	pthread_t *p;
	uint64_t *latency;

	if (argc != 3) {
		ck_error("Usage: throughput <delta> <threads>\n");
	}

	threads = atoi(argv[2]);
	if (threads <= 0) {
		ck_error("ERROR: Threads must be a value > 0.\n");
	}

	p = malloc(sizeof(pthread_t) * threads);
	if (p == NULL) {
		ck_error("ERROR: Failed to initialize thread.\n");
	}

	latency = malloc(sizeof(uint64_t) * threads);
	if (latency == NULL) {
		ck_error("ERROR: Failed to create latency buffer.\n");
	}

	d = atoi(argv[1]);
	swlock_test(p, d, latency, thread_lock, "swlock");
	/* Distinct label: previously both runs printed "swlock". */
	swlock_test(p, d, latency, thread_latchlock, "swlock_latchlock");

	free(latency);
	free(p);
	return 0;
}

@ -0,0 +1,17 @@
# Build and run the ck_swlock validation regression.
.PHONY: check clean distribution

OBJECTS=validate

all: $(OBJECTS)

# Rebuild whenever the lock implementation itself changes.
validate: validate.c ../../../include/ck_swlock.h
	$(CC) $(CFLAGS) -o validate validate.c

check: all
	./validate $(CORES) 1

clean:
	rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)

# -O0 keeps the validation code unoptimized so races are not masked.
include ../../../build/regressions.build
CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE -O0

@ -0,0 +1,239 @@
/*
* Copyright 2011-2014 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <sys/time.h>
#include <ck_pr.h>
#include <ck_swlock.h>
#include "../../common.h"
#ifndef ITERATE
#define ITERATE 1000000
#endif
static struct affinity a;
static unsigned int locked;
static int nthr;
static ck_swlock_t lock = CK_SWLOCK_INITIALIZER;
static ck_swlock_recursive_t r_lock = CK_SWLOCK_RECURSIVE_INITIALIZER;
/*
 * Recursive-lock validation worker.  Thread 0 is the single writer:
 * it nests four write acquisitions (trylock, lock, latch, lock),
 * mutates `locked` under the lock, and releases four times.  All
 * threads then take the read side and verify the counter is never
 * observed mid-update.  Fix: the "!= 8" check previously printed
 * "%u != 2", a misleading diagnostic.
 */
static void *
thread_recursive(void *arg)
{
	int i = ITERATE;
	unsigned int l;
	unsigned int tid = *(int *) arg;

	if (aff_iterate(&a)) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	while (i--) {
		if (tid == 0) {
			/* Writer */
			while (ck_swlock_recursive_write_trylock(&r_lock) == false)
				ck_pr_stall();

			ck_swlock_recursive_write_lock(&r_lock);
			ck_swlock_recursive_write_latch(&r_lock);
			ck_swlock_recursive_write_lock(&r_lock);

			{
				l = ck_pr_load_uint(&locked);
				if (l != 0) {
					ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
				}

				ck_pr_inc_uint(&locked);
				ck_pr_inc_uint(&locked);
				ck_pr_inc_uint(&locked);
				ck_pr_inc_uint(&locked);
				ck_pr_inc_uint(&locked);
				ck_pr_inc_uint(&locked);
				ck_pr_inc_uint(&locked);
				ck_pr_inc_uint(&locked);

				l = ck_pr_load_uint(&locked);
				if (l != 8) {
					ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
				}

				ck_pr_dec_uint(&locked);
				ck_pr_dec_uint(&locked);
				ck_pr_dec_uint(&locked);
				ck_pr_dec_uint(&locked);
				ck_pr_dec_uint(&locked);
				ck_pr_dec_uint(&locked);
				ck_pr_dec_uint(&locked);
				ck_pr_dec_uint(&locked);

				l = ck_pr_load_uint(&locked);
				if (l != 0) {
					ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
				}
			}

			ck_swlock_recursive_write_unlock(&r_lock);
			ck_swlock_recursive_write_unlatch(&r_lock);
			ck_swlock_recursive_write_unlock(&r_lock);
			ck_swlock_recursive_write_unlock(&r_lock);
		}

		ck_swlock_recursive_read_latchlock(&r_lock);
		{
			l = ck_pr_load_uint(&locked);
			if (l != 0) {
				ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
			}
		}
		ck_swlock_recursive_read_unlock(&r_lock);
	}

	return (NULL);
}
/*
 * Plain-lock validation worker.  Thread 0 latches the lock, performs
 * 8 increments and 8 decrements of `locked`, and unlatches; every
 * thread then takes a latch-aware read lock and checks that `locked`
 * is zero (readers must never see a partial update).  Fix: the
 * "!= 8" check previously printed "%u != 2", a misleading diagnostic.
 */
static void *
thread(void *arg)
{
	unsigned int i = ITERATE;
	unsigned int l;
	int tid = *(int *) arg;

	if (aff_iterate(&a)) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	while (i--) {
		if (tid == 0) {
			/* Writer */
			ck_swlock_write_latch(&lock);
			{
				l = ck_pr_load_uint(&locked);
				if (l != 0) {
					ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
				}

				ck_pr_inc_uint(&locked);
				ck_pr_inc_uint(&locked);
				ck_pr_inc_uint(&locked);
				ck_pr_inc_uint(&locked);
				ck_pr_inc_uint(&locked);
				ck_pr_inc_uint(&locked);
				ck_pr_inc_uint(&locked);
				ck_pr_inc_uint(&locked);

				l = ck_pr_load_uint(&locked);
				if (l != 8) {
					ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
				}

				ck_pr_dec_uint(&locked);
				ck_pr_dec_uint(&locked);
				ck_pr_dec_uint(&locked);
				ck_pr_dec_uint(&locked);
				ck_pr_dec_uint(&locked);
				ck_pr_dec_uint(&locked);
				ck_pr_dec_uint(&locked);
				ck_pr_dec_uint(&locked);

				l = ck_pr_load_uint(&locked);
				if (l != 0) {
					ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
				}
			}
			ck_swlock_write_unlatch(&lock);
		}

		ck_swlock_read_latchlock(&lock);
		{
			l = ck_pr_load_uint(&locked);
			if (l != 0) {
				ck_error("ERROR [RD:%d]: %u != 0 r:%x w:%d\n", __LINE__, l, lock.n_readers, lock.writer);
			}
		}
		ck_swlock_read_unlock(&lock);
	}

	return (NULL);
}
static void
swlock_test(pthread_t *threads, void *(*f)(void *), const char *test)
{
int i;
fprintf(stderr, "Creating threads (%s)...", test);
for (i = 0; i < nthr; i++) {
if (pthread_create(&threads[i], NULL, f, &i)) {
ck_error("ERROR: Could not create thread %d\n", i);
}
}
fprintf(stderr, ".");
for (i = 0; i < nthr; i++)
pthread_join(threads[i], NULL);
fprintf(stderr, "done (passed)\n");
return;
}
/*
 * Entry point: validate <number of threads> <affinity delta>.
 * Runs the plain and recursive lock validation passes.
 */
int
main(int argc, char *argv[])
{
	pthread_t *threads;

	if (argc != 3) {
		ck_error("Usage: validate <number of threads> <affinity delta>\n");
	}

	nthr = atoi(argv[1]);
	if (nthr <= 0) {
		ck_error("ERROR: Number of threads must be greater than 0\n");
	}

	threads = malloc(sizeof(pthread_t) * nthr);
	if (threads == NULL) {
		ck_error("ERROR: Could not allocate thread structures\n");
	}

	a.delta = atoi(argv[2]);

	swlock_test(threads, thread, "regular");
	swlock_test(threads, thread_recursive, "recursive");

	free(threads);
	return 0;
}
Loading…
Cancel
Save