Merge branch 'master' of https://github.com/sbahra/ck into cohort

ck_pring
Brendon Scheinman 12 years ago
commit c4e3edfeac

@ -142,7 +142,6 @@ ck_fifo_spsc_dequeue(struct ck_fifo_spsc *fifo, void *value)
return (false);
/* If entry is visible, guarantee store to value is visible. */
ck_pr_fence_load_depends();
ck_pr_store_ptr(value, entry->value);
ck_pr_fence_store();
ck_pr_store_ptr(&fifo->head, entry);
@ -244,7 +243,6 @@ ck_fifo_mpmc_enqueue(struct ck_fifo_mpmc *fifo,
tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
ck_pr_fence_load();
tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
ck_pr_fence_load_depends();
next.generation = ck_pr_load_ptr(&tail.pointer->next.generation);
next.pointer = ck_pr_load_ptr(&tail.pointer->next.pointer);
@ -296,7 +294,6 @@ ck_fifo_mpmc_tryenqueue(struct ck_fifo_mpmc *fifo,
tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
ck_pr_fence_load();
tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
ck_pr_fence_load_depends();
next.generation = ck_pr_load_ptr(&tail.pointer->next.generation);
next.pointer = ck_pr_load_ptr(&tail.pointer->next.pointer);

@ -0,0 +1,142 @@
/*
* Copyright 2013 John Wittrock.
* Copyright 2013 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _CK_PFLOCK_H
#define _CK_PFLOCK_H
/*
* This is a naive implementation of phase-fair locks derived
* from the work described in:
* Brandenburg, B. and Anderson, J. 2010. Spin-Based
* Reader-Writer Synchronization for Multiprocessor Real-Time Systems
*/
#include <ck_cc.h>
#include <ck_pr.h>
/*
 * Phase-fair reader-writer lock state. Each side (reader/writer) keeps a
 * ticket pair: "in" is taken on entry, "out" is bumped on exit, so a side
 * is quiescent when in == out.
 */
struct ck_pflock {
	uint32_t rin;	/* Reader entry tickets; low bits double as writer-state flags. */
	uint32_t rout;	/* Reader exit tickets. */
	uint32_t win;	/* Writer entry tickets. */
	uint32_t wout;	/* Writer exit tickets. */
};
typedef struct ck_pflock ck_pflock_t;

#define CK_PFLOCK_LSB   0xFFFFFFF0	/* Mask clearing the writer-state bits in rin. */
#define CK_PFLOCK_RINC  0x100		/* Reader increment value. */
#define CK_PFLOCK_WBITS 0x3		/* Writer bits in reader. */
#define CK_PFLOCK_PRES  0x2		/* Writer present bit. */
#define CK_PFLOCK_PHID  0x1		/* Phase ID bit. */

#define CK_PFLOCK_INITIALIZER {0, 0, 0, 0}
/*
 * Reset every ticket counter to its pristine state and publish the
 * initialized lock before it may be observed by other threads.
 */
CK_CC_INLINE static void
ck_pflock_init(struct ck_pflock *pf)
{

	pf->wout = 0;
	pf->win = 0;
	pf->rout = 0;
	pf->rin = 0;
	ck_pr_fence_memory();
	return;
}
/*
 * Release the write lock: ends the current write phase and hands the lock
 * to blocked readers first (phase-fairness), then to the next writer.
 */
CK_CC_INLINE static void
ck_pflock_write_unlock(ck_pflock_t *pf)
{

	/* Release fence: critical-section stores drain before the lock opens. */
	ck_pr_fence_memory();

	/* Migrate from write phase to read phase. */
	ck_pr_and_32(&pf->rin, CK_PFLOCK_LSB);

	/* Allow other writers to continue. */
	ck_pr_faa_32(&pf->wout, 1);
	return;
}
/*
 * Acquire the write lock. Writers are FIFO-ordered among themselves via
 * the win/wout ticket pair, then block new readers and wait for in-flight
 * readers to drain.
 */
CK_CC_INLINE static void
ck_pflock_write_lock(ck_pflock_t *pf)
{
	uint32_t ticket;

	/* Acquire ownership of write-phase. */
	ticket = ck_pr_faa_32(&pf->win, 1);
	while (ck_pr_load_32(&pf->wout) != ticket)
		ck_pr_stall();

	/*
	 * Acquire ticket on read-side in order to allow them
	 * to flush. Indicates to any incoming reader that a
	 * write-phase is pending.
	 *
	 * The phase-ID bit of our writer ticket is published with PRES so
	 * blocked readers can detect when this phase has ended.
	 */
	ticket = ck_pr_faa_32(&pf->rin,
		(ticket & CK_PFLOCK_PHID) | CK_PFLOCK_PRES);

	/* Wait for any pending readers to flush. */
	while (ck_pr_load_32(&pf->rout) != ticket)
		ck_pr_stall();

	/* Acquire semantics for the critical section. */
	ck_pr_fence_memory();
	return;
}
/*
 * Release the read lock: bump the reader-exit ticket so a waiting writer
 * observes this reader as drained.
 */
CK_CC_INLINE static void
ck_pflock_read_unlock(ck_pflock_t *pf)
{

	/* Release (load) fence: critical-section reads complete before exit. */
	ck_pr_fence_load();
	ck_pr_faa_32(&pf->rout, CK_PFLOCK_RINC);
	return;
}
/*
 * Acquire the read lock: register as a reader; if a writer is pending,
 * wait for the current write phase to pass.
 */
CK_CC_INLINE static void
ck_pflock_read_lock(ck_pflock_t *pf)
{
	uint32_t w;

	/*
	 * If no writer is present, then the operation has completed
	 * successfully.
	 */
	w = ck_pr_faa_32(&pf->rin, CK_PFLOCK_RINC) & CK_PFLOCK_WBITS;
	if (w == 0)
		goto leave;

	/*
	 * Wait for current write phase to complete: the captured writer bits
	 * change once the writer migrates the lock to a read phase.
	 */
	while ((ck_pr_load_32(&pf->rin) & CK_PFLOCK_WBITS) == w)
		ck_pr_stall();

leave:
	/* Acquire semantics. */
	ck_pr_fence_load();
	return;
}
#endif /* _CK_PFLOCK_H */

@ -405,6 +405,141 @@ ck_spinlock_dec_unlock(struct ck_spinlock_dec *lock)
#ifndef CK_F_SPINLOCK_TICKET
#define CK_F_SPINLOCK_TICKET
/*
 * If 16-bit or 32-bit increment is supported, implement support for
 * trylock functionality on availability of 32-bit or 64-bit fetch-and-add
 * and compare-and-swap.
 *
 * Both counters are packed into a single word: the next-ticket half lives
 * above CK_SPINLOCK_TICKET_SHIFT, the current position in the low
 * CK_SPINLOCK_TICKET_MASK bits.
 */
#if defined(CK_MD_TSO)
#if defined(CK_F_PR_FAA_32) && defined(CK_F_PR_INC_16) && defined(CK_F_PR_CAS_32)
/* 32-bit lock word: 16-bit position in the low half, 16-bit next-ticket above. */
#define CK_SPINLOCK_TICKET_TYPE		uint32_t
#define CK_SPINLOCK_TICKET_TYPE_BASE	uint16_t
#define CK_SPINLOCK_TICKET_INC(x)	ck_pr_inc_16(x)
#define CK_SPINLOCK_TICKET_CAS(x, y, z)	ck_pr_cas_32(x, y, z)
#define CK_SPINLOCK_TICKET_FAA(x, y)	ck_pr_faa_32(x, y)
#define CK_SPINLOCK_TICKET_LOAD(x)	ck_pr_load_32(x)
#define CK_SPINLOCK_TICKET_INCREMENT	(0x00010000UL)
#define CK_SPINLOCK_TICKET_MASK		(0xFFFFUL)
#define CK_SPINLOCK_TICKET_SHIFT	(16)
#elif defined(CK_F_PR_FAA_64) && defined(CK_F_PR_INC_32) && defined(CK_F_PR_CAS_64)
/* 64-bit lock word: 32-bit position in the low half, 32-bit next-ticket above. */
#define CK_SPINLOCK_TICKET_TYPE		uint64_t
#define CK_SPINLOCK_TICKET_TYPE_BASE	uint32_t
#define CK_SPINLOCK_TICKET_INC(x)	ck_pr_inc_32(x)
#define CK_SPINLOCK_TICKET_CAS(x, y, z)	ck_pr_cas_64(x, y, z)
#define CK_SPINLOCK_TICKET_FAA(x, y)	ck_pr_faa_64(x, y)
#define CK_SPINLOCK_TICKET_LOAD(x)	ck_pr_load_64(x)
#define CK_SPINLOCK_TICKET_INCREMENT	(0x0000000100000000ULL)
#define CK_SPINLOCK_TICKET_MASK		(0xFFFFFFFFULL)
#define CK_SPINLOCK_TICKET_SHIFT	(32)
#endif
#endif /* CK_MD_TSO */

#if defined(CK_SPINLOCK_TICKET_TYPE)
#define CK_F_SPINLOCK_TICKET_TRYLOCK

struct ck_spinlock_ticket {
	CK_SPINLOCK_TICKET_TYPE value;	/* Packed next/position ticket pair. */
};
typedef struct ck_spinlock_ticket ck_spinlock_ticket_t;
#define CK_SPINLOCK_TICKET_INITIALIZER { .value = 0 }
/* Reset the packed ticket word and publish it before the lock is shared. */
CK_CC_INLINE static void
ck_spinlock_ticket_init(struct ck_spinlock_ticket *ticket)
{

	ticket->value = 0;
	/* Release fence: the zeroed lock word is visible before any use. */
	ck_pr_fence_store();
	return;
}
/*
 * Acquire the ticket lock: take a ticket from the high half of the word
 * and spin until the low (position) half reaches it. FIFO-fair.
 */
CK_CC_INLINE static void
ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
{
	CK_SPINLOCK_TICKET_TYPE request, position;

	/* Get our ticket number and set next ticket number. */
	request = CK_SPINLOCK_TICKET_FAA(&ticket->value,
		CK_SPINLOCK_TICKET_INCREMENT);

	/* Split the snapshot into current holder and our ticket. */
	position = request & CK_SPINLOCK_TICKET_MASK;
	request >>= CK_SPINLOCK_TICKET_SHIFT;

	/* Spin until the position counter reaches our ticket. */
	while (request != position) {
		ck_pr_stall();
		position = CK_SPINLOCK_TICKET_LOAD(&ticket->value) &
			CK_SPINLOCK_TICKET_MASK;
	}

	/* Acquire semantics. */
	ck_pr_fence_memory();
	return;
}
/*
 * Ticket lock acquisition with proportional back-off: waiters back off in
 * proportion to their distance from the head of the queue, scaled by the
 * caller-provided shift c, to reduce cache-line contention.
 */
CK_CC_INLINE static void
ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
{
	CK_SPINLOCK_TICKET_TYPE request, position;
	ck_backoff_t backoff;

	/* Get our ticket number and set next ticket number. */
	request = CK_SPINLOCK_TICKET_FAA(&ticket->value,
		CK_SPINLOCK_TICKET_INCREMENT);

	position = request & CK_SPINLOCK_TICKET_MASK;
	request >>= CK_SPINLOCK_TICKET_SHIFT;

	while (request != position) {
		ck_pr_stall();
		position = CK_SPINLOCK_TICKET_LOAD(&ticket->value) &
			CK_SPINLOCK_TICKET_MASK;

		/* Back off proportionally to our distance from the head. */
		backoff = request - position;
		backoff <<= c;
		ck_backoff_eb(&backoff);
	}

	/* Acquire semantics. */
	ck_pr_fence_memory();
	return;
}
/*
 * Attempt to acquire the ticket lock without blocking. Returns true on
 * acquisition (with acquire semantics), false if the lock is contended.
 */
CK_CC_INLINE static bool
ck_spinlock_ticket_trylock(struct ck_spinlock_ticket *ticket)
{
	CK_SPINLOCK_TICKET_TYPE snapshot, request, position;

	/* Consistent snapshot of the packed ticket pair. */
	snapshot = CK_SPINLOCK_TICKET_LOAD(&ticket->value);
	position = snapshot & CK_SPINLOCK_TICKET_MASK;
	request = snapshot >> CK_SPINLOCK_TICKET_SHIFT;

	/* Lock is held or queued if next-ticket has moved past position. */
	if (position != request)
		return false;

	/*
	 * CAS on the whole word takes the next ticket only if no other
	 * thread raced in since the snapshot.
	 */
	if (CK_SPINLOCK_TICKET_CAS(&ticket->value,
	    snapshot, snapshot + CK_SPINLOCK_TICKET_INCREMENT) == false) {
		return false;
	}

	/* Acquire semantics. */
	ck_pr_fence_memory();
	return true;
}
/*
 * Release the ticket lock by bumping only the position half of the word
 * with a narrow atomic increment.
 */
CK_CC_INLINE static void
ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
{

	/* Release fence: drain critical-section stores first. */
	ck_pr_fence_memory();

	/*
	 * NOTE(review): the cast addresses the low (position) half only on
	 * little-endian targets — confirm big-endian is excluded by the
	 * surrounding configuration guards.
	 */
	CK_SPINLOCK_TICKET_INC((CK_SPINLOCK_TICKET_TYPE_BASE *)&ticket->value);
	return;
}
/*
 * Scrub the implementation-selection macros so they do not leak out of
 * this header; CK_SPINLOCK_TICKET_CAS was missing from the scrub list.
 */
#undef CK_SPINLOCK_TICKET_TYPE
#undef CK_SPINLOCK_TICKET_TYPE_BASE
#undef CK_SPINLOCK_TICKET_INC
#undef CK_SPINLOCK_TICKET_CAS
#undef CK_SPINLOCK_TICKET_FAA
#undef CK_SPINLOCK_TICKET_LOAD
#undef CK_SPINLOCK_TICKET_INCREMENT
#undef CK_SPINLOCK_TICKET_MASK
#undef CK_SPINLOCK_TICKET_SHIFT
#else
/*
* MESI benefits from cacheline padding between next and current. This avoids
* invalidation of current from the cache due to incoming lock requests.
@ -449,7 +584,7 @@ ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
}
CK_CC_INLINE static void
ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket)
ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
{
ck_backoff_t backoff;
unsigned int request, position;
@ -463,7 +598,7 @@ ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket)
/* Overflow is handled fine, assuming 2s complement. */
backoff = (request - position);
backoff *= 64;
backoff <<= c;
/*
* Ideally, back-off from generating cache traffic for at least
@ -494,6 +629,7 @@ ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
ck_pr_store_uint(&ticket->position, update + 1);
return;
}
#endif /* !CK_F_SPINLOCK_TICKET_TRYLOCK */
#endif /* CK_F_SPINLOCK_TICKET */
#ifndef CK_F_SPINLOCK_MCS

@ -99,6 +99,8 @@ ck_pr_stall(void)
CK_CC_INLINE static void
ck_pr_fence_load_depends(void)
{

	/*
	 * Generic gcc fallback: emit a full barrier. This is stronger than
	 * data-dependency ordering requires, but portable across targets.
	 */
	__sync_synchronize();
	return;
}

@ -9,6 +9,7 @@ DIR=backoff \
hp \
hs \
ht \
pflock \
pr \
queue \
ring \
@ -47,6 +48,8 @@ all:
$(MAKE) -C ./ck_ring/benchmark all
$(MAKE) -C ./ck_rwlock/validate all
$(MAKE) -C ./ck_rwlock/benchmark all
$(MAKE) -C ./ck_pflock/validate all
$(MAKE) -C ./ck_pflock/benchmark all
$(MAKE) -C ./ck_hp/validate all
$(MAKE) -C ./ck_hp/benchmark all
$(MAKE) -C ./ck_bag/validate all
@ -79,6 +82,8 @@ clean:
$(MAKE) -C ./ck_ring/benchmark clean
$(MAKE) -C ./ck_rwlock/validate clean
$(MAKE) -C ./ck_rwlock/benchmark clean
$(MAKE) -C ./ck_pflock/validate clean
$(MAKE) -C ./ck_pflock/benchmark clean
$(MAKE) -C ./ck_hp/validate clean
$(MAKE) -C ./ck_hp/benchmark clean
$(MAKE) -C ./ck_bag/validate clean

@ -0,0 +1,17 @@
.PHONY: clean distribution

OBJECTS=latency throughput

all: $(OBJECTS)

# Depend on ck_pflock.h (not ck_rwlock.h, a copy-paste leftover) so edits
# to the pflock implementation trigger a rebuild.
latency: latency.c ../../../include/ck_pflock.h
	$(CC) $(CFLAGS) -o latency latency.c

throughput: throughput.c ../../../include/ck_pflock.h
	$(CC) $(CFLAGS) -o throughput throughput.c

clean:
	rm -rf *.dSYM *~ *.o $(OBJECTS)

include ../../../build/regressions.build
CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE

@ -0,0 +1,72 @@
/*
* Copyright 2011-2013 Samy Al Bahra.
* Copyright 2013 John Wittrock.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_pflock.h>
#include <inttypes.h>
#include <stdio.h>
#include "../../common.h"
#ifndef STEPS
#define STEPS 1000000
#endif
/*
 * Single-threaded latency benchmark: reports the average cycle cost of an
 * uncontended write-side and read-side acquire/release pair.
 */
int
main(void)
{
	ck_pflock_t pflock = CK_PFLOCK_INITIALIZER;
	uint64_t begin, end, n;

	/* Warm the write path before measuring. */
	for (n = 0; n < STEPS; n++) {
		ck_pflock_write_lock(&pflock);
		ck_pflock_write_unlock(&pflock);
	}

	begin = rdtsc();
	for (n = 0; n < STEPS; n++) {
		ck_pflock_write_lock(&pflock);
		ck_pflock_write_unlock(&pflock);
	}
	end = rdtsc();
	printf("WRITE: pflock %15" PRIu64 "\n", (end - begin) / STEPS);

	/* Warm the read path before measuring. */
	for (n = 0; n < STEPS; n++) {
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
	}

	begin = rdtsc();
	for (n = 0; n < STEPS; n++) {
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
	}
	end = rdtsc();
	printf("READ: pflock %15" PRIu64 "\n", (end - begin) / STEPS);
	return 0;
}

@ -0,0 +1,163 @@
/*
* Copyright 2011-2013 Samy Al Bahra.
* Copyright 2013 John Wittrock.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_pflock.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include "../../common.h"
#ifndef STEPS
#define STEPS 1000000
#endif
static int barrier;
static int threads;
static unsigned int flag CK_CC_CACHELINE;
static ck_pflock_t pflock = CK_PFLOCK_INITIALIZER;
static struct affinity affinity;
/*
 * Benchmark worker: after a barrier rendezvous, repeatedly times a batch of
 * 16 read acquire/release pairs (manually unrolled to keep loop overhead out
 * of the measurement; the >> 4 divides by the batch size) and accumulates a
 * running average in cycles, written to *pun on exit.
 */
static void *
thread_pflock(void *pun)
{
	uint64_t s_b, e_b, a, i;
	uint64_t *value = pun;

	if (aff_iterate(&affinity) != 0) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	/* Rendezvous: wait until all workers are running. */
	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads)
		ck_pr_stall();

	for (i = 1, a = 0;; i++) {
		s_b = rdtsc();
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		ck_pflock_read_lock(&pflock);
		ck_pflock_read_unlock(&pflock);
		e_b = rdtsc();

		/* Average cost per pair for this batch of 16. */
		a += (e_b - s_b) >> 4;

		/* Main thread raises the flag when the run window ends. */
		if (ck_pr_load_uint(&flag) == 1)
			break;
	}

	/* Second rendezvous before publishing the per-thread result. */
	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads * 2)
		ck_pr_stall();

	*value = (a / i);
	return NULL;
}
/*
 * Driver: spawns <threads> workers, lets them hammer the read lock for ten
 * seconds, then collects and prints the per-thread average latency.
 */
int
main(int argc, char *argv[])
{
	pthread_t *tids;
	uint64_t *samples;
	int i;

	if (argc != 3) {
		ck_error("Usage: throughput <delta> <threads>\n");
	}

	threads = atoi(argv[2]);
	if (threads <= 0) {
		ck_error("ERROR: Threads must be a value > 0.\n");
	}

	tids = malloc(sizeof(pthread_t) * threads);
	if (tids == NULL) {
		ck_error("ERROR: Failed to initialize thread.\n");
	}

	samples = malloc(sizeof(uint64_t) * threads);
	if (samples == NULL) {
		ck_error("ERROR: Failed to create latency buffer.\n");
	}

	affinity.delta = atoi(argv[1]);
	affinity.request = 0;

	fprintf(stderr, "Creating threads (pflock)...");
	for (i = 0; i < threads; i++) {
		if (pthread_create(&tids[i], NULL, thread_pflock, samples + i) != 0) {
			ck_error("ERROR: Could not create thread %d\n", i);
		}
	}
	fprintf(stderr, "done\n");

	/* Let the workers run for a fixed window, then signal shutdown. */
	common_sleep(10);
	ck_pr_store_uint(&flag, 1);

	fprintf(stderr, "Waiting for threads to finish acquisition regression...");
	for (i = 0; i < threads; i++)
		pthread_join(tids[i], NULL);
	fprintf(stderr, "done\n\n");

	for (i = 0; i < threads; i++)
		printf("%10u %20" PRIu64 "\n", i + 1, samples[i]);

	return 0;
}

@ -0,0 +1,17 @@
.PHONY: check clean distribution
OBJECTS=validate
all: $(OBJECTS)
validate: validate.c ../../../include/ck_pflock.h
$(CC) $(CFLAGS) -o validate validate.c
check: all
./validate $(CORES) 1
clean:
rm -rf *.dSYM *~ *.o $(OBJECTS)
include ../../../build/regressions.build
CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE

@ -0,0 +1,151 @@
/*
* Copyright 2011-2013 Samy Al Bahra, John Wittrock.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <sys/time.h>
#include <ck_pr.h>
#include <ck_pflock.h>
#include "../../common.h"
#ifndef ITERATE
#define ITERATE 1000000
#endif
static struct affinity a;
static unsigned int locked;
static int nthr;
static ck_pflock_t lock = CK_PFLOCK_INITIALIZER;
/*
 * Validation worker: under the write lock, bumps a shared counter up to 8
 * and back to 0, checking for interference at each checkpoint; under the
 * read lock, checks the counter is quiescent at 0. Any mismatch means
 * mutual exclusion was violated.
 */
static void *
thread(void *null CK_CC_UNUSED)
{
	int i = ITERATE;
	unsigned int l;

	if (aff_iterate(&a)) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	while (i--) {
		ck_pflock_write_lock(&lock);
		{
			l = ck_pr_load_uint(&locked);
			if (l != 0) {
				ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
			}

			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);

			l = ck_pr_load_uint(&locked);
			if (l != 8) {
				/* Message fixed: the expected value here is 8, not 2. */
				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
			}

			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);

			l = ck_pr_load_uint(&locked);
			if (l != 0) {
				ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
			}
		}
		ck_pflock_write_unlock(&lock);

		ck_pflock_read_lock(&lock);
		{
			l = ck_pr_load_uint(&locked);
			if (l != 0) {
				ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
			}
		}
		ck_pflock_read_unlock(&lock);
	}

	return NULL;
}
int
main(int argc, char *argv[])
{
pthread_t *threads;
int i;
if (argc != 3) {
ck_error("Usage: validate <number of threads> <affinity delta>\n");
}
nthr = atoi(argv[1]);
if (nthr <= 0) {
ck_error("ERROR: Number of threads must be greater than 0\n");
}
threads = malloc(sizeof(pthread_t) * nthr);
if (threads == NULL) {
ck_error("ERROR: Could not allocate thread structures\n");
}
a.delta = atoi(argv[2]);
fprintf(stderr, "Creating threads (mutual exclusion)...");
for (i = 0; i < nthr; i++) {
if (pthread_create(&threads[i], NULL, thread, NULL)) {
ck_error("ERROR: Could not create thread %d\n", i);
}
}
fprintf(stderr, "done\n");
fprintf(stderr, "Waiting for threads to finish correctness regression...");
for (i = 0; i < nthr; i++)
pthread_join(threads[i], NULL);
fprintf(stderr, "done (passed)\n");
return 0;
}

@ -1,5 +1,10 @@
#include <ck_spinlock.h>
#define LOCK_NAME "ck_ticket"
#define LOCK_DEFINE static ck_spinlock_ticket_t CK_CC_CACHELINE lock = CK_SPINLOCK_TICKET_INITIALIZER
#define LOCK ck_spinlock_ticket_lock(&lock)
#define UNLOCK ck_spinlock_ticket_unlock(&lock)
#ifdef CK_F_SPINLOCK_TICKET_TRYLOCK
#define TRYLOCK ck_spinlock_ticket_trylock(&lock)
#endif

@ -1,5 +1,5 @@
#define LOCK_NAME "ck_ticket_pb"
#define LOCK_DEFINE static ck_spinlock_ticket_t CK_CC_CACHELINE lock = CK_SPINLOCK_TICKET_INITIALIZER
#define LOCK ck_spinlock_ticket_lock_pb(&lock)
#define LOCK ck_spinlock_ticket_lock_pb(&lock, 5)
#define UNLOCK ck_spinlock_ticket_unlock(&lock)

@ -70,21 +70,40 @@ thread(void *null CK_CC_UNUSED)
}
while (i--) {
#ifdef TRYLOCK
if (i & 1) {
LOCK;
} else {
while (TRYLOCK == false)
ck_pr_stall();
}
#else
LOCK;
#endif
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
ck_pr_inc_uint(&locked);
j = ck_pr_load_uint(&locked);
if (j != 5) {
if (j != 10) {
ck_error("ERROR (WR): Race condition (%u)\n", j);
exit(EXIT_FAILURE);
}
ck_pr_dec_uint(&locked);
ck_pr_dec_uint(&locked);
ck_pr_dec_uint(&locked);
ck_pr_dec_uint(&locked);
ck_pr_dec_uint(&locked);
ck_pr_dec_uint(&locked);
ck_pr_dec_uint(&locked);
ck_pr_dec_uint(&locked);
@ -92,6 +111,7 @@ thread(void *null CK_CC_UNUSED)
ck_pr_dec_uint(&locked);
UNLOCK;
LOCK;
j = ck_pr_load_uint(&locked);
@ -99,6 +119,7 @@ thread(void *null CK_CC_UNUSED)
ck_error("ERROR (RD): Race condition (%u)\n", j);
exit(EXIT_FAILURE);
}
UNLOCK;
}

Loading…
Cancel
Save