ck_epoch: Remove debug output, update comments.

ck_pring
Samy Al Bahra 14 years ago
parent 3747da1f2a
commit 875d070814

@ -0,0 +1,237 @@
/*
* Copyright 2011 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _CK_EPOCH_H
#define _CK_EPOCH_H
#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stack.h>
#include <stdbool.h>
/*
* CK_EPOCH_LENGTH must be a power of 2.
*/
#ifndef CK_EPOCH_LENGTH
#define CK_EPOCH_LENGTH 4
#endif
/*
 * Callback invoked on each deferred entry once it is safe to reclaim it.
 */
typedef void (*ck_epoch_destructor_t)(ck_stack_entry_t *);

/*
 * Record slot states: a record is either in use by a thread or free for
 * re-use by a newly registering thread.
 */
enum {
	CK_EPOCH_USED = 0,
	CK_EPOCH_FREE = 1
};

struct ck_epoch;
struct ck_epoch_record {
	unsigned int active;		/* Set to 1 while inside an epoch section, 0 otherwise. */
	unsigned int status;		/* CK_EPOCH_USED or CK_EPOCH_FREE. */
	unsigned int epoch;		/* Last global epoch value observed by this record. */
	ck_stack_t pending[CK_EPOCH_LENGTH];	/* Deferred entries, bucketed by epoch modulo length. */
	unsigned int delta;		/* Calls since the last epoch-update attempt. */
	struct ck_epoch *global;	/* Back-pointer to the owning global epoch object. */
	ck_stack_entry_t record_next;	/* Linkage in the global record list. */
} CK_CC_CACHELINE;
typedef struct ck_epoch_record ck_epoch_record_t;

struct ck_epoch {
	ck_stack_t records;		/* Stack of all registered records. */
	unsigned int threshold;		/* Delta value at which an epoch update is attempted. */
	unsigned int epoch;		/* Current global epoch counter. */
	unsigned int n_free;		/* NOTE(review): written only in ck_epoch_init here; presumably tracks free records — verify against other users. */
	ck_epoch_destructor_t destroy;	/* Destructor applied to reclaimed entries. */
};
typedef struct ck_epoch ck_epoch_t;

/* Generates ck_epoch_record_container(): maps a record_next entry back to its record. */
CK_STACK_CONTAINER(struct ck_epoch_record, record_next, ck_epoch_record_container)
/*
 * Initialize a global epoch object. `destroy` is invoked on deferred
 * entries once they are reclaimed; `threshold` bounds how many calls a
 * record makes before it attempts to advance the global epoch.
 */
CK_CC_INLINE static void
ck_epoch_init(struct ck_epoch *global,
	      unsigned int threshold,
	      ck_epoch_destructor_t destroy)
{

	global->threshold = threshold;
	global->destroy = destroy;
	global->n_free = 0;
	global->epoch = 1;
	ck_stack_init(&global->records);

	/* Make initialization visible before the object is used elsewhere. */
	ck_pr_fence_store();
	return;
}
/*
 * Associate a per-thread record with the global epoch object and publish
 * it on the global record list.
 */
CK_CC_INLINE static void
ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
{
	unsigned int bucket;

	record->global = global;
	record->status = CK_EPOCH_USED;
	record->active = 0;
	record->epoch = 0;
	record->delta = 0;

	bucket = 0;
	while (bucket < CK_EPOCH_LENGTH)
		ck_stack_init(&record->pending[bucket++]);

	/* The record must be fully initialized before it is linked in. */
	ck_pr_fence_store();
	ck_stack_push_upmc(&global->records, &record->record_next);
	return;
}
/*
 * Mark a record as free so it may be re-used by a future registrant.
 * The record itself is never unlinked from the global record list.
 */
CK_CC_INLINE static void
ck_epoch_unregister(struct ck_epoch_record *record)
{
	record->status = CK_EPOCH_FREE;
	/* Make the status change visible to other threads' stores ordering. */
	ck_pr_fence_store();
	return;
}
/*
 * Attempt to advance the global epoch. The counter may only be bumped if
 * every active record (other than the caller's) has already observed the
 * current global epoch; otherwise some thread may still hold references
 * tied to the previous epoch.
 */
CK_CC_INLINE static void
ck_epoch_update(struct ck_epoch *global, struct ck_epoch_record *record)
{
	struct ck_epoch_record *c_record;
	ck_stack_entry_t *cursor;
	unsigned int g_epoch = ck_pr_load_uint(&global->epoch);

	CK_STACK_FOREACH(&global->records, cursor) {
		c_record = ck_epoch_record_container(cursor);
		if (ck_pr_load_uint(&c_record->status) == CK_EPOCH_FREE ||
		    c_record == record)
			continue;

		/*
		 * BUGFIX: compare the unsigned active flag against zero, not
		 * the boolean constant true — any non-zero value means the
		 * record is inside an epoch-protected section.
		 */
		if (ck_pr_load_uint(&c_record->active) != 0 &&
		    ck_pr_load_uint(&c_record->epoch) != g_epoch)
			return;
	}

	ck_pr_inc_uint(&global->epoch);
	return;
}
/*
 * Mark the record active on entry to an epoch-protected section. The
 * store fence orders the flag store before subsequent stores; NOTE(review):
 * ordering versus subsequent loads depends on ck_pr_fence_store semantics
 * for the target architecture — confirm against ck_pr documentation.
 */
CK_CC_INLINE static void
ck_epoch_activate(struct ck_epoch_record *record)
{
	ck_pr_store_uint(&record->active, 1);
	ck_pr_fence_store();
	return;
}
/*
 * Mark the record inactive on exit from an epoch-protected section. The
 * fence precedes the store so prior stores are ordered before the flag
 * is cleared.
 */
CK_CC_INLINE static void
ck_epoch_deactivate(struct ck_epoch_record *record)
{
	ck_pr_fence_store();
	ck_pr_store_uint(&record->active, 0);
	return;
}
/*
 * Observe the global epoch. If it has advanced past this record's last
 * observed value, the pending bucket for the old epoch may be reclaimed.
 * Otherwise, every `threshold` calls, attempt to advance the global epoch
 * and re-check.
 */
CK_CC_INLINE static void
ck_epoch_start(struct ck_epoch_record *record)
{
	struct ck_epoch *global = record->global;
	unsigned int g_epoch;

	for (;;) {
		g_epoch = ck_pr_load_uint(&global->epoch);
		if (record->epoch != g_epoch) {
			ck_stack_entry_t *next, *cursor;
			/* CK_EPOCH_LENGTH is a power of two; mask selects the bucket. */
			unsigned int epoch = record->epoch & (CK_EPOCH_LENGTH - 1);

			/*
			 * This means all threads with a potential reference to a hazard pointer
			 * will have a view as new as or newer than the calling thread. No active
			 * reference should exist to any object in the record's pending list.
			 */
			CK_STACK_FOREACH_SAFE(&record->pending[epoch], cursor, next)
				global->destroy(cursor);
			ck_stack_init(&record->pending[epoch]);

			record->epoch = g_epoch;
			record->delta = 0;
			break;
		}

		/* Same epoch as before: count the call and, at the threshold,
		 * try to move the global epoch forward and loop to re-check. */
		if (++record->delta >= global->threshold) {
			record->delta = 0;
			ck_epoch_update(global, record);
			continue;
		}

		break;
	}

	return;
}
/*
 * Counterpart to ck_epoch_start; currently a no-op retained for
 * interface symmetry.
 */
CK_CC_INLINE static void
ck_epoch_stop(struct ck_epoch_record *record CK_CC_UNUSED)
{
	return;
}
CK_CC_INLINE static void
ck_epoch_begin(struct ck_epoch_record *record)
{
ck_epoch_activate(record);
ck_epoch_start(record);
return;
}
CK_CC_INLINE static void
ck_epoch_end(struct ck_epoch_record *record)
{
ck_epoch_deactivate(record);
return;
}
/*
 * Force an epoch-update attempt and then re-observe the epoch, which
 * reclaims any newly expired deferrals for this record.
 */
CK_CC_INLINE static void
ck_epoch_flush(struct ck_epoch_record *record)
{
	struct ck_epoch *global = record->global;

	ck_epoch_update(global, record);
	ck_epoch_start(record);
	return;
}
/*
 * Defer reclamation of an entry: push it onto the pending bucket for the
 * record's current epoch. The destructor runs once the epoch has expired.
 */
CK_CC_INLINE static void
ck_epoch_free(struct ck_epoch_record *record, ck_stack_entry_t *entry)
{
	unsigned int bucket;

	/* CK_EPOCH_LENGTH is a power of two, so masking selects the bucket. */
	bucket = ck_pr_load_uint(&record->epoch) & (CK_EPOCH_LENGTH - 1);
	ck_stack_push_spnc(&record->pending[bucket], entry);
	record->delta++;
	return;
}
#endif /* _CK_EPOCH_H */

@ -0,0 +1,14 @@
# Build the ck_epoch/ck_stack regression test.
# NOTE(review): .PHONY declares `distribution` but no such target is defined
# here — presumably supplied by the included regressions.build; verify.
.PHONY: clean distribution

OBJECTS=ck_stack

all: $(OBJECTS)

# Rebuild whenever the test or the headers under test change.
ck_stack: ck_stack.c ../../../include/ck_stack.h ../../../include/ck_epoch.h
	$(CC) $(CFLAGS) -o ck_stack ck_stack.c

clean:
	rm -rf *~ *.o $(OBJECTS) *.dSYM

include ../../../build/regressions.build
CFLAGS+=-lpthread -D_GNU_SOURCE

@ -0,0 +1,152 @@
/*
* Copyright 2010-2011 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <sys/time.h>
#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <stdbool.h>
#include <stddef.h>
#include <ck_epoch.h>
#include <ck_stack.h>
#include "../../common.h"
/* Epoch-update threshold passed to ck_epoch_init (from argv[2]). */
static unsigned int threshold;
/* Number of worker threads (from argv[1]). */
static unsigned int n_threads;
/* Start barrier: workers spin until all threads finish allocating. */
static unsigned int barrier;
/* Exit barrier: workers spin until all threads finish their pairs. */
static unsigned int e_barrier;

/* Push/pop pairs performed per thread. */
#ifndef PAIRS
#define PAIRS 1000000
#endif

struct node {
	unsigned int value;
	ck_stack_entry_t stack_entry;
};

/* Shared lock-free stack under test and its epoch reclamation state. */
static ck_stack_t stack = {NULL, NULL};
static ck_epoch_t stack_epoch;

/* Generates stack_container(): maps a stack_entry back to its node. */
CK_STACK_CONTAINER(struct node, stack_entry, stack_container)

/* Processor-affinity iterator shared by all workers. */
static struct affinity a;
/*
 * Worker: allocate PAIRS nodes, then repeatedly push one node onto the
 * shared stack and pop one back, deferring reclamation of each popped
 * node through the epoch mechanism.
 */
static void *
thread(void *unused CK_CC_UNUSED)
{
	struct node **entry, *e;
	unsigned int i;
	ck_epoch_record_t record;
	ck_stack_entry_t *s;

	ck_epoch_register(&stack_epoch, &record);

	if (aff_iterate(&a)) {
		perror("ERROR: failed to affine thread");
		exit(EXIT_FAILURE);
	}

	entry = malloc(sizeof(struct node *) * PAIRS);
	if (entry == NULL) {
		fprintf(stderr, "Failed allocation.\n");
		exit(EXIT_FAILURE);
	}

	for (i = 0; i < PAIRS; i++) {
		entry[i] = malloc(sizeof(struct node));
		/* BUGFIX: check the element just allocated, not the array pointer. */
		if (entry[i] == NULL) {
			fprintf(stderr, "Failed individual allocation\n");
			exit(EXIT_FAILURE);
		}
	}

	/* Wait until every worker has finished its allocations. */
	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < n_threads);

	for (i = 0; i < PAIRS; i++) {
		ck_epoch_begin(&record);
		ck_stack_push_upmc(&stack, &entry[i]->stack_entry);
		ck_epoch_end(&record);

		ck_epoch_begin(&record);
		s = ck_stack_pop_upmc(&stack);
		ck_epoch_end(&record);

		/* Defer the free: another thread may still reference s. */
		e = stack_container(s);
		ck_epoch_free(&record, &e->stack_entry);
	}

	/*
	 * The nodes themselves are reclaimed by the epoch destructor; only
	 * the temporary pointer array is released here (previously leaked).
	 */
	free(entry);

	ck_pr_inc_uint(&e_barrier);
	while (ck_pr_load_uint(&e_barrier) < n_threads);

	return (NULL);
}
/* Epoch destructor: map the stack entry back to its node and free it. */
static void
destructor(ck_stack_entry_t *p)
{

	free(stack_container(p));
	return;
}
/*
 * Usage: stack <threads> <threshold> <delta>
 * Spawns <threads> workers exercising push/pop with epoch reclamation.
 */
int
main(int argc, char *argv[])
{
	unsigned int i;
	pthread_t *threads;

	if (argc != 4) {
		fprintf(stderr, "Usage: stack <threads> <threshold> <delta>\n");
		exit(EXIT_FAILURE);
	}

	n_threads = atoi(argv[1]);
	threshold = atoi(argv[2]);
	a.delta = atoi(argv[3]);
	a.request = 0;

	/* Reject a zero/invalid thread count: the barriers would never open. */
	if (n_threads == 0) {
		fprintf(stderr, "ERROR: thread count must be a positive integer\n");
		exit(EXIT_FAILURE);
	}

	/* BUGFIX: this allocation was previously used unchecked. */
	threads = malloc(sizeof(pthread_t) * n_threads);
	if (threads == NULL) {
		fprintf(stderr, "ERROR: failed to allocate thread handles\n");
		exit(EXIT_FAILURE);
	}

	ck_epoch_init(&stack_epoch, threshold, destructor);

	for (i = 0; i < n_threads; i++) {
		/* BUGFIX: a failed pthread_create was previously ignored. */
		if (pthread_create(threads + i, NULL, thread, NULL) != 0) {
			fprintf(stderr, "ERROR: failed to create thread\n");
			exit(EXIT_FAILURE);
		}
	}

	for (i = 0; i < n_threads; i++)
		pthread_join(threads[i], NULL);

	free(threads);
	return (0);
}
Loading…
Cancel
Save