ck_epoch: Support per-object destructors.

This increases epoch per-object overhead to 16 bytes.
Samy Al Bahra 14 years ago
parent 0f48b6fe7a
commit 10ffb2e6f1
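The gist of the change: each reclaimable object now embeds a ck_epoch_entry_t, and the destructor is supplied per call to ck_epoch_free() rather than once at ck_epoch_init(). A condensed sketch of the new usage, assembled from the regression-test updates in this commit:

struct node {
unsigned int value;
ck_epoch_entry_t epoch_entry; /* the 16 bytes of per-object overhead */
ck_stack_entry_t stack_entry;
};

/* Generates epoch_container(), mapping a ck_epoch_entry_t * back to its node. */
CK_EPOCH_CONTAINER(struct node, epoch_entry, epoch_container)

static void
destructor(ck_epoch_entry_t *p)
{
struct node *e = epoch_container(p);
free(e);
return;
}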

@@ -45,13 +45,20 @@
#define CK_EPOCH_LENGTH 4
#endif
typedef void (*ck_epoch_destructor_t)(ck_stack_entry_t *);
struct ck_epoch_entry;
typedef struct ck_epoch_entry ck_epoch_entry_t;
typedef void (*ck_epoch_destructor_t)(ck_epoch_entry_t *);
enum {
CK_EPOCH_USED = 0,
CK_EPOCH_FREE = 1
};
struct ck_epoch_entry {
ck_epoch_destructor_t destroy;
ck_stack_entry_t stack_entry;
};
/*
* Return pointer to ck_epoch_entry container object.
*/
#define CK_EPOCH_CONTAINER(T, M, N) CK_CC_CONTAINER(struct ck_epoch_entry, T, M, N)
struct ck_epoch;
struct ck_epoch_record {
unsigned int active;
@@ -73,182 +80,11 @@ struct ck_epoch {
ck_stack_t records;
unsigned int threshold;
unsigned int n_free;
ck_epoch_destructor_t destroy;
};
typedef struct ck_epoch ck_epoch_t;
CK_STACK_CONTAINER(struct ck_epoch_record, record_next, ck_epoch_record_container)
CK_CC_INLINE static void
ck_epoch_init(struct ck_epoch *global,
unsigned int threshold,
ck_epoch_destructor_t destroy)
{
ck_stack_init(&global->records);
global->epoch = 1;
global->n_free = 0;
global->destroy = destroy;
global->threshold = threshold;
ck_pr_fence_store();
return;
}
CK_CC_INLINE static struct ck_epoch_record *
ck_epoch_recycle(struct ck_epoch *global)
{
struct ck_epoch_record *record;
ck_stack_entry_t *cursor;
unsigned int status;
if (ck_pr_load_uint(&global->n_free) == 0)
return (NULL);
CK_STACK_FOREACH(&global->records, cursor) {
record = ck_epoch_record_container(cursor);
if (ck_pr_load_uint(&record->status) == CK_EPOCH_FREE) {
status = ck_pr_fas_uint(&record->status, CK_EPOCH_USED);
if (status == CK_EPOCH_FREE) {
ck_pr_dec_uint(&global->n_free);
return record;
}
}
}
return NULL;
}
CK_CC_INLINE static void
ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
{
size_t i;
record->status = CK_EPOCH_USED;
record->active = 0;
record->epoch = 0;
record->delta = 0;
record->n_pending = 0;
record->n_peak = 0;
record->n_reclamations = 0;
record->global = global;
for (i = 0; i < CK_EPOCH_LENGTH; i++)
ck_stack_init(&record->pending[i]);
ck_pr_fence_store();
ck_stack_push_upmc(&global->records, &record->record_next);
return;
}
CK_CC_INLINE static void
ck_epoch_unregister(struct ck_epoch_record *record)
{
size_t i;
record->status = CK_EPOCH_FREE;
record->active = 0;
record->epoch = 0;
record->delta = 0;
record->n_pending = 0;
record->n_peak = 0;
record->n_reclamations = 0;
for (i = 0; i < CK_EPOCH_LENGTH; i++)
ck_stack_init(&record->pending[i]);
ck_pr_inc_uint(&record->global->n_free);
ck_pr_fence_store();
return;
}
CK_CC_INLINE static void
ck_epoch_tick(struct ck_epoch *global, struct ck_epoch_record *record)
{
struct ck_epoch_record *c_record;
ck_stack_entry_t *cursor;
unsigned int g_epoch = ck_pr_load_uint(&global->epoch);
g_epoch &= CK_EPOCH_LENGTH - 1;
CK_STACK_FOREACH(&global->records, cursor) {
c_record = ck_epoch_record_container(cursor);
if (ck_pr_load_uint(&c_record->status) == CK_EPOCH_FREE ||
c_record == record)
continue;
if (ck_pr_load_uint(&c_record->active) != 0 &&
ck_pr_load_uint(&c_record->epoch) != g_epoch)
return;
}
ck_pr_inc_uint(&global->epoch);
return;
}
CK_CC_INLINE static bool
ck_epoch_reclaim(struct ck_epoch_record *record)
{
struct ck_epoch *global = record->global;
unsigned int g_epoch = ck_pr_load_uint(&global->epoch);
unsigned int epoch = record->epoch;
ck_stack_entry_t *next, *cursor;
g_epoch &= CK_EPOCH_LENGTH - 1;
if (epoch == g_epoch)
return false;
/*
* This means all threads with a potential reference to a
* hazard pointer will have a view as new as or newer than
* the calling thread. No active reference should exist to
* any object in the record's pending list.
*/
CK_STACK_FOREACH_SAFE(&record->pending[g_epoch], cursor, next) {
global->destroy(cursor);
record->n_pending--;
record->n_reclamations++;
}
ck_stack_init(&record->pending[g_epoch]);
record->epoch = g_epoch;
record->delta = 0;
return true;
}
CK_CC_INLINE static void
ck_epoch_write_begin(struct ck_epoch_record *record)
{
struct ck_epoch *global = record->global;
ck_pr_store_uint(&record->active, record->active + 1);
ck_pr_fence_store();
/*
* In the case of recursive write sections, avoid ticking
* over global epoch.
*/
if (record->active > 1)
return;
for (;;) {
if (ck_epoch_reclaim(record) == true)
break;
if (++record->delta >= global->threshold) {
record->delta = 0;
ck_epoch_tick(global, record);
continue;
}
break;
}
return;
}
CK_CC_INLINE static void
ck_epoch_read_begin(struct ck_epoch_record *record)
ck_epoch_read_begin(ck_epoch_record_t *record)
{
/*
@@ -267,7 +103,7 @@ ck_epoch_read_begin(struct ck_epoch_record *record)
}
CK_CC_INLINE static void
ck_epoch_end(struct ck_epoch_record *record)
ck_epoch_end(ck_epoch_record_t *record)
{
ck_pr_fence_store();
@@ -276,45 +112,22 @@ ck_epoch_end(struct ck_epoch_record *record)
}
CK_CC_INLINE static void
ck_epoch_retire(struct ck_epoch_record *record, ck_stack_entry_t *entry)
{
ck_stack_push_spnc(&record->pending[record->epoch], entry);
record->n_pending += 1;
return;
}
CK_CC_INLINE static void
ck_epoch_free(struct ck_epoch_record *record, ck_stack_entry_t *entry)
ck_epoch_retire(ck_epoch_record_t *record, ck_epoch_entry_t *entry)
{
unsigned int epoch = ck_pr_load_uint(&record->epoch);
struct ck_epoch *global = record->global;
ck_stack_push_spnc(&record->pending[epoch], entry);
ck_stack_push_spnc(&record->pending[record->epoch], &entry->stack_entry);
record->n_pending += 1;
if (record->n_pending > record->n_peak)
record->n_peak = record->n_pending;
if (record->n_pending >= global->threshold && ck_epoch_reclaim(record) == false)
ck_epoch_tick(global, record);
return;
}
CK_CC_INLINE static void
ck_epoch_purge(struct ck_epoch_record *record)
{
ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
while (record->n_pending > 0) {
ck_epoch_reclaim(record);
ck_epoch_tick(record->global, record);
if (record->n_pending > 0)
ck_backoff_gb(&backoff);
}
return;
}
void ck_epoch_init(ck_epoch_t *, unsigned int);
ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *);
void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *);
void ck_epoch_unregister(ck_epoch_record_t *);
void ck_epoch_tick(ck_epoch_t *, ck_epoch_record_t *);
bool ck_epoch_reclaim(ck_epoch_record_t *);
void ck_epoch_write_begin(ck_epoch_record_t *);
void ck_epoch_free(ck_epoch_record_t *, ck_epoch_destructor_t, ck_epoch_entry_t *);
void ck_epoch_purge(ck_epoch_record_t *record);
#endif /* _CK_EPOCH_H */
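Putting the new prototypes together, a writer's lifecycle looks roughly as follows. This is a minimal sketch assuming the struct node, epoch_container, destructor, and stack_container definitions from the tests below, a shared ck_stack_t stack, and an illustrative threshold of 100; error handling is omitted:

ck_epoch_t stack_epoch;
ck_epoch_record_t record;
struct node *e;
ck_stack_entry_t *s;

ck_epoch_init(&stack_epoch, 100);
ck_epoch_register(&stack_epoch, &record);

ck_epoch_write_begin(&record);
s = ck_stack_pop_upmc(&stack);
ck_epoch_end(&record);

if (s != NULL) {
e = stack_container(s);
/* Defer free() until no reader can still hold a reference. */
ck_epoch_free(&record, destructor, &e->epoch_entry);
}

/* At thread exit, drain pending entries and make the record recyclable. */
ck_epoch_purge(&record);
ck_epoch_unregister(&record);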

@@ -4,11 +4,11 @@ OBJECTS=ck_stack ck_stack_read
all: $(OBJECTS)
ck_stack_read: ck_stack_read.c ../../../include/ck_stack.h ../../../include/ck_epoch.h
$(CC) $(CFLAGS) -o ck_stack_read ck_stack_read.c
ck_stack_read: ck_stack_read.c ../../../include/ck_stack.h ../../../include/ck_epoch.h ../../../src/ck_epoch.c
$(CC) $(CFLAGS) -o ck_stack_read ck_stack_read.c ../../../src/ck_epoch.c
ck_stack: ck_stack.c ../../../include/ck_stack.h ../../../include/ck_epoch.h
$(CC) $(CFLAGS) -o ck_stack ck_stack.c
ck_stack: ck_stack.c ../../../include/ck_stack.h ../../../include/ck_epoch.h ../../../src/ck_epoch.c
$(CC) $(CFLAGS) -o ck_stack ck_stack.c ../../../src/ck_epoch.c
clean:
rm -rf *~ *.o $(OBJECTS) *.dSYM

@@ -55,13 +55,24 @@ static unsigned int e_barrier;
struct node {
unsigned int value;
ck_epoch_entry_t epoch_entry;
ck_stack_entry_t stack_entry;
};
static ck_stack_t stack = {NULL, NULL};
static ck_epoch_t stack_epoch;
CK_STACK_CONTAINER(struct node, stack_entry, stack_container)
CK_EPOCH_CONTAINER(struct node, epoch_entry, epoch_container)
static struct affinity a;
static void
destructor(ck_epoch_entry_t *p)
{
struct node *e = epoch_container(p);
free(e);
return;
}
static void *
thread(void *unused CK_CC_UNUSED)
{
@@ -104,7 +115,7 @@ thread(void *unused CK_CC_UNUSED)
ck_epoch_end(&record);
e = stack_container(s);
ck_epoch_free(&record, &e->stack_entry);
ck_epoch_free(&record, destructor, &e->epoch_entry);
}
ck_pr_inc_uint(&e_barrier);
@@ -129,15 +140,6 @@ thread(void *unused CK_CC_UNUSED)
return (NULL);
}
static void
destructor(ck_stack_entry_t *p)
{
struct node *e = stack_container(p);
free(e);
return;
}
int
main(int argc, char *argv[])
{
@@ -156,7 +158,7 @@ main(int argc, char *argv[])
threads = malloc(sizeof(pthread_t) * n_threads);
ck_epoch_init(&stack_epoch, threshold, destructor);
ck_epoch_init(&stack_epoch, threshold);
for (i = 0; i < n_threads; i++)
pthread_create(threads + i, NULL, thread, NULL);

@@ -61,12 +61,23 @@ static unsigned int readers;
struct node {
unsigned int value;
ck_stack_entry_t stack_entry;
ck_epoch_entry_t epoch_entry;
};
static ck_stack_t stack = CK_STACK_INITIALIZER;
static ck_epoch_t stack_epoch;
CK_STACK_CONTAINER(struct node, stack_entry, stack_container)
CK_EPOCH_CONTAINER(struct node, epoch_entry, epoch_container)
static struct affinity a;
static void
destructor(ck_epoch_entry_t *p)
{
struct node *e = epoch_container(p);
free(e);
return;
}
static void *
read_thread(void *unused CK_CC_UNUSED)
{
@@ -171,7 +182,7 @@ thread(void *unused CK_CC_UNUSED)
ck_epoch_end(&record);
e = stack_container(s);
ck_epoch_free(&record, &e->stack_entry);
ck_epoch_free(&record, destructor, &e->epoch_entry);
}
}
@@ -186,15 +197,6 @@ thread(void *unused CK_CC_UNUSED)
return (NULL);
}
static void
destructor(ck_stack_entry_t *p)
{
struct node *e = stack_container(p);
free(e);
return;
}
int
main(int argc, char *argv[])
{
@@ -213,7 +215,7 @@ main(int argc, char *argv[])
threads = malloc(sizeof(pthread_t) * n_threads);
ck_epoch_init(&stack_epoch, threshold, destructor);
ck_epoch_init(&stack_epoch, threshold);
for (i = 0; i < n_threads - 1; i++)
pthread_create(threads + i, NULL, read_thread, NULL);

@@ -5,7 +5,8 @@ OBJECTS=ck_hp.o \
ck_barrier_combining.o \
ck_barrier_dissemination.o \
ck_barrier_tournament.o \
ck_barrier_mcs.o
ck_barrier_mcs.o \
ck_epoch.o
all: libck.so libck.a
@@ -15,6 +16,9 @@ libck.so: $(OBJECTS)
libck.a: $(OBJECTS)
ar rcs libck.a $(OBJECTS)
ck_epoch.o: ../include/ck_epoch.h ck_epoch.c
$(CC) $(CFLAGS) -c -o ck_epoch.o ck_epoch.c
ck_hp.o: ck_hp.c
$(CC) $(CFLAGS) -c -o ck_hp.o ck_hp.c

@@ -0,0 +1,247 @@
/*
* Copyright 2011 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* The implementation here is inspired from the work described in:
* Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
* of Cambridge Computing Laboratory.
*/
#include <ck_cc.h>
#include <ck_epoch.h>
#include <ck_pr.h>
#include <ck_stack.h>
#include <stdbool.h>
enum {
CK_EPOCH_USED = 0,
CK_EPOCH_FREE = 1
};
CK_STACK_CONTAINER(struct ck_epoch_record, record_next, ck_epoch_record_container)
CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry, ck_epoch_entry_container)
void
ck_epoch_init(struct ck_epoch *global, unsigned int threshold)
{
ck_stack_init(&global->records);
global->epoch = 1;
global->n_free = 0;
global->threshold = threshold;
ck_pr_fence_store();
return;
}
struct ck_epoch_record *
ck_epoch_recycle(struct ck_epoch *global)
{
struct ck_epoch_record *record;
ck_stack_entry_t *cursor;
unsigned int status;
if (ck_pr_load_uint(&global->n_free) == 0)
return (NULL);
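/* Scan for a free record; fetch-and-store arbitrates between racing claimants. */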
CK_STACK_FOREACH(&global->records, cursor) {
record = ck_epoch_record_container(cursor);
if (ck_pr_load_uint(&record->status) == CK_EPOCH_FREE) {
status = ck_pr_fas_uint(&record->status, CK_EPOCH_USED);
if (status == CK_EPOCH_FREE) {
ck_pr_dec_uint(&global->n_free);
return record;
}
}
}
return NULL;
}
void
ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
{
size_t i;
record->status = CK_EPOCH_USED;
record->active = 0;
record->epoch = 0;
record->delta = 0;
record->n_pending = 0;
record->n_peak = 0;
record->n_reclamations = 0;
record->global = global;
for (i = 0; i < CK_EPOCH_LENGTH; i++)
ck_stack_init(&record->pending[i]);
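/* Fence so the record is fully initialized before it is published on the global list. */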
ck_pr_fence_store();
ck_stack_push_upmc(&global->records, &record->record_next);
return;
}
void
ck_epoch_unregister(struct ck_epoch_record *record)
{
size_t i;
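/* Reset the record's state so a later ck_epoch_recycle() caller starts clean. */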
record->status = CK_EPOCH_FREE;
record->active = 0;
record->epoch = 0;
record->delta = 0;
record->n_pending = 0;
record->n_peak = 0;
record->n_reclamations = 0;
for (i = 0; i < CK_EPOCH_LENGTH; i++)
ck_stack_init(&record->pending[i]);
ck_pr_inc_uint(&record->global->n_free);
ck_pr_fence_store();
return;
}
void
ck_epoch_tick(struct ck_epoch *global, struct ck_epoch_record *record)
{
struct ck_epoch_record *c_record;
ck_stack_entry_t *cursor;
unsigned int g_epoch = ck_pr_load_uint(&global->epoch);
g_epoch &= CK_EPOCH_LENGTH - 1;
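/* The global epoch may only advance once every active record has observed it. */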
CK_STACK_FOREACH(&global->records, cursor) {
c_record = ck_epoch_record_container(cursor);
if (ck_pr_load_uint(&c_record->status) == CK_EPOCH_FREE ||
c_record == record)
continue;
if (ck_pr_load_uint(&c_record->active) != 0 &&
ck_pr_load_uint(&c_record->epoch) != g_epoch)
return;
}
ck_pr_inc_uint(&global->epoch);
return;
}
bool
ck_epoch_reclaim(struct ck_epoch_record *record)
{
struct ck_epoch *global = record->global;
unsigned int g_epoch = ck_pr_load_uint(&global->epoch);
unsigned int epoch = record->epoch;
ck_stack_entry_t *next, *cursor;
g_epoch &= CK_EPOCH_LENGTH - 1;
if (epoch == g_epoch)
return false;
/*
* This means all threads with a potential reference to a
* hazard pointer will have a view as new as or newer than
* the calling thread. No active reference should exist to
* any object in the record's pending list.
*/
CK_STACK_FOREACH_SAFE(&record->pending[g_epoch], cursor, next) {
struct ck_epoch_entry *entry = ck_epoch_entry_container(cursor);
entry->destroy(entry);
record->n_pending--;
record->n_reclamations++;
}
ck_stack_init(&record->pending[g_epoch]);
record->epoch = g_epoch;
record->delta = 0;
return true;
}
void
ck_epoch_write_begin(struct ck_epoch_record *record)
{
struct ck_epoch *global = record->global;
ck_pr_store_uint(&record->active, record->active + 1);
ck_pr_fence_store();
/*
* In the case of recursive write sections, avoid ticking
* over global epoch.
*/
if (record->active > 1)
return;
for (;;) {
if (ck_epoch_reclaim(record) == true)
break;
if (++record->delta >= global->threshold) {
record->delta = 0;
ck_epoch_tick(global, record);
continue;
}
break;
}
return;
}
void
ck_epoch_free(struct ck_epoch_record *record,
ck_epoch_destructor_t destroy,
ck_epoch_entry_t *entry)
{
unsigned int epoch = ck_pr_load_uint(&record->epoch);
struct ck_epoch *global = record->global;
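/* Stash the destructor in the entry, then defer it to the current epoch's pending list. */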
entry->destroy = destroy;
ck_stack_push_spnc(&record->pending[epoch], &entry->stack_entry);
record->n_pending += 1;
if (record->n_pending > record->n_peak)
record->n_peak = record->n_pending;
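/* Once the threshold is hit, reclaim if possible; otherwise try to advance the global epoch. */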
if (record->n_pending >= global->threshold && ck_epoch_reclaim(record) == false)
ck_epoch_tick(global, record);
return;
}
void
ck_epoch_purge(struct ck_epoch_record *record)
{
ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
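/* Spin, with exponential backoff, until every pending entry on this record is reclaimed. */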
while (record->n_pending > 0) {
ck_epoch_reclaim(record);
ck_epoch_tick(record->global, record);
if (record->n_pending > 0)
ck_backoff_gb(&backoff);
}
return;
}