From 57104fcde7a1c359dff94c946e31288610854b13 Mon Sep 17 00:00:00 2001
From: Samy Al Bahra
Date: Mon, 3 Sep 2012 10:21:03 -0400
Subject: [PATCH] ck_epoch: Change to epoch semantics, bump grace period to 4.

---
 doc/ck_epoch_call                             |  6 ++++--
 include/ck_epoch.h                            |  7 ++++---
 regressions/ck_epoch/validate/ck_epoch_poll.c |  2 +-
 .../ck_epoch/validate/ck_epoch_synchronize.c  |  2 +-
 regressions/ck_epoch/validate/ck_stack.c      |  2 +-
 src/ck_epoch.c                                | 14 ++++++++++++--
 6 files changed, 23 insertions(+), 10 deletions(-)

diff --git a/doc/ck_epoch_call b/doc/ck_epoch_call
index 1982cb2..cc4c243 100644
--- a/doc/ck_epoch_call
+++ b/doc/ck_epoch_call
@@ -38,13 +38,15 @@
 typedef struct ck_epoch_entry ck_epoch_entry_t;
 typedef void ck_epoch_cb_t(ck_epoch_entry_t *);
 .Ft void
-.Fn ck_epoch_call "ck_epoch_record_t *record" "ck_epoch_entry_t *entry" "ck_epoch_cb_t *function"
+.Fn ck_epoch_call "ck_epoch_t *epoch" "ck_epoch_record_t *record" "ck_epoch_entry_t *entry" "ck_epoch_cb_t *function"
 .Sh DESCRIPTION
 The
 .Fn ck_epoch_call 3
 function will defer the execution of the function pointed to by
 .Fa function
-until a grace-period has been detected. The function will be provided
+until a grace-period has been detected in
+.Fa epoch .
+The function will be provided
 the pointer specified by
 .Fa entry .
 The function will execute at some time in the future via calls to
diff --git a/include/ck_epoch.h b/include/ck_epoch.h
index fcf5592..ce4af9f 100644
--- a/include/ck_epoch.h
+++ b/include/ck_epoch.h
@@ -121,11 +121,13 @@ ck_epoch_end(ck_epoch_t *global, ck_epoch_record_t *record)
  * non-blocking deferral.
  */
 CK_CC_INLINE static void
-ck_epoch_call(ck_epoch_record_t *record,
+ck_epoch_call(ck_epoch_t *epoch,
+	      ck_epoch_record_t *record,
 	      ck_epoch_entry_t *entry,
 	      ck_epoch_cb_t *function)
 {
-	unsigned int offset = record->epoch & (CK_EPOCH_LENGTH - 1);
+	unsigned int e = ck_pr_load_uint(&epoch->epoch);
+	unsigned int offset = e & (CK_EPOCH_LENGTH - 1);
 
 	record->n_pending++;
 	entry->function = function;
@@ -138,7 +140,6 @@
 ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *);
 void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *);
 void ck_epoch_unregister(ck_epoch_record_t *);
 bool ck_epoch_poll(ck_epoch_t *, ck_epoch_record_t *);
-void ck_epoch_call(ck_epoch_record_t *, ck_epoch_entry_t *, ck_epoch_cb_t *);
 void ck_epoch_synchronize(ck_epoch_t *, ck_epoch_record_t *);
 void ck_epoch_barrier(ck_epoch_t *, ck_epoch_record_t *);
diff --git a/regressions/ck_epoch/validate/ck_epoch_poll.c b/regressions/ck_epoch/validate/ck_epoch_poll.c
index b198eaa..2cfa91e 100644
--- a/regressions/ck_epoch/validate/ck_epoch_poll.c
+++ b/regressions/ck_epoch/validate/ck_epoch_poll.c
@@ -183,7 +183,7 @@ thread(void *unused CK_CC_UNUSED)
 
 		s = ck_stack_pop_upmc(&stack);
 		e = stack_container(s);
-		ck_epoch_call(&record, &e->epoch_entry, destructor);
+		ck_epoch_call(&stack_epoch, &record, &e->epoch_entry, destructor);
 
 		if (i % 1024)
 			ck_epoch_poll(&stack_epoch, &record);
diff --git a/regressions/ck_epoch/validate/ck_epoch_synchronize.c b/regressions/ck_epoch/validate/ck_epoch_synchronize.c
index 4c13b91..7840707 100644
--- a/regressions/ck_epoch/validate/ck_epoch_synchronize.c
+++ b/regressions/ck_epoch/validate/ck_epoch_synchronize.c
@@ -194,7 +194,7 @@ thread(void *unused CK_CC_UNUSED)
 		ck_epoch_synchronize(&stack_epoch, &record);
 
 		if (i & 1) {
-			ck_epoch_call(&record, &e->epoch_entry, destructor);
+			ck_epoch_call(&stack_epoch, &record, &e->epoch_entry, destructor);
 		} else {
 			destructor(&e->epoch_entry);
 		}
diff --git a/regressions/ck_epoch/validate/ck_stack.c b/regressions/ck_epoch/validate/ck_stack.c
index 45777fb..5a8c740 100644
--- a/regressions/ck_epoch/validate/ck_stack.c
+++ b/regressions/ck_epoch/validate/ck_stack.c
@@ -112,7 +112,7 @@ thread(void *unused CK_CC_UNUSED)
 		ck_epoch_end(&stack_epoch, &record);
 
 		e = stack_container(s);
-		ck_epoch_call(&record, &e->epoch_entry, destructor);
+		ck_epoch_call(&stack_epoch, &record, &e->epoch_entry, destructor);
 
 		smr += ck_epoch_poll(&stack_epoch, &record) == false;
 	}
diff --git a/src/ck_epoch.c b/src/ck_epoch.c
index 6ab71b8..13da8cc 100644
--- a/src/ck_epoch.c
+++ b/src/ck_epoch.c
@@ -128,7 +128,7 @@
  * this is to not apply modulo arithmetic to e_g but only to deferral list
  * indexing.
  */
-#define CK_EPOCH_GRACE 3U
+#define CK_EPOCH_GRACE 4U
 
 enum {
 	CK_EPOCH_STATE_USED = 0,
@@ -338,6 +338,16 @@ ck_epoch_synchronize(struct ck_epoch *global, struct ck_epoch_record *record)
 	return;
 }
 
+/*
+ * It may be worth it to actually apply these deferral semantics to an epoch
+ * that was observed at ck_epoch_call time. The problem is that the latter would
+ * require a full fence.
+ *
+ * ck_epoch_call will dispatch to the latest epoch snapshot that was observed.
+ * There are cases where it will fail to reclaim as early as it could. If this
+ * becomes a problem, we could actually use a heap for epoch buckets but that
+ * is far from ideal too.
+ */
 bool
 ck_epoch_poll(struct ck_epoch *global, struct ck_epoch_record *record)
 {
@@ -346,6 +356,7 @@ ck_epoch_poll(struct ck_epoch *global, struct ck_epoch_record *record)
 	struct ck_epoch_record *cr = NULL;
 
 	/* Serialize record epoch snapshots with respect to global epoch load. */
+	record->epoch = epoch;
 	ck_pr_fence_memory();
 	cr = ck_epoch_scan(global, cr, epoch);
 	if (cr != NULL)
@@ -353,7 +364,6 @@ ck_epoch_poll(struct ck_epoch *global, struct ck_epoch_record *record)
 
 	ck_pr_cas_uint_value(&global->epoch, epoch, epoch + 1, &snapshot);
 	ck_epoch_dispatch(record, epoch + 1);
-	record->epoch = snapshot;
 	return true;
 }
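
For callers updating to the new interface, a minimal usage sketch follows, modeled on the regression tests above. The struct object, object_destructor, global_epoch, my_record, and retire_object names are illustrative only and are not part of this patch; the sketch assumes the record has already been registered against the epoch object with ck_epoch_register.

#include <ck_epoch.h>
#include <stddef.h>
#include <stdlib.h>

/* Illustrative object type; the epoch entry is embedded in the object. */
struct object {
	int value;
	ck_epoch_entry_t epoch_entry;
};

static ck_epoch_t global_epoch;		/* Initialized with ck_epoch_init elsewhere. */
static ck_epoch_record_t my_record;	/* Registered with ck_epoch_register elsewhere. */

/* Deferred callback: recover the enclosing object from its embedded entry and free it. */
static void
object_destructor(ck_epoch_entry_t *entry)
{
	struct object *o = (struct object *)
	    ((char *)entry - offsetof(struct object, epoch_entry));

	free(o);
	return;
}

static void
retire_object(struct object *o)
{

	/*
	 * With this patch, ck_epoch_call takes the epoch object as its first
	 * argument, so the deferral is filed against the current global epoch
	 * rather than the record's last observed epoch.
	 */
	ck_epoch_call(&global_epoch, &my_record, &o->epoch_entry, object_destructor);

	/* Non-blocking attempt to observe a grace period and run ready callbacks. */
	ck_epoch_poll(&global_epoch, &my_record);
	return;
}

As in the ck_stack regression, the record that issued the call can later drive reclamation with further ck_epoch_poll calls or with ck_epoch_barrier.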