ck_epoch: Change to epoch semantics, bump grace period to 4.

Samy Al Bahra 12 years ago
parent b145b513de
commit 57104fcde7

@@ -38,13 +38,15 @@ typedef struct ck_epoch_entry ck_epoch_entry_t;
typedef void ck_epoch_cb_t(ck_epoch_entry_t *);
.Ft void
.Fn ck_epoch_call "ck_epoch_record_t *record" "ck_epoch_entry_t *entry" "ck_epoch_cb_t *function"
.Fn ck_epoch_call "ck_epoch_t *epoch" "ck_epoch_record_t *record" "ck_epoch_entry_t *entry" "ck_epoch_cb_t *function"
.Sh DESCRIPTION
The
.Fn ck_epoch_call 3
function will defer the execution of the function pointed to by
.Fa function
until a grace-period has been detected. The function will be provided
until a grace-period has been detected in
.Fa epoch .
The function will be provided
the pointer specified by
.Fa entry .
The function will execute at some time in the future via calls to

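For reference, here is a minimal caller-side sketch of the updated interface. The names global_epoch, my_record, struct node, node_destroy and node_retire are hypothetical and not part of this change; the point is only that the ck_epoch_t object is now passed explicitly as the first argument of ck_epoch_call.

#include <stddef.h>
#include <stdlib.h>
#include <ck_epoch.h>

struct node {
	void *value;
	ck_epoch_entry_t epoch_entry;
};

/* Reclaims the node that embeds the deferred entry. */
static void
node_destroy(ck_epoch_entry_t *e)
{

	free((char *)e - offsetof(struct node, epoch_entry));
	return;
}

/* Defers destruction of n until a grace period has been detected. */
static void
node_retire(ck_epoch_t *global, ck_epoch_record_t *record, struct node *n)
{

	ck_epoch_call(global, record, &n->epoch_entry, node_destroy);
	(void)ck_epoch_poll(global, record);
	return;
}
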
@@ -121,11 +121,13 @@ ck_epoch_end(ck_epoch_t *global, ck_epoch_record_t *record)
* non-blocking deferral.
*/
CK_CC_INLINE static void
ck_epoch_call(ck_epoch_record_t *record,
ck_epoch_call(ck_epoch_t *epoch,
ck_epoch_record_t *record,
ck_epoch_entry_t *entry,
ck_epoch_cb_t *function)
{
unsigned int offset = record->epoch & (CK_EPOCH_LENGTH - 1);
unsigned int e = ck_pr_load_uint(&epoch->epoch);
unsigned int offset = e & (CK_EPOCH_LENGTH - 1);
record->n_pending++;
entry->function = function;
@@ -138,7 +140,6 @@ ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *);
void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *);
void ck_epoch_unregister(ck_epoch_record_t *);
bool ck_epoch_poll(ck_epoch_t *, ck_epoch_record_t *);
void ck_epoch_call(ck_epoch_record_t *, ck_epoch_entry_t *, ck_epoch_cb_t *);
void ck_epoch_synchronize(ck_epoch_t *, ck_epoch_record_t *);
void ck_epoch_barrier(ck_epoch_t *, ck_epoch_record_t *);

@@ -183,7 +183,7 @@ thread(void *unused CK_CC_UNUSED)
s = ck_stack_pop_upmc(&stack);
e = stack_container(s);
ck_epoch_call(&record, &e->epoch_entry, destructor);
ck_epoch_call(&stack_epoch, &record, &e->epoch_entry, destructor);
if (i % 1024)
ck_epoch_poll(&stack_epoch, &record);

@@ -194,7 +194,7 @@ thread(void *unused CK_CC_UNUSED)
ck_epoch_synchronize(&stack_epoch, &record);
if (i & 1) {
ck_epoch_call(&record, &e->epoch_entry, destructor);
ck_epoch_call(&stack_epoch, &record, &e->epoch_entry, destructor);
} else {
destructor(&e->epoch_entry);
}

@@ -112,7 +112,7 @@ thread(void *unused CK_CC_UNUSED)
ck_epoch_end(&stack_epoch, &record);
e = stack_container(s);
ck_epoch_call(&record, &e->epoch_entry, destructor);
ck_epoch_call(&stack_epoch, &record, &e->epoch_entry, destructor);
smr += ck_epoch_poll(&stack_epoch, &record) == false;
}

@@ -128,7 +128,7 @@
* this is to not apply modulo arithmetic to e_g but only to deferral list
* indexing.
*/
#define CK_EPOCH_GRACE 3U
#define CK_EPOCH_GRACE 4U
enum {
CK_EPOCH_STATE_USED = 0,
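
Below is a small stand-alone illustration of the comment above; the ring length of 4 is an assumption matching CK_EPOCH_LENGTH in ck_epoch.h, and the program is illustrative only. The global epoch counter e_g is left unbounded, and only the deferral-list index is reduced with a power-of-two mask.

#include <stdio.h>

/* Assumed to match CK_EPOCH_LENGTH (4) in ck_epoch.h. */
#define EPOCH_LENGTH 4U

int
main(void)
{
	unsigned int e_g;

	/* An unbounded epoch counter maps onto a small ring of buckets. */
	for (e_g = 0; e_g < 12; e_g++) {
		printf("epoch %2u -> deferral bucket %u\n", e_g,
		    e_g & (EPOCH_LENGTH - 1));
	}

	return 0;
}
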
@@ -338,6 +338,16 @@ ck_epoch_synchronize(struct ck_epoch *global, struct ck_epoch_record *record)
return;
}
/*
* It may be worth it to actually apply these deferral semantics to an epoch
* that was observed at ck_epoch_call time. The problem is that the latter would
* require a full fence.
*
* ck_epoch_call will dispatch to the latest epoch snapshot that was observed.
* There are cases where it will fail to reclaim as early as it could. If this
* becomes a problem, we could actually use a heap for epoch buckets but that
* is far from ideal too.
*/
bool
ck_epoch_poll(struct ck_epoch *global, struct ck_epoch_record *record)
{
@@ -346,6 +356,7 @@ ck_epoch_poll(struct ck_epoch *global, struct ck_epoch_record *record)
struct ck_epoch_record *cr = NULL;
/* Serialize record epoch snapshots with respect to global epoch load. */
record->epoch = epoch;
ck_pr_fence_memory();
cr = ck_epoch_scan(global, cr, epoch);
if (cr != NULL)
@@ -353,7 +364,6 @@ ck_epoch_poll(struct ck_epoch *global, struct ck_epoch_record *record)
ck_pr_cas_uint_value(&global->epoch, epoch, epoch + 1, &snapshot);
ck_epoch_dispatch(record, epoch + 1);
record->epoch = snapshot;
return true;
}
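
To go with the new comment about dispatching to the latest observed epoch snapshot, here is a hedged reader-side sketch. It assumes the two-argument ck_epoch_begin()/ck_epoch_end() shown in the hunks above; struct node, list_walk and the consume callback are hypothetical. A deferral that lands in a late bucket only delays reclamation: readers inside a protected section are never exposed to a premature free.

#include <ck_epoch.h>
#include <ck_pr.h>

struct node {
	void *value;
	struct node *next;
	ck_epoch_entry_t epoch_entry;
};

/* Traverses a lock-free list inside an epoch-protected section. */
static void
list_walk(ck_epoch_t *global, ck_epoch_record_t *record,
    struct node **head, void (*consume)(void *))
{
	struct node *n;

	ck_epoch_begin(global, record);
	for (n = ck_pr_load_ptr(head); n != NULL;
	    n = ck_pr_load_ptr(&n->next))
		consume(n->value);
	ck_epoch_end(global, record);

	return;
}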
