ck_epoch: If all threads are inactive, then we are at a grace period.

Samy Al Bahra 13 years ago
parent 5293ad2348
commit 7821be8a60
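In short: ck_epoch_scan gains a bool *af out-parameter that reports whether any record was observed as active during the walk. If no thread is inside a read-side section, no reader can hold a reference to a logically deleted object, so the caller is already at a grace period: the blocking synchronization path can jump straight to its dispatch label, and ck_epoch_poll can drain every deferred-callback bucket in a single call rather than advancing the global epoch one step at a time.

A minimal caller-side sketch of what this enables, assuming the API of this revision (where ck_epoch_poll takes both the global epoch object and a record, as in the last hunk below); setup() and try_reclaim() are invented for illustration:

#include <ck_epoch.h>
#include <stdbool.h>

static ck_epoch_t epoch_domain;
static ck_epoch_record_t writer_record;

/* Hypothetical one-time setup for this sketch. */
static void
setup(void)
{

        ck_epoch_init(&epoch_domain);
        ck_epoch_register(&epoch_domain, &writer_record);
        return;
}

/* Hypothetical non-blocking reclamation attempt. */
static bool
try_reclaim(void)
{

        /*
         * After this commit: if the internal scan observed every record
         * as inactive, poll dispatches all CK_EPOCH_LENGTH buckets at
         * once and returns true. Previously a single call could only
         * advance the global epoch and dispatch a single bucket.
         */
        return ck_epoch_poll(&epoch_domain, &writer_record);
}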

src/ck_epoch.c

@@ -215,10 +215,14 @@ ck_epoch_unregister(struct ck_epoch_record *record)
 }
 
 static struct ck_epoch_record *
-ck_epoch_scan(struct ck_epoch *global, struct ck_epoch_record *cr, unsigned int epoch)
+ck_epoch_scan(struct ck_epoch *global,
+    struct ck_epoch_record *cr,
+    unsigned int epoch,
+    bool *af)
 {
         ck_stack_entry_t *cursor;
 
+        *af = false;
         if (cr == NULL) {
                 cursor = CK_STACK_FIRST(&global->records);
         } else {
@@ -226,7 +230,7 @@ ck_epoch_scan(struct ck_epoch *global, struct ck_epoch_record *cr, unsigned int
         }
 
         while (cursor != NULL) {
-                unsigned int state;
+                unsigned int state, active;
 
                 cr = ck_epoch_record_container(cursor);
 
@@ -234,8 +238,10 @@ ck_epoch_scan(struct ck_epoch *global, struct ck_epoch_record *cr, unsigned int
                 if (state & CK_EPOCH_STATE_FREE)
                         continue;
 
-                if (ck_pr_load_uint(&cr->active) != 0 &&
-                    ck_pr_load_uint(&cr->epoch) != epoch)
+                active = ck_pr_load_uint(&cr->active);
+                *af |= active;
+
+                if (active != 0 && ck_pr_load_uint(&cr->epoch) != epoch)
                         return cr;
 
                 cursor = CK_STACK_NEXT(cursor);
@@ -275,6 +281,7 @@ ck_epoch_barrier(struct ck_epoch *global, struct ck_epoch_record *record)
 {
         struct ck_epoch_record *cr;
         unsigned int delta, epoch, goal, i;
+        bool active;
 
         /*
          * Technically, we are vulnerable to an overflow in presence of multiple
@@ -295,9 +302,16 @@ ck_epoch_barrier(struct ck_epoch *global, struct ck_epoch_record *record)
          * Determine whether all threads have observed the current epoch.
          * We can get away without a fence here.
          */
-        while (cr = ck_epoch_scan(global, cr, delta), cr != NULL)
+        while (cr = ck_epoch_scan(global, cr, delta, &active), cr != NULL)
                 ck_pr_stall();
 
+        /*
+         * If we have observed all threads as inactive, then we assume
+         * we are at a grace period.
+         */
+        if (active == false)
+                goto dispatch;
+
         /*
          * Increment current epoch. CAS semantics are used to eliminate
          * increment operations for synchronization that occurs for the
@@ -329,15 +343,20 @@ ck_epoch_barrier(struct ck_epoch *global, struct ck_epoch_record *record)
          * could exist to the snapshot of e observed at the time this
          * function was called.
          */
-        while (cr = ck_epoch_scan(global, cr, delta), cr != NULL) {
+        while (cr = ck_epoch_scan(global, cr, delta, &active), cr != NULL) {
                 ck_pr_stall();
 
                 /*
                  * If the epoch value was changed from underneath us then
                  * our epoch must have been observed at some point.
+                 *
+                 * If all threads have gone inactive, we are also at a grace
+                 * period as any reads succeeding this call to synchronize
+                 * will imply a full memory barrier (logically deleted objects
+                 * will not be visible).
                  */
                 epoch = ck_pr_load_uint(&global->epoch);
-                if (epoch != delta)
+                if ((epoch != delta) | (active == false))
                         break;
         }
 
@@ -377,18 +396,29 @@ ck_epoch_synchronize(struct ck_epoch *global, struct ck_epoch_record *record)
 bool
 ck_epoch_poll(struct ck_epoch *global, struct ck_epoch_record *record)
 {
+        bool active;
+        struct ck_epoch_record *cr = NULL;
         unsigned int epoch = ck_pr_load_uint(&global->epoch);
         unsigned int snapshot;
-        struct ck_epoch_record *cr = NULL;
 
         /* Serialize record epoch snapshots with respect to global epoch load. */
         ck_pr_fence_memory();
-        cr = ck_epoch_scan(global, cr, epoch);
+        cr = ck_epoch_scan(global, cr, epoch, &active);
         if (cr != NULL) {
                 record->epoch = epoch;
                 return false;
         }
 
+        /* We are at a grace period if all threads are inactive. */
+        if (active == false) {
+                record->epoch = epoch;
+                for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
+                        ck_epoch_dispatch(record, epoch);
+
+                return true;
+        }
+
+        /* If an active thread exists, rely on epoch observation. */
         if (ck_pr_cas_uint_value(&global->epoch, epoch, epoch + 1, &snapshot) == false) {
                 record->epoch = snapshot;
         } else {

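For reference, the new control flow in the scan boils down to the following self-contained toy model (not ck code; struct toy_record and its fields are simplified stand-ins invented for illustration):

#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-in for ck's list of per-thread records. */
struct toy_record {
        unsigned int active;    /* non-zero while inside a read-side section */
        unsigned int epoch;     /* last global epoch this thread observed */
        struct toy_record *next;
};

/*
 * Mirrors ck_epoch_scan after this commit: return the first record that
 * still blocks the epoch, and report through *af whether any record was
 * observed as active at all.
 */
static struct toy_record *
toy_scan(struct toy_record *head, unsigned int epoch, bool *af)
{
        struct toy_record *cr;

        *af = false;
        for (cr = head; cr != NULL; cr = cr->next) {
                *af |= cr->active != 0;

                if (cr->active != 0 && cr->epoch != epoch)
                        return cr;
        }

        return NULL;
}

If toy_scan returns NULL with *af still false, every thread was inactive: this is exactly the condition under which the blocking path (the hunks at lines 295 and 329 above) takes the new goto dispatch branch, and ck_epoch_poll dispatches all CK_EPOCH_LENGTH buckets and returns true immediately.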