ck_epoch: Barrier placement for RMO.

ck_pring
Samy Al Bahra 12 years ago
parent c274e8bc54
commit 98d902b024

@ -80,6 +80,9 @@ struct ck_epoch {
};
typedef struct ck_epoch ck_epoch_t;
/*
* Marks the beginning of an epoch-protected section.
*/
CK_CC_INLINE static void
ck_epoch_begin(ck_epoch_t *epoch, ck_epoch_record_t *record)
{
@ -98,6 +101,9 @@ ck_epoch_begin(ck_epoch_t *epoch, ck_epoch_record_t *record)
	return;
}
/*
* Marks the end of an epoch-protected section.
*/
CK_CC_INLINE static void
ck_epoch_end(ck_epoch_t *global, ck_epoch_record_t *record)
{

@ -276,17 +276,25 @@ ck_epoch_barrier(struct ck_epoch *global, struct ck_epoch_record *record)
struct ck_epoch_record *cr;
unsigned int delta, epoch, goal, i;
/*
* Technically, we are vulnerable to an overflow in presence of multiple
* writers. Realistically, this will require 2^32 scans. You can use
* epoch-protected sections on the writer-side if this is a concern.
*/
delta = epoch = ck_pr_load_uint(&global->epoch);
goal = epoch + CK_EPOCH_GRACE;
/*
 * Guarantee any mutations previous to the barrier will be made visible
 * with respect to epoch snapshots we will read.
 */
ck_pr_fence_memory();
delta = epoch = ck_pr_load_uint(&global->epoch);
goal = epoch + CK_EPOCH_GRACE;
for (i = 0, cr = NULL; i < CK_EPOCH_GRACE; cr = NULL, i++) {
	/*
	 * Determine whether all threads have observed the current epoch.
	 * We can get away without a fence here.
	 */
	while (cr = ck_epoch_scan(global, cr, delta), cr != NULL)
		ck_pr_stall();
@ -337,6 +345,8 @@ ck_epoch_poll(struct ck_epoch *global, struct ck_epoch_record *record)
unsigned int snapshot;
struct ck_epoch_record *cr = NULL;
/* Serialize record epoch snapshots with respect to global epoch load. */
ck_pr_fence_memory();
cr = ck_epoch_scan(global, cr, epoch);
if (cr != NULL)
	return false;

Loading…
Cancel
Save