epoch: Fix fat-finger that breaks epoch sections.

And this is why tests should also be validated against
fault injection.
ck_pring
Samy Al Bahra 9 years ago
parent ff23c2d3fd
commit 80a9b6ff9c

@@ -47,7 +47,7 @@
  * This is used for sense detection with-respect to concurrent
  * epoch sections.
  */
-#define CK_EPOCH_SENSE 2
+#define CK_EPOCH_SENSE (2)

 struct ck_epoch_entry;
 typedef struct ck_epoch_entry ck_epoch_entry_t;

@@ -137,6 +137,8 @@ CK_STACK_CONTAINER(struct ck_epoch_record, record_next,
 CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
     ck_epoch_entry_container)

+#define CK_EPOCH_SENSE_MASK (CK_EPOCH_SENSE - 1)
+
 void
 _ck_epoch_delref(struct ck_epoch_record *record,
     struct ck_epoch_section *section)
@@ -159,7 +161,8 @@ _ck_epoch_delref(struct ck_epoch_record *record,
 	if (current->count == 0) {
 		struct ck_epoch_ref *other;

-		other = &record->local.bucket[(i + 1) & CK_EPOCH_SENSE];
+		other = &record->local.bucket[(i + 1) &
+		    CK_EPOCH_SENSE_MASK];
 		if (other->count > 0 &&
 		    ((int)(current->epoch - other->epoch) < 0 ||
 		     (current->epoch - other->epoch) > 1)) {
@@ -183,7 +186,7 @@ _ck_epoch_addref(struct ck_epoch_record *record,
 	unsigned int epoch, i;

 	epoch = ck_pr_load_uint(&global->epoch);
-	i = epoch & (CK_EPOCH_SENSE - 1);
+	i = epoch & CK_EPOCH_SENSE_MASK;
 	ref = &record->local.bucket[i];

 	if (ref->count++ == 0) {
@@ -200,7 +203,8 @@ _ck_epoch_addref(struct ck_epoch_record *record,
 		 * and load-{store, load} ordering are sufficient to guarantee
 		 * this ordering.
 		 */
-		previous = &record->local.bucket[(i + 1) & CK_EPOCH_SENSE];
+		previous = &record->local.bucket[(i + 1) &
+		    CK_EPOCH_SENSE_MASK];
 		if (previous->count > 0)
 			ck_pr_fence_acqrel();
 #endif /* !CK_MD_TSO */

Loading…
Cancel
Save