ck_epoch: Cache associated epoch state machine in record.

This ends up removing arguments (and a vector for error) from several
functions on the common path of ck_epoch.
Samy Al Bahra 9 years ago
parent f1302c4920
commit 0d6d384f3f
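
In practice, callers now hand the global ck_epoch object to ck_epoch_register() once, and the fast-path functions recover it from the record. A minimal caller sketch against the new prototypes shown in the diff below (the epoch/record names here are illustrative, not taken from this commit):

#include <ck_epoch.h>

static ck_epoch_t global_epoch;		/* global epoch state machine */
static ck_epoch_record_t record;	/* per-thread record, caches &global_epoch */

int
main(void)
{

	ck_epoch_init(&global_epoch);

	/* Registration still takes the global object; the record caches it. */
	ck_epoch_register(&global_epoch, &record);

	/* Previously: ck_epoch_begin(&global_epoch, &record); */
	ck_epoch_begin(&record);
	/* ... read-side section protected by the epoch ... */
	ck_epoch_end(&record);

	/* Previously: ck_epoch_barrier(&global_epoch, &record); */
	ck_epoch_barrier(&record);

	ck_epoch_unregister(&record);
	return 0;
}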

@@ -62,6 +62,7 @@ struct ck_epoch_entry {
#define CK_EPOCH_CONTAINER(T, M, N) CK_CC_CONTAINER(struct ck_epoch_entry, T, M, N)
struct ck_epoch_record {
struct ck_epoch *global;
unsigned int state;
unsigned int epoch;
unsigned int active;
@@ -85,8 +86,9 @@ typedef struct ck_epoch ck_epoch_t;
* Marks the beginning of an epoch-protected section.
*/
CK_CC_INLINE static void
ck_epoch_begin(ck_epoch_t *epoch, ck_epoch_record_t *record)
ck_epoch_begin(ck_epoch_record_t *record)
{
struct ck_epoch *epoch = record->global;
/*
* Only observe new epoch if thread is not recursing into a read
@@ -121,11 +123,9 @@ ck_epoch_begin(ck_epoch_t *epoch, ck_epoch_record_t *record)
* Marks the end of an epoch-protected section.
*/
CK_CC_INLINE static void
ck_epoch_end(ck_epoch_t *global, ck_epoch_record_t *record)
ck_epoch_end(ck_epoch_record_t *record)
{
(void)global;
ck_pr_fence_release();
ck_pr_store_uint(&record->active, record->active - 1);
return;
@@ -137,11 +137,11 @@ ck_epoch_end(ck_epoch_t *global, ck_epoch_record_t *record)
* non-blocking deferral.
*/
CK_CC_INLINE static void
ck_epoch_call(ck_epoch_t *epoch,
ck_epoch_record_t *record,
ck_epoch_call(ck_epoch_record_t *record,
ck_epoch_entry_t *entry,
ck_epoch_cb_t *function)
{
struct ck_epoch *epoch = record->global;
unsigned int e = ck_pr_load_uint(&epoch->epoch);
unsigned int offset = e & (CK_EPOCH_LENGTH - 1);
@@ -154,10 +154,10 @@ ck_epoch_call(ck_epoch_t *epoch,
void ck_epoch_init(ck_epoch_t *);
ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *);
void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *);
void ck_epoch_unregister(ck_epoch_t *, ck_epoch_record_t *);
bool ck_epoch_poll(ck_epoch_t *, ck_epoch_record_t *);
void ck_epoch_synchronize(ck_epoch_t *, ck_epoch_record_t *);
void ck_epoch_barrier(ck_epoch_t *, ck_epoch_record_t *);
void ck_epoch_unregister(ck_epoch_record_t *);
bool ck_epoch_poll(ck_epoch_record_t *);
void ck_epoch_synchronize(ck_epoch_record_t *);
void ck_epoch_barrier(ck_epoch_record_t *);
void ck_epoch_reclaim(ck_epoch_record_t *);
#endif /* CK_EPOCH_H */

@@ -38,7 +38,7 @@ cb(ck_epoch_entry_t *p)
{
if (counter == 0)
ck_epoch_call(&epoch, &record[1], p, cb);
ck_epoch_call(&record[1], p, cb);
printf("Counter value: %u -> %u\n",
counter, counter + 1);
@@ -54,9 +54,9 @@ main(void)
ck_epoch_register(&epoch, &record[0]);
ck_epoch_register(&epoch, &record[1]);
ck_epoch_call(&epoch, &record[1], &entry, cb);
ck_epoch_barrier(&epoch, &record[1]);
ck_epoch_barrier(&epoch, &record[1]);
ck_epoch_call(&record[1], &entry, cb);
ck_epoch_barrier(&record[1]);
ck_epoch_barrier(&record[1]);
if (counter != 2)
ck_error("Expected counter value 2, read %u.\n", counter);

@@ -108,7 +108,7 @@ read_thread(void *unused CK_CC_UNUSED)
j = 0;
for (;;) {
ck_epoch_begin(&stack_epoch, &record);
ck_epoch_begin(&record);
CK_STACK_FOREACH(&stack, cursor) {
if (cursor == NULL)
continue;
@@ -116,7 +116,7 @@ read_thread(void *unused CK_CC_UNUSED)
n = CK_STACK_NEXT(cursor);
j += ck_pr_load_ptr(&n) != NULL;
}
ck_epoch_end(&stack_epoch, &record);
ck_epoch_end(&record);
if (j != 0 && ck_pr_load_uint(&readers) == 0)
ck_pr_store_uint(&readers, 1);
@@ -178,17 +178,17 @@ write_thread(void *unused CK_CC_UNUSED)
}
for (i = 0; i < PAIRS_S; i++) {
ck_epoch_begin(&stack_epoch, &record);
ck_epoch_begin(&record);
s = ck_stack_pop_upmc(&stack);
e = stack_container(s);
ck_epoch_end(&stack_epoch, &record);
ck_epoch_end(&record);
ck_epoch_call(&stack_epoch, &record, &e->epoch_entry, destructor);
ck_epoch_poll(&stack_epoch, &record);
ck_epoch_call(&record, &e->epoch_entry, destructor);
ck_epoch_poll(&record);
}
}
ck_epoch_barrier(&stack_epoch, &record);
ck_epoch_barrier(&record);
if (tid == 0) {
fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] Peak: %u (%2.2f%%)\n Reclamations: %lu\n\n",

@@ -109,7 +109,7 @@ read_thread(void *unused CK_CC_UNUSED)
j = 0;
for (;;) {
ck_epoch_begin(&stack_epoch, &record);
ck_epoch_begin(&record);
CK_STACK_FOREACH(&stack, cursor) {
if (cursor == NULL)
continue;
@@ -117,7 +117,7 @@ read_thread(void *unused CK_CC_UNUSED)
n = CK_STACK_NEXT(cursor);
j += ck_pr_load_ptr(&n) != NULL;
}
ck_epoch_end(&stack_epoch, &record);
ck_epoch_end(&record);
if (j != 0 && ck_pr_load_uint(&readers) == 0)
ck_pr_store_uint(&readers, 1);
@@ -179,20 +179,20 @@ write_thread(void *unused CK_CC_UNUSED)
}
for (i = 0; i < PAIRS_S; i++) {
ck_epoch_begin(&stack_epoch, &record);
ck_epoch_begin(&record);
s = ck_stack_pop_upmc(&stack);
e = stack_container(s);
ck_epoch_end(&stack_epoch, &record);
ck_epoch_end(&record);
if (i & 1) {
ck_epoch_synchronize(&stack_epoch, &record);
ck_epoch_synchronize(&record);
ck_epoch_reclaim(&record);
} else {
ck_epoch_barrier(&stack_epoch, &record);
ck_epoch_barrier(&record);
}
if (i & 1) {
ck_epoch_call(&stack_epoch, &record, &e->epoch_entry, destructor);
ck_epoch_call(&record, &e->epoch_entry, destructor);
} else {
if (tid == 0 && i % 8192)
fprintf(stderr, "\b%c", animate[i % strlen(animate)]);
@@ -202,7 +202,7 @@ write_thread(void *unused CK_CC_UNUSED)
}
}
ck_epoch_synchronize(&stack_epoch, &record);
ck_epoch_synchronize(&record);
if (tid == 0) {
fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] Peak: %u (%2.2f%%)\n Reclamations: %lu\n\n",

@@ -104,14 +104,14 @@ thread(void *unused CK_CC_UNUSED)
while (ck_pr_load_uint(&barrier) < n_threads);
for (i = 0; i < PAIRS; i++) {
ck_epoch_begin(&stack_epoch, &record);
ck_epoch_begin(&record);
ck_stack_push_upmc(&stack, &entry[i]->stack_entry);
s = ck_stack_pop_upmc(&stack);
ck_epoch_end(&stack_epoch, &record);
ck_epoch_end(&record);
e = stack_container(s);
ck_epoch_call(&stack_epoch, &record, &e->epoch_entry, destructor);
smr += ck_epoch_poll(&stack_epoch, &record) == false;
ck_epoch_call(&record, &e->epoch_entry, destructor);
smr += ck_epoch_poll(&record) == false;
}
ck_pr_inc_uint(&e_barrier);
@@ -124,7 +124,7 @@ thread(void *unused CK_CC_UNUSED)
record.n_pending,
record.n_dispatch);
ck_epoch_barrier(&stack_epoch, &record);
ck_epoch_barrier(&record);
ck_pr_inc_uint(&e_barrier);
while (ck_pr_load_uint(&e_barrier) < (n_threads << 1));

@@ -124,7 +124,7 @@ hs_free(void *p, size_t b, bool r)
if (r == true) {
/* Destruction requires safe memory reclamation. */
ck_epoch_call(&epoch_hs, &epoch_wr, &(--e)->epoch_entry, hs_destroy);
ck_epoch_call(&epoch_wr, &(--e)->epoch_entry, hs_destroy);
} else {
free(--e);
}
@@ -237,7 +237,7 @@ reader(void *unused)
ck_epoch_register(&epoch_hs, &epoch_record);
for (;;) {
j++;
ck_epoch_begin(&epoch_hs, &epoch_record);
ck_epoch_begin(&epoch_record);
s = rdtsc();
for (i = 0; i < keys_length; i++) {
char *r;
@@ -257,7 +257,7 @@ reader(void *unused)
ck_error("ERROR: Found invalid value: [%s] but expected [%s]\n", (char *)r, keys[i]);
}
a += rdtsc() - s;
ck_epoch_end(&epoch_hs, &epoch_record);
ck_epoch_end(&epoch_record);
n_state = ck_pr_load_int(&state);
if (n_state != state_previous) {
@@ -452,7 +452,7 @@ main(int argc, char *argv[])
fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
ck_epoch_record_t epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_hs, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
"%u pending, %u peak, %lu reclamations\n\n",
@@ -503,7 +503,7 @@ main(int argc, char *argv[])
while (ck_pr_load_int(&barrier[HS_STATE_STRICT_REPLACEMENT]) != n_threads)
ck_pr_stall();
set_reset();
ck_epoch_synchronize(&epoch_hs, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
a / (repeated * keys_length), acc(HS_STATE_STRICT_REPLACEMENT) / n_threads);
@@ -538,7 +538,7 @@ main(int argc, char *argv[])
ck_pr_stall();
set_reset();
ck_epoch_synchronize(&epoch_hs, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
a / (repeated * keys_length), acc(HS_STATE_DELETION) / n_threads);
@@ -585,13 +585,13 @@ main(int argc, char *argv[])
while (ck_pr_load_int(&barrier[HS_STATE_REPLACEMENT]) != n_threads)
ck_pr_stall();
set_reset();
ck_epoch_synchronize(&epoch_hs, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
a / (repeated * keys_length), acc(HS_STATE_REPLACEMENT) / n_threads);
ck_pr_inc_int(&barrier[HS_STATE_REPLACEMENT]);
epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_hs, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
"%u pending, %u peak, %lu reclamations\n\n",

@@ -109,7 +109,7 @@ ht_free(void *p, size_t b, bool r)
if (r == true) {
/* Destruction requires safe memory reclamation. */
ck_epoch_call(&epoch_ht, &epoch_wr, &(--e)->epoch_entry, ht_destroy);
ck_epoch_call(&epoch_wr, &(--e)->epoch_entry, ht_destroy);
} else {
free(--e);
}
@@ -224,7 +224,7 @@ reader(void *unused)
ck_epoch_register(&epoch_ht, &epoch_record);
for (;;) {
j++;
ck_epoch_begin(&epoch_ht, &epoch_record);
ck_epoch_begin(&epoch_record);
s = rdtsc();
for (i = 0; i < keys_length; i++) {
char *r;
@@ -242,7 +242,7 @@ reader(void *unused)
ck_error("ERROR: Found invalid value: [%s] but expected [%s]\n", r, keys[i]);
}
a += rdtsc() - s;
ck_epoch_end(&epoch_ht, &epoch_record);
ck_epoch_end(&epoch_record);
n_state = ck_pr_load_int(&state);
if (n_state != state_previous) {
@@ -424,7 +424,7 @@ main(int argc, char *argv[])
fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
ck_epoch_record_t epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_ht, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
"%u pending, %u peak, %lu reclamations\n\n",
@@ -469,7 +469,7 @@ main(int argc, char *argv[])
while (ck_pr_load_int(&barrier[HT_STATE_STRICT_REPLACEMENT]) != n_threads)
ck_pr_stall();
table_reset();
ck_epoch_synchronize(&epoch_ht, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
a / (repeated * keys_length), accumulator[HT_STATE_STRICT_REPLACEMENT] / n_threads);
@@ -504,7 +504,7 @@ main(int argc, char *argv[])
ck_pr_stall();
table_reset();
ck_epoch_synchronize(&epoch_ht, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
a / (repeated * keys_length), accumulator[HT_STATE_DELETION] / n_threads);
@@ -543,13 +543,13 @@ main(int argc, char *argv[])
while (ck_pr_load_int(&barrier[HT_STATE_REPLACEMENT]) != n_threads)
ck_pr_stall();
table_reset();
ck_epoch_synchronize(&epoch_ht, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
a / (repeated * keys_length), accumulator[HT_STATE_REPLACEMENT] / n_threads);
ck_pr_inc_int(&barrier[HT_STATE_REPLACEMENT]);
epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_ht, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
"%u pending, %u peak, %lu reclamations\n\n",

@@ -107,7 +107,7 @@ ht_free(void *p, size_t b, bool r)
if (r == true) {
/* Destruction requires safe memory reclamation. */
ck_epoch_call(&epoch_ht, &epoch_wr, &(--e)->epoch_entry, ht_destroy);
ck_epoch_call(&epoch_wr, &(--e)->epoch_entry, ht_destroy);
} else {
free(--e);
}
@@ -224,7 +224,7 @@ ht_reader(void *unused)
ck_epoch_register(&epoch_ht, &epoch_record);
for (;;) {
j++;
ck_epoch_begin(&epoch_ht, &epoch_record);
ck_epoch_begin(&epoch_record);
s = rdtsc();
for (i = 0; i < keys_length; i++) {
uintptr_t r;
@@ -243,7 +243,7 @@ ht_reader(void *unused)
(uintmax_t)r);
}
a += rdtsc() - s;
ck_epoch_end(&epoch_ht, &epoch_record);
ck_epoch_end(&epoch_record);
n_state = ck_pr_load_int(&state);
if (n_state != state_previous) {
@@ -410,7 +410,7 @@ main(int argc, char *argv[])
fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
ck_epoch_record_t epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_ht, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
"%u pending, %u peak, %lu reclamations\n\n",
@@ -455,7 +455,7 @@ main(int argc, char *argv[])
while (ck_pr_load_int(&barrier[HT_STATE_STRICT_REPLACEMENT]) != n_threads)
ck_pr_stall();
table_reset();
ck_epoch_synchronize(&epoch_ht, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
a / (repeated * keys_length), accumulator[HT_STATE_STRICT_REPLACEMENT] / n_threads);
@@ -490,7 +490,7 @@ main(int argc, char *argv[])
ck_pr_stall();
table_reset();
ck_epoch_synchronize(&epoch_ht, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
a / (repeated * keys_length), accumulator[HT_STATE_DELETION] / n_threads);
@@ -529,13 +529,13 @@ main(int argc, char *argv[])
while (ck_pr_load_int(&barrier[HT_STATE_REPLACEMENT]) != n_threads)
ck_pr_stall();
table_reset();
ck_epoch_synchronize(&epoch_ht, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
a / (repeated * keys_length), accumulator[HT_STATE_REPLACEMENT] / n_threads);
ck_pr_inc_int(&barrier[HT_STATE_REPLACEMENT]);
epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_ht, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
"%u pending, %u peak, %lu reclamations\n\n",

@@ -124,7 +124,7 @@ hs_free(void *p, size_t b, bool r)
if (r == true) {
/* Destruction requires safe memory reclamation. */
ck_epoch_call(&epoch_hs, &epoch_wr, &(--e)->epoch_entry, hs_destroy);
ck_epoch_call(&epoch_wr, &(--e)->epoch_entry, hs_destroy);
} else {
free(--e);
}
@@ -234,7 +234,7 @@ reader(void *unused)
ck_epoch_register(&epoch_hs, &epoch_record);
for (;;) {
j++;
ck_epoch_begin(&epoch_hs, &epoch_record);
ck_epoch_begin(&epoch_record);
s = rdtsc();
for (i = 0; i < keys_length; i++) {
char *r;
@@ -254,7 +254,7 @@ reader(void *unused)
ck_error("ERROR: Found invalid value: [%s] but expected [%s]\n", (char *)r, keys[i]);
}
a += rdtsc() - s;
ck_epoch_end(&epoch_hs, &epoch_record);
ck_epoch_end(&epoch_record);
n_state = ck_pr_load_int(&state);
if (n_state != state_previous) {
@@ -449,7 +449,7 @@ main(int argc, char *argv[])
fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
ck_epoch_record_t epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_hs, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
"%u pending, %u peak, %lu reclamations\n\n",
@@ -500,7 +500,7 @@ main(int argc, char *argv[])
while (ck_pr_load_int(&barrier[HS_STATE_STRICT_REPLACEMENT]) != n_threads)
ck_pr_stall();
set_reset();
ck_epoch_synchronize(&epoch_hs, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
a / (repeated * keys_length), acc(HS_STATE_STRICT_REPLACEMENT) / n_threads);
@@ -535,7 +535,7 @@ main(int argc, char *argv[])
ck_pr_stall();
set_reset();
ck_epoch_synchronize(&epoch_hs, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
a / (repeated * keys_length), acc(HS_STATE_DELETION) / n_threads);
@@ -582,13 +582,13 @@ main(int argc, char *argv[])
while (ck_pr_load_int(&barrier[HS_STATE_REPLACEMENT]) != n_threads)
ck_pr_stall();
set_reset();
ck_epoch_synchronize(&epoch_hs, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
a / (repeated * keys_length), acc(HS_STATE_REPLACEMENT) / n_threads);
ck_pr_inc_int(&barrier[HS_STATE_REPLACEMENT]);
epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_hs, &epoch_wr);
ck_epoch_synchronize(&epoch_wr);
fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
"%u pending, %u peak, %lu reclamations\n\n",

@@ -180,6 +180,7 @@ ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
{
size_t i;
record->global = global;
record->state = CK_EPOCH_STATE_USED;
record->active = 0;
record->epoch = 0;
@@ -196,8 +197,9 @@ ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
}
void
ck_epoch_unregister(struct ck_epoch *global, struct ck_epoch_record *record)
ck_epoch_unregister(struct ck_epoch_record *record)
{
struct ck_epoch *global = record->global;
size_t i;
record->active = 0;
@@ -298,8 +300,9 @@ ck_epoch_reclaim(struct ck_epoch_record *record)
* This function must not be called within a read-side section.
*/
void
ck_epoch_synchronize(struct ck_epoch *global, struct ck_epoch_record *record)
ck_epoch_synchronize(struct ck_epoch_record *record)
{
struct ck_epoch *global = record->global;
struct ck_epoch_record *cr;
unsigned int delta, epoch, goal, i;
bool active;
@@ -332,7 +335,7 @@ ck_epoch_synchronize(struct ck_epoch *global, struct ck_epoch_record *record)
/*
* Another writer may have already observed a grace
* period.
* period.
*/
e_d = ck_pr_load_uint(&global->epoch);
if (e_d != delta) {
@@ -384,10 +387,10 @@ reload:
}
void
ck_epoch_barrier(struct ck_epoch *global, struct ck_epoch_record *record)
ck_epoch_barrier(struct ck_epoch_record *record)
{
ck_epoch_synchronize(global, record);
ck_epoch_synchronize(record);
ck_epoch_reclaim(record);
return;
}
@@ -403,12 +406,15 @@ ck_epoch_barrier(struct ck_epoch *global, struct ck_epoch_record *record)
* is far from ideal too.
*/
bool
ck_epoch_poll(struct ck_epoch *global, struct ck_epoch_record *record)
ck_epoch_poll(struct ck_epoch_record *record)
{
bool active;
struct ck_epoch_record *cr = NULL;
unsigned int epoch = ck_pr_load_uint(&global->epoch);
unsigned int epoch;
unsigned int snapshot;
struct ck_epoch_record *cr = NULL;
struct ck_epoch *global = record->global;
epoch = ck_pr_load_uint(&global->epoch);
/* Serialize epoch snapshots with respect to global epoch. */
ck_pr_fence_memory();
