regressions/ck_epoch: fix other record read-reclaim races.

Samy Al Bahra 6 years ago
parent 999d4612e8
commit dbfe282866
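
Every hunk below applies the same fix: each regression thread's ck_epoch_record_t moves from the thread's own stack to the heap, so a record that other registered records may still scan during reclamation no longer disappears with a stack frame. A minimal sketch of that pattern follows; the register_record() helper is hypothetical, stack_epoch and the NULL context argument mirror the tests, and abort() stands in for the tests' ck_error().

#include <stdlib.h>
#include <ck_epoch.h>

/* Initialized elsewhere with ck_epoch_init(&stack_epoch), as in the tests' main(). */
static ck_epoch_t stack_epoch;

/* Heap-allocate and register a per-thread epoch record. */
static ck_epoch_record_t *
register_record(void)
{
	ck_epoch_record_t *record;

	record = malloc(sizeof *record);
	if (record == NULL)
		abort();	/* the regressions call ck_error() here */

	ck_epoch_register(&stack_epoch, record, NULL);
	return record;
}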

@@ -86,10 +86,14 @@ static void *
 read_thread(void *unused CK_CC_UNUSED)
 {
 	unsigned int j;
-	ck_epoch_record_t record CK_CC_CACHELINE;
+	ck_epoch_record_t *record CK_CC_CACHELINE;
 	ck_stack_entry_t *cursor, *n;

-	ck_epoch_register(&stack_epoch, &record, NULL);
+	record = malloc(sizeof *record);
+	if (record == NULL)
+		ck_error("record allocation failure");
+
+	ck_epoch_register(&stack_epoch, record, NULL);

 	if (aff_iterate(&a)) {
 		perror("ERROR: failed to affine thread");
@@ -108,7 +112,7 @@ read_thread(void *unused CK_CC_UNUSED)

 	j = 0;
 	for (;;) {
-		ck_epoch_begin(&record, NULL);
+		ck_epoch_begin(record, NULL);
 		CK_STACK_FOREACH(&stack, cursor) {
 			if (cursor == NULL)
 				continue;
@@ -116,7 +120,7 @@ read_thread(void *unused CK_CC_UNUSED)
 			n = CK_STACK_NEXT(cursor);
 			j += ck_pr_load_ptr(&n) != NULL;
 		}
-		ck_epoch_end(&record, NULL);
+		ck_epoch_end(record, NULL);

 		if (j != 0 && ck_pr_load_uint(&readers) == 0)
 			ck_pr_store_uint(&readers, 1);
@@ -138,10 +142,13 @@ write_thread(void *unused CK_CC_UNUSED)
 {
 	struct node **entry, *e;
 	unsigned int i, j, tid;
-	ck_epoch_record_t record;
+	ck_epoch_record_t *record;
 	ck_stack_entry_t *s;

-	ck_epoch_register(&stack_epoch, &record, NULL);
+	record = malloc(sizeof *record);
+	if (record == NULL)
+		ck_error("record allocation failure");
+	ck_epoch_register(&stack_epoch, record, NULL);

 	if (aff_iterate(&a)) {
 		perror("ERROR: failed to affine thread");
@@ -178,23 +185,23 @@ write_thread(void *unused CK_CC_UNUSED)
 		}

 		for (i = 0; i < PAIRS_S; i++) {
-			ck_epoch_begin(&record, NULL);
+			ck_epoch_begin(record, NULL);
 			s = ck_stack_pop_upmc(&stack);
 			e = stack_container(s);
-			ck_epoch_end(&record, NULL);
+			ck_epoch_end(record, NULL);

-			ck_epoch_call(&record, &e->epoch_entry, destructor);
-			ck_epoch_poll(&record);
+			ck_epoch_call(record, &e->epoch_entry, destructor);
+			ck_epoch_poll(record);
 		}
 	}

-	ck_epoch_barrier(&record);
+	ck_epoch_barrier(record);

 	if (tid == 0) {
 		fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] Peak: %u (%2.2f%%)\n Reclamations: %u\n\n",
-		    record.n_peak,
-		    (double)record.n_peak / ((double)PAIRS_S * ITERATE_S) * 100,
-		    record.n_dispatch);
+		    record->n_peak,
+		    (double)record->n_peak / ((double)PAIRS_S * ITERATE_S) * 100,
+		    record->n_dispatch);
 	}

 	ck_pr_inc_uint(&e_barrier);

@@ -133,10 +133,14 @@ read_thread(void *unused CK_CC_UNUSED)
 static void *
 write_thread(void *unused CK_CC_UNUSED)
 {
-	ck_epoch_record_t record;
+	ck_epoch_record_t *record;
 	unsigned long iterations = 0;

-	ck_epoch_register(&epoch, &record, NULL);
+	record = malloc(sizeof *record);
+	if (record == NULL)
+		ck_error("record allocation failure");
+
+	ck_epoch_register(&epoch, record, NULL);

 	if (aff_iterate(&a)) {
 		perror("ERROR: failed to affine thread");
@@ -150,7 +154,7 @@ write_thread(void *unused CK_CC_UNUSED)

 		if (!(iterations % 1048575))
 			fprintf(stderr, ".");
-		ck_epoch_synchronize(&record);
+		ck_epoch_synchronize(record);
 		iterations++;

 		if (ck_pr_load_uint(&leave) == 1)

@@ -86,12 +86,15 @@ static void *
 read_thread(void *unused CK_CC_UNUSED)
 {
 	unsigned int j;
-	ck_epoch_record_t record CK_CC_CACHELINE;
+	ck_epoch_record_t *record CK_CC_CACHELINE;
 	ck_stack_entry_t *cursor;
 	ck_stack_entry_t *n;
 	unsigned int i;

-	ck_epoch_register(&stack_epoch, &record, NULL);
+	record = malloc(sizeof *record);
+	if (record == NULL)
+		ck_error("record allocation failure");
+	ck_epoch_register(&stack_epoch, record, NULL);

 	if (aff_iterate(&a)) {
 		perror("ERROR: failed to affine thread");
@@ -112,7 +115,7 @@ read_thread(void *unused CK_CC_UNUSED)

 	for (;;) {
 		i = 0;
-		ck_epoch_begin(&record, NULL);
+		ck_epoch_begin(record, NULL);
 		CK_STACK_FOREACH(&stack, cursor) {
 			if (cursor == NULL)
 				continue;
@@ -123,7 +126,7 @@ read_thread(void *unused CK_CC_UNUSED)
 			if (i++ > 4098)
 				break;
 		}
-		ck_epoch_end(&record, NULL);
+		ck_epoch_end(record, NULL);

 		if (j != 0 && ck_pr_load_uint(&readers) == 0)
 			ck_pr_store_uint(&readers, 1);
@@ -145,10 +148,13 @@ write_thread(void *unused CK_CC_UNUSED)
 {
 	struct node **entry, *e;
 	unsigned int i, j, tid;
-	ck_epoch_record_t record;
+	ck_epoch_record_t *record;
 	ck_stack_entry_t *s;

-	ck_epoch_register(&stack_epoch, &record, NULL);
+	record = malloc(sizeof *record);
+	if (record == NULL)
+		ck_error("record allocation failure");
+	ck_epoch_register(&stack_epoch, record, NULL);

 	if (aff_iterate(&a)) {
 		perror("ERROR: failed to affine thread");
@@ -180,17 +186,17 @@ write_thread(void *unused CK_CC_UNUSED)
 			ck_pr_stall();

 		for (i = 0; i < PAIRS_S; i++) {
-			ck_epoch_begin(&record, NULL);
+			ck_epoch_begin(record, NULL);
 			s = ck_stack_pop_upmc(&stack);
 			e = stack_container(s);
-			ck_epoch_end(&record, NULL);
+			ck_epoch_end(record, NULL);

 			if (i & 1) {
-				ck_epoch_synchronize(&record);
-				ck_epoch_reclaim(&record);
-				ck_epoch_call(&record, &e->epoch_entry, destructor);
+				ck_epoch_synchronize(record);
+				ck_epoch_reclaim(record);
+				ck_epoch_call(record, &e->epoch_entry, destructor);
 			} else {
-				ck_epoch_barrier(&record);
+				ck_epoch_barrier(record);
 				destructor(&e->epoch_entry);
 			}

@@ -201,13 +207,13 @@ write_thread(void *unused CK_CC_UNUSED)
 		}
 	}

-	ck_epoch_synchronize(&record);
+	ck_epoch_synchronize(record);

 	if (tid == 0) {
 		fprintf(stderr, "[W] Peak: %u (%2.2f%%)\n Reclamations: %u\n\n",
-		    record.n_peak,
-		    (double)record.n_peak / ((double)PAIRS_S * ITERATE_S) * 100,
-		    record.n_dispatch);
+		    record->n_peak,
+		    (double)record->n_peak / ((double)PAIRS_S * ITERATE_S) * 100,
+		    record->n_dispatch);
 	}

 	ck_pr_inc_uint(&e_barrier);
