ck_epoch: introduce ck_epoch_deferred

Allow deferral to occur outside the epoch poll critical loop (which may access per-CPU structures).
Matthew Macy authored 7 years ago, committed by Samy Al Bahra
parent 9587bbb362
commit deca119d14

include/ck_epoch.h
@@ -266,6 +266,7 @@ void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *, void *);
 void ck_epoch_unregister(ck_epoch_record_t *);
 bool ck_epoch_poll(ck_epoch_record_t *);
+bool ck_epoch_poll_deferred(struct ck_epoch_record *record, ck_stack_t *deferred);
 void ck_epoch_synchronize(ck_epoch_record_t *);
 void ck_epoch_synchronize_wait(ck_epoch_t *, ck_epoch_wait_cb_t *, void *);
 void ck_epoch_barrier(ck_epoch_record_t *);

src/ck_epoch.c
@@ -349,7 +349,7 @@ ck_epoch_scan(struct ck_epoch *global,
 }
 
 static void
-ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
+ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e, ck_stack_t *deferred)
 {
 	unsigned int epoch = e & (CK_EPOCH_LENGTH - 1);
 	ck_stack_entry_t *head, *next, *cursor;
@@ -362,6 +362,9 @@ ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
 		    ck_epoch_entry_container(cursor);
 
 		next = CK_STACK_NEXT(cursor);
+		if (deferred != NULL)
+			ck_stack_push_spnc(deferred, &entry->stack_entry);
+		else
 		entry->function(entry);
 		i++;
 	}
@@ -390,7 +393,7 @@ ck_epoch_reclaim(struct ck_epoch_record *record)
 	unsigned int epoch;
 
 	for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
-		ck_epoch_dispatch(record, epoch);
+		ck_epoch_dispatch(record, epoch, NULL);
 
 	return;
 }
@@ -551,7 +554,7 @@ ck_epoch_barrier_wait(struct ck_epoch_record *record, ck_epoch_wait_cb_t *cb,
  * is far from ideal too.
  */
 bool
-ck_epoch_poll(struct ck_epoch_record *record)
+ck_epoch_poll_deferred(struct ck_epoch_record *record, ck_stack_t *deferred)
 {
 	bool active;
 	unsigned int epoch;
@@ -572,7 +575,7 @@ ck_epoch_poll(struct ck_epoch_record *record)
 	if (active == false) {
 		record->epoch = epoch;
 		for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
-			ck_epoch_dispatch(record, epoch);
+			ck_epoch_dispatch(record, epoch, deferred);
 
 		return true;
 	}
@@ -580,6 +583,13 @@ ck_epoch_poll(struct ck_epoch_record *record)
 
 	/* If an active thread exists, rely on epoch observation. */
 	(void)ck_pr_cas_uint(&global->epoch, epoch, epoch + 1);
-	ck_epoch_dispatch(record, epoch + 1);
+	ck_epoch_dispatch(record, epoch + 1, deferred);
 	return true;
 }
+
+bool
+ck_epoch_poll(struct ck_epoch_record *record)
+{
+
+	return ck_epoch_poll_deferred(record, NULL);
+}
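
For context, a minimal usage sketch of the new interface (not part of this commit): the caller passes a ck_stack so that reclamation callbacks are collected rather than executed inside the poll critical loop, then drains the stack and runs the callbacks afterwards. The names example_entry_container and example_drain are hypothetical; ck_epoch_poll_deferred and the CK_STACK_* helpers are the interfaces shown above.

/*
 * Sketch only: defer callback execution out of the epoch poll
 * critical loop.  example_* names are hypothetical.
 */
#include <ck_epoch.h>
#include <ck_stack.h>

/* Recover a ck_epoch_entry from its embedded stack_entry member. */
CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry, example_entry_container)

static void
example_drain(ck_epoch_record_t *record)
{
	ck_stack_t deferred = CK_STACK_INITIALIZER;
	ck_stack_entry_t *cursor, *next;

	/* Callbacks are pushed onto `deferred` instead of being run inline. */
	if (ck_epoch_poll_deferred(record, &deferred) == false)
		return;

	/* Execute the deferred callbacks outside the poll critical loop. */
	for (cursor = CK_STACK_FIRST(&deferred); cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry = example_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}

	return;
}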
