ck_hp: Match epoch semantics.

Added improved observability and a ck_hp_purge.
The rename matches the naming used in ck_epoch.
Documentation updates will follow soon.
ck_pring
Samy Al Bahra 14 years ago
parent a72e86e0ba
commit 20fb7a9200

@ -68,7 +68,9 @@ struct ck_hp_record {
ck_stack_t pending;
unsigned int n_pending;
ck_stack_entry_t global_entry;
};
unsigned int n_peak;
uint64_t n_reclamations;
} CK_CC_CACHELINE;
typedef struct ck_hp_record ck_hp_record_t;
CK_CC_INLINE static void
@ -96,8 +98,9 @@ void ck_hp_set_threshold(ck_hp_t *, unsigned int);
void ck_hp_register(ck_hp_t *, ck_hp_record_t *, void **);
void ck_hp_unregister(ck_hp_record_t *);
ck_hp_record_t *ck_hp_recycle(ck_hp_t *);
void ck_hp_flush(ck_hp_record_t *);
void ck_hp_reclaim(ck_hp_record_t *);
void ck_hp_free(ck_hp_record_t *, ck_hp_hazard_t *, void *, void *);
void ck_hp_retire(ck_hp_record_t *, ck_hp_hazard_t *, void *, void *);
void ck_hp_purge(ck_hp_record_t *);
#endif /* _CK_HP_H */

@ -51,7 +51,7 @@ static unsigned int barrier;
static unsigned int e_barrier;
#ifndef PAIRS
#define PAIRS 1000000
#define PAIRS 5000000
#endif
struct node {
@ -109,6 +109,21 @@ thread(void *unused)
ck_pr_inc_uint(&e_barrier);
while (ck_pr_load_uint(&e_barrier) < n_threads);
fprintf(stderr, "Peak: %u\nReclamations: %" PRIu64 "\n\n",
record.n_peak, record.n_reclamations);
ck_hp_clear(&record);
ck_hp_purge(&record);
ck_pr_inc_uint(&e_barrier);
while (ck_pr_load_uint(&e_barrier) < (n_threads << 1));
if (record.n_pending != 0) {
fprintf(stderr, "ERROR: %u pending, expecting none.\n",
record.n_pending);
exit(EXIT_FAILURE);
}
return (NULL);
}

@ -71,23 +71,23 @@ main(int argc, char *argv[])
exit(EXIT_FAILURE);
}
ck_hp_register(&state, &record[0], pointers);
ck_hp_flush(&record[0]);
ck_hp_reclaim(&record[0]);
entry = malloc(sizeof *entry);
ck_hp_set(&record[0], 0, entry);
ck_hp_flush(&record[0]);
ck_hp_reclaim(&record[0]);
ck_hp_free(&record[0], &entry->hazard, entry, entry);
ck_hp_flush(&record[0]);
ck_hp_reclaim(&record[0]);
ck_hp_set(&record[0], 0, NULL);
ck_hp_flush(&record[0]);
ck_hp_reclaim(&record[0]);
entry = malloc(sizeof *entry);
ck_hp_set(&record[0], 0, entry);
ck_hp_flush(&record[0]);
ck_hp_reclaim(&record[0]);
ck_hp_free(&record[0], &entry->hazard, entry, entry);
ck_hp_flush(&record[0]);
ck_hp_reclaim(&record[0]);
ck_hp_set(&record[0], 0, NULL);
ck_hp_flush(&record[0]);
ck_hp_reclaim(&record[0]);
pointers = malloc(sizeof(void *));
if (pointers == NULL) {
@ -95,15 +95,15 @@ main(int argc, char *argv[])
exit(EXIT_FAILURE);
}
ck_hp_register(&state, &record[1], pointers);
ck_hp_flush(&record[1]);
ck_hp_reclaim(&record[1]);
entry = malloc(sizeof *entry);
ck_hp_set(&record[1], 0, entry);
ck_hp_flush(&record[1]);
ck_hp_reclaim(&record[1]);
ck_hp_free(&record[1], &entry->hazard, entry, entry);
ck_hp_flush(&record[1]);
ck_hp_reclaim(&record[1]);
ck_hp_set(&record[1], 0, NULL);
ck_hp_flush(&record[1]);
ck_hp_reclaim(&record[1]);
printf("Allocating entry and freeing in other HP record...\n");
entry = malloc(sizeof *entry);
@ -118,10 +118,10 @@ main(int argc, char *argv[])
ck_hp_free(&record[0], &other->hazard, other, other);
ck_pr_store_uint(&other->value, 32);
ck_hp_set(&record[0], 0, NULL);
ck_hp_flush(&record[1]);
ck_hp_reclaim(&record[1]);
ck_hp_set(&record[1], 0, NULL);
ck_hp_flush(&record[0]);
ck_hp_flush(&record[1]);
ck_hp_reclaim(&record[0]);
ck_hp_reclaim(&record[1]);
return 0;
}

@ -89,15 +89,6 @@ ck_hp_set_threshold(struct ck_hp *state, unsigned int threshold)
return;
}
/*
 * Pre-rename version (removed by this commit): returns a hazard-pointer
 * record to the global free pool without resetting its pending stack or
 * statistics — the replacement added those resets.
 */
void
ck_hp_unregister(struct ck_hp_record *entry)
{
/* Publish the record as reusable, then advertise one more free slot. */
ck_pr_store_int(&entry->state, CK_HP_FREE);
ck_pr_inc_uint(&entry->global->n_free);
return;
}
struct ck_hp_record *
ck_hp_recycle(struct ck_hp *global)
{
@ -123,16 +114,31 @@ ck_hp_recycle(struct ck_hp *global)
return (NULL);
}
/*
 * Return a hazard-pointer record to the global free pool.  The pending
 * stack and per-record statistics are wiped first so the record is
 * pristine when a later ck_hp_recycle() caller picks it up; only then
 * is it published as CK_HP_FREE.
 */
void
ck_hp_unregister(struct ck_hp_record *record)
{

	/* Reset record-local state before making the record visible. */
	ck_stack_init(&record->pending);
	record->n_pending = 0;
	record->n_reclamations = 0;
	record->n_peak = 0;

	/* Mark reusable and advertise one more free slot globally. */
	ck_pr_store_int(&record->state, CK_HP_FREE);
	ck_pr_inc_uint(&record->global->n_free);
	return;
}
void
ck_hp_register(struct ck_hp *state,
struct ck_hp_record *entry,
void **pointers)
struct ck_hp_record *entry,
void **pointers)
{
entry->state = CK_HP_USED;
entry->global = state;
entry->pointers = pointers;
entry->n_pending = 0;
entry->n_peak = 0;
entry->n_reclamations = 0;
ck_stack_init(&entry->pending);
ck_stack_push_upmc(&state->subscribers, &entry->global_entry);
ck_pr_inc_uint(&state->n_subscribers);
@ -215,7 +221,7 @@ ck_hp_member_cache(struct ck_hp *global, void **cache, unsigned int *n_hazards)
}
void
ck_hp_flush(struct ck_hp_record *thread)
ck_hp_reclaim(struct ck_hp_record *thread)
{
struct ck_hp_hazard *hazard;
struct ck_hp *global = thread->global;
@ -235,7 +241,8 @@ ck_hp_flush(struct ck_hp_record *thread)
previous = NULL;
CK_STACK_FOREACH_SAFE(&thread->pending, entry, next) {
hazard = ck_hp_hazard_container(entry);
match = bsearch(&hazard->pointer, cache, n_hazards, sizeof(void *), hazard_compare);
match = bsearch(&hazard->pointer, cache, n_hazards,
sizeof(void *), hazard_compare);
if (match != NULL) {
previous = entry;
continue;
@ -257,22 +264,35 @@ ck_hp_flush(struct ck_hp_record *thread)
/* The entry is now safe to destroy. */
global->destroy(hazard->data);
thread->n_reclamations++;
}
return;
}
void
ck_hp_retire(struct ck_hp_record *thread, struct ck_hp_hazard *hazard, void *data, void *pointer)
ck_hp_retire(struct ck_hp_record *thread,
struct ck_hp_hazard *hazard,
void *data,
void *pointer)
{
ck_pr_store_ptr(&hazard->pointer, pointer);
ck_pr_store_ptr(&hazard->data, data);
ck_stack_push_spnc(&thread->pending, &hazard->pending_entry);
thread->n_pending += 1;
if (thread->n_pending > thread->n_peak)
thread->n_peak = thread->n_pending;
return;
}
void
ck_hp_free(struct ck_hp_record *thread, struct ck_hp_hazard *hazard, void *data, void *pointer)
ck_hp_free(struct ck_hp_record *thread,
struct ck_hp_hazard *hazard,
void *data,
void *pointer)
{
struct ck_hp *global;
@ -280,10 +300,27 @@ ck_hp_free(struct ck_hp_record *thread, struct ck_hp_hazard *hazard, void *data,
ck_pr_store_ptr(&hazard->data, data);
ck_pr_store_ptr(&hazard->pointer, pointer);
ck_stack_push_spnc(&thread->pending, &hazard->pending_entry);
thread->n_pending += 1;
if (thread->n_pending > thread->n_peak)
thread->n_peak = thread->n_pending;
if (thread->n_pending >= global->threshold)
ck_hp_flush(thread);
ck_hp_reclaim(thread);
return;
}
/*
 * Drain the record's pending stack completely: repeatedly run a
 * reclamation cycle until no deferred objects remain, backing off
 * between passes while other threads still hold hazardous references
 * to the remaining objects.
 */
void
ck_hp_purge(struct ck_hp_record *self)
{
	ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;

	for (;;) {
		if (self->n_pending == 0)
			break;

		ck_hp_reclaim(self);
		if (self->n_pending != 0)
			ck_backoff_gb(&backoff);
	}

	return;
}

Loading…
Cancel
Save