diff --git a/include/ck_array.h b/include/ck_array.h
index e8e366b..dcde6bc 100644
--- a/include/ck_array.h
+++ b/include/ck_array.h
@@ -98,4 +98,3 @@ ck_array_initialized(struct ck_array *array)
 	    _ck_i++)
 
 #endif /* _CK_ARRAY_H */
-
diff --git a/include/ck_backoff.h b/include/ck_backoff.h
index f43c564..9e43847 100644
--- a/include/ck_backoff.h
+++ b/include/ck_backoff.h
@@ -55,4 +55,3 @@ ck_backoff_eb(volatile unsigned int *c)
 }
 
 #endif /* _CK_BACKOFF_H */
-
diff --git a/include/ck_barrier.h b/include/ck_barrier.h
index 52d3973..2f7acdd 100644
--- a/include/ck_barrier.h
+++ b/include/ck_barrier.h
@@ -162,4 +162,3 @@ void ck_barrier_mcs_subscribe(ck_barrier_mcs_t *, ck_barrier_mcs_state_t *);
 void ck_barrier_mcs(ck_barrier_mcs_t *, ck_barrier_mcs_state_t *);
 
 #endif /* _CK_BARRIER_H */
-
diff --git a/include/ck_brlock.h b/include/ck_brlock.h
index d59f0e9..b45e1b8 100644
--- a/include/ck_brlock.h
+++ b/include/ck_brlock.h
@@ -277,4 +277,3 @@ ck_brlock_read_unlock(struct ck_brlock_reader *reader)
 }
 
 #endif /* _CK_BRLOCK_H */
-
diff --git a/include/ck_bytelock.h b/include/ck_bytelock.h
index cd855cf..6755762 100644
--- a/include/ck_bytelock.h
+++ b/include/ck_bytelock.h
@@ -184,4 +184,3 @@ ck_bytelock_read_unlock(struct ck_bytelock *bytelock, unsigned int slot)
 }
 
 #endif /* _CK_BYTELOCK_H */
-
diff --git a/include/ck_cohort.h b/include/ck_cohort.h
index 5bc8baa..4bfcfe2 100644
--- a/include/ck_cohort.h
+++ b/include/ck_cohort.h
@@ -159,4 +159,3 @@ enum ck_cohort_state {
 }
 
 #endif /* _CK_COHORT_H */
-
diff --git a/include/ck_elide.h b/include/ck_elide.h
index 8ffff40..59d17c5 100644
--- a/include/ck_elide.h
+++ b/include/ck_elide.h
@@ -303,7 +303,7 @@ _ck_elide_fallback(int *retry,
  * semantics. In environments where jitter is low, this may yield a tighter
  * fast path.
  */
-#define CK_ELIDE_LOCK(NAME, LOCK) ck_elide_##NAME##_lock(LOCK) 
+#define CK_ELIDE_LOCK(NAME, LOCK) ck_elide_##NAME##_lock(LOCK)
 #define CK_ELIDE_UNLOCK(NAME, LOCK) ck_elide_##NAME##_unlock(LOCK)
 #define CK_ELIDE_TRYLOCK(NAME, LOCK) ck_elide_##NAME##_trylock(LOCK)
 
@@ -319,4 +319,3 @@
 	ck_elide_##NAME##_unlock_adaptive(STAT, LOCK)
 
 #endif /* _CK_ELIDE_H */
-
diff --git a/include/ck_epoch.h b/include/ck_epoch.h
index 2e71599..ecfd173 100644
--- a/include/ck_epoch.h
+++ b/include/ck_epoch.h
@@ -160,4 +160,3 @@ void ck_epoch_barrier(ck_epoch_t *, ck_epoch_record_t *);
 void ck_epoch_reclaim(ck_epoch_record_t *);
 
 #endif /* _CK_EPOCH_H */
-
diff --git a/include/ck_fifo.h b/include/ck_fifo.h
index a65bcfc..a09a80d 100644
--- a/include/ck_fifo.h
+++ b/include/ck_fifo.h
@@ -472,4 +472,3 @@ ck_fifo_mpmc_trydequeue(struct ck_fifo_mpmc *fifo,
 #endif /* CK_F_PR_CAS_PTR_2 */
 
 #endif /* _CK_FIFO_H */
-
diff --git a/include/ck_hp.h b/include/ck_hp.h
index 505de5b..7f503de 100644
--- a/include/ck_hp.h
+++ b/include/ck_hp.h
@@ -104,4 +104,3 @@ void ck_hp_retire(ck_hp_record_t *, ck_hp_hazard_t *, void *, void *);
 void ck_hp_purge(ck_hp_record_t *);
 
 #endif /* _CK_HP_H */
-
diff --git a/include/ck_hp_fifo.h b/include/ck_hp_fifo.h
index 59bf0e4..96ffa5e 100644
--- a/include/ck_hp_fifo.h
+++ b/include/ck_hp_fifo.h
@@ -219,4 +219,3 @@ ck_hp_fifo_trydequeue_mpmc(ck_hp_record_t *record,
 	    (entry) = (T))
 
 #endif /* _CK_HP_FIFO_H */
-
diff --git a/include/ck_hp_stack.h b/include/ck_hp_stack.h
index 3b67fc4..3a14faa 100644
--- a/include/ck_hp_stack.h
+++ b/include/ck_hp_stack.h
@@ -111,4 +111,3 @@ leave:
 }
 
 #endif /* _CK_HP_STACK_H */
-
diff --git a/include/ck_hs.h b/include/ck_hs.h
index 564e9b6..e37918c 100644
--- a/include/ck_hs.h
+++ b/include/ck_hs.h
@@ -48,7 +48,7 @@
  */
 #define CK_HS_MODE_DIRECT 2
 
-/* 
+/*
  * Indicates that the values to be stored are pointers.
  * Allows for space optimizations in the presence of pointer
  * packing. Mutually exclusive with CK_HS_MODE_DIRECT.
@@ -132,4 +132,3 @@ bool ck_hs_reset_size(ck_hs_t *, unsigned long);
 void ck_hs_stat(ck_hs_t *, struct ck_hs_stat *);
 
 #endif /* _CK_HS_H */
-
diff --git a/include/ck_ht.h b/include/ck_ht.h
index 86fb7f0..cb8e67d 100644
--- a/include/ck_ht.h
+++ b/include/ck_ht.h
@@ -259,4 +259,3 @@ uint64_t ck_ht_count(ck_ht_t *);
 #endif /* CK_F_PR_LOAD_64 && CK_F_PR_STORE_64 */
 
 #endif /* _CK_HT_H */
-
diff --git a/include/ck_limits.h b/include/ck_limits.h
index c597763..b08e4a7 100644
--- a/include/ck_limits.h
+++ b/include/ck_limits.h
@@ -29,4 +29,3 @@
 #else
 #include
 #endif /* __linux__ && __KERNEL__ */
-
diff --git a/include/ck_malloc.h b/include/ck_malloc.h
index 40f1898..a623e1a 100644
--- a/include/ck_malloc.h
+++ b/include/ck_malloc.h
@@ -37,4 +37,3 @@ struct ck_malloc {
 };
 
 #endif /* _CK_MALLOC_H */
-
diff --git a/include/ck_pflock.h b/include/ck_pflock.h
index 58963e8..52c232a 100644
--- a/include/ck_pflock.h
+++ b/include/ck_pflock.h
@@ -140,4 +140,3 @@ leave:
 }
 
 #endif /* _CK_PFLOCK_H */
-
diff --git a/include/ck_pr.h b/include/ck_pr.h
index eb198f5..af4159e 100644
--- a/include/ck_pr.h
+++ b/include/ck_pr.h
@@ -1149,4 +1149,3 @@ CK_PR_FAS_S(8, uint8_t)
 #undef CK_PR_FAS
 
 #endif /* _CK_PR_H */
-
diff --git a/include/ck_queue.h b/include/ck_queue.h
index 3d8824f..f617b97 100644
--- a/include/ck_queue.h
+++ b/include/ck_queue.h
@@ -415,4 +415,3 @@ struct { \
 } while (0)
 
 #endif /* _CK_QUEUE_H */
-
diff --git a/include/ck_rhs.h b/include/ck_rhs.h
index 7a28f7d..edf852f 100644
--- a/include/ck_rhs.h
+++ b/include/ck_rhs.h
@@ -48,7 +48,7 @@
  */
 #define CK_RHS_MODE_DIRECT 2
 
-/* 
+/*
  * Indicates that the values to be stored are pointers.
  * Allows for space optimizations in the presence of pointer
 * packing. Mutually exclusive with CK_RHS_MODE_DIRECT.
diff --git a/include/ck_ring.h b/include/ck_ring.h
index 88bb837..c70915c 100644
--- a/include/ck_ring.h
+++ b/include/ck_ring.h
@@ -307,7 +307,7 @@ _ck_ring_dequeue_spmc(struct ck_ring *ring,
 		return false;
 
 	ck_pr_fence_load();
-	
+
 	target = (char *)buffer + size * (consumer & mask);
 	memcpy(data, target, size);
 
@@ -432,4 +432,3 @@ ck_ring_dequeue_spmc_##name(struct ck_ring *a, \
 	ck_ring_dequeue_spmc_##name(a, b, c)
 
 #endif /* _CK_RING_H */
-
diff --git a/include/ck_rwcohort.h b/include/ck_rwcohort.h
index e8e014e..c9b5d2a 100644
--- a/include/ck_rwcohort.h
+++ b/include/ck_rwcohort.h
@@ -315,4 +315,3 @@
 	}
 
 #endif /* _CK_RWCOHORT_H */
-
diff --git a/include/ck_rwlock.h b/include/ck_rwlock.h
index 63cb549..89a006a 100644
--- a/include/ck_rwlock.h
+++ b/include/ck_rwlock.h
@@ -295,4 +295,3 @@ ck_rwlock_recursive_read_unlock(ck_rwlock_recursive_t *rw)
 }
 
 #endif /* _CK_RWLOCK_H */
-
diff --git a/include/ck_sequence.h b/include/ck_sequence.h
index 14138ff..b7fcb48 100644
--- a/include/ck_sequence.h
+++ b/include/ck_sequence.h
@@ -123,4 +123,3 @@ ck_sequence_write_end(struct ck_sequence *sq)
 }
 
 #endif /* _CK_SEQUENCE_H */
-
diff --git a/include/ck_spinlock.h b/include/ck_spinlock.h
index 03f9900..3e07b77 100644
--- a/include/ck_spinlock.h
+++ b/include/ck_spinlock.h
@@ -59,4 +59,3 @@ CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock, ck_spinlock_t,
     ck_spinlock_locked, ck_spinlock_trylock)
 
 #endif /* _CK_SPINLOCK_H */
-
diff --git a/include/ck_stack.h b/include/ck_stack.h
index ac89a95..21d3f0f 100644
--- a/include/ck_stack.h
+++ b/include/ck_stack.h
@@ -352,4 +352,3 @@ ck_stack_init(struct ck_stack *stack)
 	    (entry) = (T))
 
 #endif /* _CK_STACK_H */
-
diff --git a/include/ck_stdint.h b/include/ck_stdint.h
index 1dfd373..e62cafa 100644
--- a/include/ck_stdint.h
+++ b/include/ck_stdint.h
@@ -30,4 +30,3 @@
 #else
 #include
 #endif /* __linux__ && __KERNEL__ */
-
diff --git a/include/ck_swlock.h b/include/ck_swlock.h
index 134df6c..d880aaf 100644
--- a/include/ck_swlock.h
+++ b/include/ck_swlock.h
@@ -215,4 +215,3 @@ CK_ELIDE_PROTOTYPE(ck_swlock_read, ck_swlock_t,
     ck_swlock_locked_reader, ck_swlock_read_unlock)
 
 #endif /* _CK_SWLOCK_H */
-
diff --git a/include/ck_tflock.h b/include/ck_tflock.h
index 391fb73..3bb43d5 100644
--- a/include/ck_tflock.h
+++ b/include/ck_tflock.h
@@ -32,7 +32,7 @@
  * described in:
  * John M. Mellor-Crummey and Michael L. Scott. 1991.
  * Scalable reader-writer synchronization for shared-memory
- * multiprocessors. SIGPLAN Not. 26, 7 (April 1991), 106-113. 
+ * multiprocessors. SIGPLAN Not. 26, 7 (April 1991), 106-113.
  */
 
 #include
@@ -65,7 +65,7 @@ ck_tflock_ticket_fca_32(uint32_t *target, uint32_t mask, uint32_t delta)
 
 		ck_pr_stall();
 	}
-	
+
 	return snapshot;
 }
 
diff --git a/include/gcc/arm/ck_pr.h b/include/gcc/arm/ck_pr.h
index 0c2400c..7222680 100644
--- a/include/gcc/arm/ck_pr.h
+++ b/include/gcc/arm/ck_pr.h
@@ -135,7 +135,7 @@ ck_pr_load_64(const uint64_t *target)
 	register uint64_t ret asm("r0");
 	__asm __volatile("ldrd %0, [%1]"
 		: "+r" (ret)
-		: "r" (target) 
+		: "r" (target)
 		: "memory", "cc");
 	return (ret);
 }
@@ -262,7 +262,7 @@ ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *value)
 		  "=&r" (tmp)
 		: "r" (target),
 		  "r" (set),
-		  "r" (compare) 
+		  "r" (compare)
 		: "memory", "cc");
 	*(void **)value = previous;
 	return (previous == compare);
@@ -283,7 +283,7 @@ ck_pr_cas_ptr(void *target, void *compare, void *set)
 		  "=&r" (tmp)
 		: "r" (target),
 		  "r" (set),
-		  "r" (compare) 
+		  "r" (compare)
 		: "memory", "cc");
 	return (previous == compare);
 }
diff --git a/include/spinlock/hclh.h b/include/spinlock/hclh.h
index edaeaca..0eac6b9 100644
--- a/include/spinlock/hclh.h
+++ b/include/spinlock/hclh.h
@@ -98,7 +98,7 @@ ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_queue,
 		/* We're head of the global queue, we're done */
 		if (ck_pr_load_uint(&previous->splice) == false)
 			return;
-	} 
+	}
 
 	/* Now we need to splice the local queue into the global queue. */
 	local_tail = ck_pr_load_ptr(local_queue);
diff --git a/regressions/ck_hp/validate/ck_hp_fifo_donner.c b/regressions/ck_hp/validate/ck_hp_fifo_donner.c
index 43e00ae..a2f31c9 100644
--- a/regressions/ck_hp/validate/ck_hp_fifo_donner.c
+++ b/regressions/ck_hp/validate/ck_hp_fifo_donner.c
@@ -43,7 +43,7 @@ static ck_hp_fifo_t fifo;
 static ck_hp_t fifo_hp;
 
 /* thread local element count */
-static unsigned long *count; 
+static unsigned long *count;
 
 static unsigned long thread_count;
 
@@ -77,10 +77,10 @@ queue_50_50(void *elements)
 
 	record = malloc(sizeof(ck_hp_record_t));
 	assert(record);
-	
+
 	slots = malloc(CK_HP_FIFO_SLOTS_SIZE);
 	assert(slots);
-	
+
 	/* different seed for each thread */
 	seed = 1337; /*(unsigned int) pthread_self(); */
 
@@ -166,7 +166,7 @@ main(int argc, char** argv)
 
 	/* array for local operation count */
 	count = malloc(sizeof(unsigned long *) * thread_count);
-	
+
 	/*
 	 * Initialize global hazard pointer safe memory reclamation to execute free()
 	 * when a fifo_entry is safe to be deleted.
diff --git a/regressions/ck_hs/benchmark/parallel_bytestring.c b/regressions/ck_hs/benchmark/parallel_bytestring.c
index ea2eaec..e2e15c9 100644
--- a/regressions/ck_hs/benchmark/parallel_bytestring.c
+++ b/regressions/ck_hs/benchmark/parallel_bytestring.c
@@ -144,7 +144,7 @@ set_init(void)
 
 #ifdef HS_DELETE
 	mode |= CK_HS_MODE_DELETE;
-#endif 
+#endif
 
 	ck_epoch_init(&epoch_hs);
 	ck_epoch_register(&epoch_hs, &epoch_wr);
diff --git a/regressions/ck_ring/validate/ck_ring_spmc.c b/regressions/ck_ring/validate/ck_ring_spmc.c
index 23fe0fa..9563a8f 100644
--- a/regressions/ck_ring/validate/ck_ring_spmc.c
+++ b/regressions/ck_ring/validate/ck_ring_spmc.c
@@ -197,7 +197,7 @@ test(void *c)
 	for (i = 0; i < ITERATIONS; i++) {
 		for (j = 0; j < size; j++) {
 			buffer = _context[context->previous].buffer;
-			while (ck_ring_dequeue_spmc(ring + context->previous, 
+			while (ck_ring_dequeue_spmc(ring + context->previous,
 			    buffer, &entry) == false);
 
 			if (context->previous != (unsigned int)entry->tid) {
@@ -315,7 +315,7 @@ main(int argc, char *argv[])
 		/* Wait until queue is not full. */
 		if (l & 1) {
 			while (ck_ring_enqueue_spmc(&ring_spmc,
-			    buffer, 
+			    buffer,
 			    entry) == false)
 				ck_pr_stall();
 		} else {
diff --git a/regressions/ck_ring/validate/ck_ring_spmc_template.c b/regressions/ck_ring/validate/ck_ring_spmc_template.c
index 9facbc7..456fb97 100644
--- a/regressions/ck_ring/validate/ck_ring_spmc_template.c
+++ b/regressions/ck_ring/validate/ck_ring_spmc_template.c
@@ -200,7 +200,7 @@ test(void *c)
 		for (j = 0; j < size; j++) {
 			buffer = _context[context->previous].buffer;
 			while (CK_RING_DEQUEUE_SPMC(entry,
-			    ring + context->previous, 
+			    ring + context->previous,
 			    buffer, &entry) == false);
 
 			if (context->previous != (unsigned int)entry->tid) {
diff --git a/regressions/common.h b/regressions/common.h
index f100e89..4322a07 100644
--- a/regressions/common.h
+++ b/regressions/common.h
@@ -289,7 +289,7 @@ CK_CC_UNUSED static int
 aff_iterate_core(struct affinity *acb, unsigned int *core)
 {
 	cpu_set_t s;
-	
+
 	*core = ck_pr_faa_uint(&acb->request, acb->delta);
 	CPU_ZERO(&s);
 	CPU_SET((*core) % CORES, &s);
@@ -454,4 +454,3 @@ ck_error(const char *message, ...)
 	va_end(ap);
 	exit(EXIT_FAILURE);
 }
-
diff --git a/src/ck_epoch.c b/src/ck_epoch.c
index 343667c..ebb0aaa 100644
--- a/src/ck_epoch.c
+++ b/src/ck_epoch.c
@@ -87,7 +87,7 @@
  * at e_g - 1 to still be accessed at e_g as threads are "active"
  * at the same time (real-world time) mutating shared objects.
  *
- * Now, if the epoch counter is ticked to e_g + 1, then no new 
+ * Now, if the epoch counter is ticked to e_g + 1, then no new
  * hazardous references could exist to objects logically deleted at
  * e_g - 1. The reason for this is that at e_g + 1, all epoch read-side
  * critical sections started at e_g - 1 must have been completed. If
@@ -118,7 +118,7 @@
  * sufficient to represent e_g using only the values 0, 1 or 2. Every time
  * a thread re-visits a e_g (which can be determined with a non-empty deferral
  * list) it can assume objects in the e_g deferral list involved at least
- * three e_g transitions and are thus, safe, for physical deletion. 
+ * three e_g transitions and are thus, safe, for physical deletion.
  *
  * Blocking semantics for epoch reclamation have additional restrictions.
  * Though we only require three deferral lists, reasonable blocking semantics
diff --git a/src/ck_hs.c b/src/ck_hs.c
index 76bf832..ea77b9a 100644
--- a/src/ck_hs.c
+++ b/src/ck_hs.c
@@ -598,7 +598,7 @@ ck_hs_gc(struct ck_hs *hs, unsigned long cycles, unsigned long seed)
 	if (maximum != map->probe_maximum)
 		ck_pr_store_uint(&map->probe_maximum, maximum);
 
-	if (bounds != NULL) { 
+	if (bounds != NULL) {
 		for (i = 0; i < map->capacity; i++)
 			CK_HS_STORE(&map->probe_bound[i], bounds[i]);
 
@@ -851,7 +851,7 @@ ck_hs_get(struct ck_hs *hs,
 	unsigned int g, g_p, probe;
 	unsigned int *generation;
 
-	do { 
+	do {
 		map = ck_pr_load_ptr(&hs->map);
 		generation = &map->generation[h & CK_HS_G_MASK];
 		g = ck_pr_load_uint(generation);
diff --git a/src/ck_ht.c b/src/ck_ht.c
index ea9de1e..b6d74c1 100644
--- a/src/ck_ht.c
+++ b/src/ck_ht.c
@@ -395,7 +395,7 @@ ck_ht_gc(struct ck_ht *ht, unsigned long cycles, unsigned long seed)
 
 		return true;
 	}
-	
+
 	if (cycles == 0) {
 		maximum = 0;
 
diff --git a/src/ck_ht_hash.h b/src/ck_ht_hash.h
index 254e3b8..075c2c1 100644
--- a/src/ck_ht_hash.h
+++ b/src/ck_ht_hash.h
@@ -175,7 +175,7 @@ static inline uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed
   while(data != end)
   {
     uint64_t k;
-    
+
     if (!((uintptr_t)data & 0x7))
       k = *data++;
     else {
diff --git a/src/ck_rhs.c b/src/ck_rhs.c
index 962a94c..a043ac6 100644
--- a/src/ck_rhs.c
+++ b/src/ck_rhs.c
@@ -327,11 +327,11 @@ ck_rhs_map_create(struct ck_rhs *hs, unsigned long entries)
 	if (hs->mode & CK_RHS_MODE_READ_MOSTLY)
 		size = sizeof(struct ck_rhs_map) +
 		    (sizeof(void *) * n_entries +
-		     sizeof(struct ck_rhs_no_entry_desc) * n_entries + 
+		     sizeof(struct ck_rhs_no_entry_desc) * n_entries +
 		     2 * CK_MD_CACHELINE - 1);
 	else
 		size = sizeof(struct ck_rhs_map) +
-		    (sizeof(struct ck_rhs_entry_desc) * n_entries + 
+		    (sizeof(struct ck_rhs_entry_desc) * n_entries +
 		     CK_MD_CACHELINE - 1);
 	map = hs->m->malloc(size);
 	if (map == NULL)
@@ -408,7 +408,7 @@ ck_rhs_map_probe_next(struct ck_rhs_map *map,
 {
 
 	if (probes & map->offset_mask) {
-		offset = (offset &~ map->offset_mask) + 
+		offset = (offset &~ map->offset_mask) +
 		    ((offset + 1) & map->offset_mask);
 		return offset;
 	} else
@@ -421,10 +421,10 @@ ck_rhs_map_probe_prev(struct ck_rhs_map *map, unsigned long offset,
 {
 
 	if (probes & map->offset_mask) {
-		offset = (offset &~ map->offset_mask) + ((offset - 1) & 
+		offset = (offset &~ map->offset_mask) + ((offset - 1) &
 		    map->offset_mask);
 		return offset;
-	} else 
+	} else
 		return ((offset - probes) & map->mask);
 }
 
@@ -616,7 +616,7 @@ ck_rhs_map_probe_rm(struct ck_rhs *hs,
 		if (behavior != CK_RHS_PROBE_NO_RH) {
 			struct ck_rhs_entry_desc *desc = (void *)&map->entries.no_entries.descs[offset];
 
-			if (pr == -1 && 
+			if (pr == -1 &&
 			    desc->in_rh == false && desc->probes < probes) {
 				pr = offset;
 				*n_probes = probes;
@@ -730,7 +730,7 @@ ck_rhs_map_probe(struct ck_rhs *hs,
 		if ((behavior != CK_RHS_PROBE_NO_RH)) {
 			struct ck_rhs_entry_desc *desc = &map->entries.descs[offset];
 
-			if (pr == -1 && 
+			if (pr == -1 &&
 			    desc->in_rh == false && desc->probes < probes) {
 				pr = offset;
 				*n_probes = probes;
@@ -818,7 +818,7 @@ ck_rhs_gc(struct ck_rhs *hs)
 }
 
 static void
-ck_rhs_add_wanted(struct ck_rhs *hs, long end_offset, long old_slot, 
+ck_rhs_add_wanted(struct ck_rhs *hs, long end_offset, long old_slot,
     unsigned long h)
 {
 	struct ck_rhs_map *map = hs->map;
@@ -872,7 +872,7 @@ ck_rhs_get_first_offset(struct ck_rhs_map *map, unsigned long offset, unsigned i
 	while (probes > (unsigned long)map->offset_mask + 1) {
 		offset -= ((probes - 1) &~ map->offset_mask);
 		offset &= map->mask;
-		offset = (offset &~ map->offset_mask) + 
+		offset = (offset &~ map->offset_mask) +
 		    ((offset - map->offset_mask) & map->offset_mask);
 		probes -= map->offset_mask + 1;
 	}
@@ -948,7 +948,7 @@ restart:
 		prev = prevs[--prevs_nb];
 		ck_pr_store_ptr(ck_rhs_entry_addr(map, orig_slot),
 		    ck_rhs_entry(map, prev));
-		h = ck_rhs_get_first_offset(map, orig_slot, 
+		h = ck_rhs_get_first_offset(map, orig_slot,
 		    desc->probes);
 		ck_rhs_add_wanted(hs, orig_slot, prev, h);
 		ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
@@ -966,7 +966,7 @@ ck_rhs_do_backward_shift_delete(struct ck_rhs *hs, long slot)
 	struct ck_rhs_map *map = hs->map;
 	struct ck_rhs_entry_desc *desc, *new_desc = NULL;
 	unsigned long h;
-	
+
 	desc = ck_rhs_desc(map, slot);
 	h = ck_rhs_remove_wanted(hs, slot, -1);
 	while (desc->wanted > 0) {
@@ -1140,7 +1140,7 @@ restart:
 		 * period if we can guarantee earlier position of
 		 * duplicate key.
 		 */
-		ck_rhs_add_wanted(hs, first, -1, h); 
+		ck_rhs_add_wanted(hs, first, -1, h);
 		if (object != NULL) {
 			ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
 			ck_pr_fence_atomic_store();
@@ -1155,7 +1155,7 @@ restart:
 		ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
 		ck_rhs_set_probes(map, slot, n_probes);
 		if (object == NULL)
-			ck_rhs_add_wanted(hs, slot, -1, h); 
+			ck_rhs_add_wanted(hs, slot, -1, h);
 	}
 
 	if (object == NULL) {
@@ -1209,12 +1209,12 @@ restart:
 		/* Insert key into first bucket in probe sequence. */
 		ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
 		desc->probes = n_probes;
-		ck_rhs_add_wanted(hs, first, -1, h); 
+		ck_rhs_add_wanted(hs, first, -1, h);
 	} else {
 		/* An empty slot was found. */
 		ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
 		ck_rhs_set_probes(map, slot, n_probes);
-		ck_rhs_add_wanted(hs, slot, -1, h); 
+		ck_rhs_add_wanted(hs, slot, -1, h);
 	}
 
 	map->n_entries++;
@@ -1253,7 +1253,7 @@ ck_rhs_get(struct ck_rhs *hs,
 	unsigned int g, g_p, probe;
 	unsigned int *generation;
 
-	do { 
+	do {
 		map = ck_pr_load_ptr(&hs->map);
 		generation = &map->generation[h & CK_RHS_G_MASK];
 		g = ck_pr_load_uint(generation);