Make ck_pr_store_* and ck_pr_load_* a bit more type safe.

We use some macro trickery to enforce that ck_pr_store_* is actually
storing the correct type into the target variable, without introducing
any side effects: the assignment appears only inside a sizeof, so it is
type-checked at compile time but never evaluated, and a comma expression
then passes the destination through to the underlying store untouched.
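
For illustration, here is the trick compressed into a standalone sketch;
plain_store_int and checked_store_int are made-up names, while the real
CK_PR_STORE_SAFE macro in the diff below does the same thing in front of
ck_pr_md_store_*:

#include <stdio.h>

/*
 * The assignment *(DST) = (VAL) appears only inside sizeof, so the
 * compiler type-checks it but never executes it; the comma operator
 * then discards the sizeof result and hands DST to the real store.
 */
#define plain_store_int(dst, val)	(*(dst) = (val))
#define checked_store_int(DST, VAL) \
	plain_store_int(((void)sizeof(*(DST) = (VAL)), (DST)), (VAL))

int
main(void)
{
	int counter = 0;

	checked_store_int(&counter, 7);	/* int into int: fine */
	/*
	 * checked_store_int(&counter, "7"); would draw a diagnostic,
	 * since the probe assigns a char * to an int.
	 */
	printf("%d\n", counter);
	return 0;
}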

On the load side, pointer loads simply cast the result back to the type
of the variable being loaded, falling back to a plain void * cast on
compilers without typeof support.
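
A rough standalone equivalent of the pointer-load cast, assuming a
compiler with __typeof__; raw_load_ptr and checked_load_ptr are
illustrative stand-ins for ck_pr_md_load_ptr and the new ck_pr_load_ptr:

#include <stdio.h>

/* Stand-in for ck_pr_md_load_ptr: a raw void * read of the slot. */
#define raw_load_ptr(src)	(*(void **)(src))
/* Cast the raw void * back to the element type of the slot. */
#define checked_load_ptr(src)	((__typeof__(*(src)))raw_load_ptr(src))

int
main(void)
{
	int value = 5;
	int *slot = &value;
	int *p;

	p = checked_load_ptr(&slot);	/* yields int *, matches p */
	/*
	 * double *d = checked_load_ptr(&slot); would draw an
	 * incompatible-pointer-type diagnostic instead of silently
	 * accepting the void * result.
	 */
	printf("%d\n", *p);
	return 0;
}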

There is an unsafe version of the store_ptr macro called
ck_pr_store_ptr_unsafe for those times when you are _really_ sure that
you know what you're doing.
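
For example, a function that receives an opaque void * destination, as
ck_fifo_spsc_dequeue() does in the diff below, leaves the type check
nothing to probe, so it has to use the unsafe macro (publish_result here
is a hypothetical consumer, not a ck function):

#include <stdio.h>
#include <ck_pr.h>

static void
publish_result(void *out, void *v)
{

	/*
	 * *(out) = v cannot be type-checked because out is an opaque
	 * void *; ck_pr_store_ptr_unsafe() keeps the old behaviour.
	 */
	ck_pr_store_ptr_unsafe(out, v);
	return;
}

int
main(void)
{
	int datum = 3;
	void *slot = NULL;

	publish_result(&slot, &datum);
	printf("%d\n", *(int *)slot);
	return 0;
}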

This commit also updates some of the source files (ck_ht, ck_hs,
ck_rhs): ck_ht now uses the unsafe macro, as its conversion between
uintptr_t and void * is invalid under the new macros. ck_hs and ck_rhs
have had some casts added to preserve validity.
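
A sketch of the ck_ht situation, using an illustrative struct modelled
on its packed entries (packed_entry and packed_entry_set are not ck
types): the fields are uintptr_t, so a checked void * store would be
diagnosed and the unsafe macro is used instead.

#include <stdint.h>
#include <ck_pr.h>

struct packed_entry {
	uintptr_t key;
	uintptr_t value;	/* pointers punned into integers */
};

void
packed_entry_set(struct packed_entry *e, void *k, void *v)
{

	/*
	 * The probe *(&e->value) = v would assign a void * to a
	 * uintptr_t, which the checked macro now diagnoses; the unsafe
	 * variant preserves the uintptr_t <-> void * punning.
	 */
	ck_pr_store_ptr_unsafe(&e->value, v);
	ck_pr_fence_store();
	ck_pr_store_ptr_unsafe(&e->key, k);
	return;
}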
John Wittrock 10 years ago
parent 219e3a7dd8
commit 4ef225172e

@ -86,6 +86,10 @@
#define CK_CC_UNLIKELY(x) x
#endif
#ifndef CK_CC_TYPEOF
#define CK_CC_TYPEOF(X, DEFAULT) DEFAULT
#endif
#ifndef CK_F_CC_FFS
#define CK_F_CC_FFS
CK_CC_INLINE static int

@ -151,7 +151,7 @@ ck_fifo_spsc_dequeue(struct ck_fifo_spsc *fifo, void *value)
return false;
/* If entry is visible, guarantee store to value is visible. */
ck_pr_store_ptr(value, entry->value);
ck_pr_store_ptr_unsafe(value, entry->value);
ck_pr_fence_store();
ck_pr_store_ptr(&fifo->head, entry);
return true;

@ -168,7 +168,7 @@ ck_hp_fifo_dequeue_mpmc(ck_hp_record_t *record,
break;
}
ck_pr_store_ptr(value, next->value);
ck_pr_store_ptr_unsafe(value, next->value);
return head;
}
@ -202,7 +202,7 @@ ck_hp_fifo_trydequeue_mpmc(ck_hp_record_t *record,
} else if (ck_pr_cas_ptr(&fifo->head, head, next) == false)
return NULL;
ck_pr_store_ptr(value, next->value);
ck_pr_store_ptr_unsafe(value, next->value);
return head;
}

@ -155,7 +155,7 @@ ck_pr_rfo(const void *m)
{ \
T previous; \
C punt; \
punt = ck_pr_load_##S(target); \
punt = ck_pr_md_load_##S(target); \
previous = (T)punt; \
while (ck_pr_cas_##S##_value(target, \
(C)previous, \
@ -795,7 +795,7 @@ CK_PR_UNARY_Z_S(dec, 8, uint8_t, -, 1)
{ \
T previous; \
C punt; \
punt = (C)ck_pr_load_##S(target); \
punt = (C)ck_pr_md_load_##S(target); \
previous = (T)punt; \
while (ck_pr_cas_##S##_value(target, \
(C)previous, \
@ -812,7 +812,7 @@ CK_PR_UNARY_Z_S(dec, 8, uint8_t, -, 1)
{ \
T previous; \
C punt; \
punt = (C)ck_pr_load_##S(target); \
punt = (C)ck_pr_md_load_##S(target); \
previous = (T)punt; \
while (ck_pr_cas_##S##_value(target, \
(C)previous, \
@ -999,7 +999,7 @@ CK_PR_N_Z_S(8, uint8_t)
{ \
T previous; \
C punt; \
punt = (C)ck_pr_load_##S(target); \
punt = (C)ck_pr_md_load_##S(target); \
previous = (T)punt; \
while (ck_pr_cas_##S##_value(target, \
(C)previous, \
@ -1015,7 +1015,7 @@ CK_PR_N_Z_S(8, uint8_t)
ck_pr_fas_##S(M *target, C update) \
{ \
C previous; \
previous = ck_pr_load_##S(target); \
previous = ck_pr_md_load_##S(target); \
while (ck_pr_cas_##S##_value(target, \
previous, \
update, \
@ -1159,4 +1159,40 @@ CK_PR_FAS_S(8, uint8_t)
#undef CK_PR_FAA
#undef CK_PR_FAS
#define CK_PR_STORE_SAFE(DST, VAL, TYPE) \
ck_pr_md_store_##TYPE( \
((void)sizeof(*(DST) = (VAL)), (DST)), \
(VAL))
#define ck_pr_store_ptr(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), ptr)
#define ck_pr_store_char(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), char)
#define ck_pr_store_double(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), double)
#define ck_pr_store_uint(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), uint)
#define ck_pr_store_int(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), int)
#define ck_pr_store_32(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 32)
#define ck_pr_store_16(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 16)
#define ck_pr_store_8(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 8)
#define ck_pr_store_ptr_unsafe(DST, VAL) ck_pr_md_store_ptr((DST), (VAL))
#ifdef CK_F_PR_LOAD_64
#define ck_pr_store_64(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 64)
#endif /* CK_F_PR_LOAD_64 */
#define CK_PR_LOAD_PTR_SAFE(SRC) (CK_CC_TYPEOF(*(SRC), (void *)))ck_pr_md_load_ptr((SRC))
#define ck_pr_load_ptr(SRC) CK_PR_LOAD_PTR_SAFE((SRC))
#define CK_PR_LOAD_SAFE(SRC, TYPE) ck_pr_md_load_##TYPE((SRC))
#define ck_pr_load_char(SRC) CK_PR_LOAD_SAFE((SRC), char)
#define ck_pr_load_double(SRC) CK_PR_LOAD_SAFE((SRC), double)
#define ck_pr_load_uint(SRC) CK_PR_LOAD_SAFE((SRC), uint)
#define ck_pr_load_int(SRC) CK_PR_LOAD_SAFE((SRC), int)
#define ck_pr_load_32(SRC) CK_PR_LOAD_SAFE((SRC), 32)
#define ck_pr_load_16(SRC) CK_PR_LOAD_SAFE((SRC), 16)
#define ck_pr_load_8(SRC) CK_PR_LOAD_SAFE((SRC), 8)
#ifdef CK_F_PR_LOAD_64
#define ck_pr_load_64(SRC) CK_PR_LOAD_SAFE((SRC), 64)
#endif /* CK_F_PR_LOAD_64 */
#endif /* CK_PR_H */

@ -104,7 +104,7 @@ CK_PR_FENCE(release, CK_DMB)
#define CK_PR_LOAD(S, M, T, C, I) \
CK_CC_INLINE static T \
ck_pr_load_##S(const M *target) \
ck_pr_md_load_##S(const M *target) \
{ \
long r = 0; \
__asm__ __volatile__(I " %0, [%1];" \
@ -143,7 +143,7 @@ ck_pr_load_64(const uint64_t *target)
#define CK_PR_STORE(S, M, T, C, I) \
CK_CC_INLINE static void \
ck_pr_store_##S(M *target, T v) \
ck_pr_md_store_##S(M *target, T v) \
{ \
__asm__ __volatile__(I " %1, [%0]" \
: \

@ -97,6 +97,11 @@
*/
#define CK_CC_ALIASED __attribute__((__may_alias__))
/*
* Compile-time typeof
*/
#define CK_CC_TYPEOF(X, DEFAULT) __typeof__(X)
/*
* Portability wrappers for bitwise ops.
*/

@ -57,14 +57,14 @@ ck_pr_barrier(void)
#define CK_PR_LOAD(S, M, T) \
CK_CC_INLINE static T \
ck_pr_load_##S(const M *target) \
ck_pr_md_load_##S(const M *target) \
{ \
T r; \
r = CK_PR_ACCESS(*(T *)target); \
return (r); \
} \
CK_CC_INLINE static void \
ck_pr_store_##S(M *target, T v) \
ck_pr_md_store_##S(M *target, T v) \
{ \
CK_PR_ACCESS(*(T *)target) = v; \
return; \

@ -84,7 +84,7 @@ CK_PR_FENCE(release, "lwsync")
#define CK_PR_LOAD(S, M, T, C, I) \
CK_CC_INLINE static T \
ck_pr_load_##S(const M *target) \
ck_pr_md_load_##S(const M *target) \
{ \
T r; \
__asm__ __volatile__(I "%U1%X1 %0, %1" \
@ -111,7 +111,7 @@ CK_PR_LOAD_S(char, char, "lbz")
#define CK_PR_STORE(S, M, T, C, I) \
CK_CC_INLINE static void \
ck_pr_store_##S(M *target, T v) \
ck_pr_md_store_##S(M *target, T v) \
{ \
__asm__ __volatile__(I "%U0%X0 %1, %0" \
: "=m" (*(C *)target) \

@ -87,7 +87,7 @@ CK_PR_FENCE(release, "lwsync")
#define CK_PR_LOAD(S, M, T, C, I) \
CK_CC_INLINE static T \
ck_pr_load_##S(const M *target) \
ck_pr_md_load_##S(const M *target) \
{ \
T r; \
__asm__ __volatile__(I "%U1%X1 %0, %1" \
@ -116,7 +116,7 @@ CK_PR_LOAD_S(double, double, "ld")
#define CK_PR_STORE(S, M, T, C, I) \
CK_CC_INLINE static void \
ck_pr_store_##S(M *target, T v) \
ck_pr_md_store_##S(M *target, T v) \
{ \
__asm__ __volatile__(I "%U0%X0 %1, %0" \
: "=m" (*(C *)target) \

@ -84,7 +84,7 @@ CK_PR_FENCE(release, "membar #LoadStore | #StoreStore")
#define CK_PR_LOAD(S, M, T, C, I) \
CK_CC_INLINE static T \
ck_pr_load_##S(const M *target) \
ck_pr_md_load_##S(const M *target) \
{ \
T r; \
__asm__ __volatile__(I " [%1], %0" \
@ -109,7 +109,7 @@ CK_PR_LOAD_S(int, int, "ldsw")
#define CK_PR_STORE(S, M, T, C, I) \
CK_CC_INLINE static void \
ck_pr_store_##S(M *target, T v) \
ck_pr_md_store_##S(M *target, T v) \
{ \
__asm__ __volatile__(I " %0, [%1]" \
: \

@ -116,7 +116,7 @@ CK_PR_FAS_S(8, uint8_t, "xchgb")
#define CK_PR_LOAD(S, M, T, C, I) \
CK_CC_INLINE static T \
ck_pr_load_##S(const M *target) \
ck_pr_md_load_##S(const M *target) \
{ \
T r; \
__asm__ __volatile__(I " %1, %0" \
@ -142,7 +142,7 @@ CK_PR_LOAD_S(8, uint8_t, "movb")
#define CK_PR_STORE(S, M, T, C, I) \
CK_CC_INLINE static void \
ck_pr_store_##S(M *target, T v) \
ck_pr_md_store_##S(M *target, T v) \
{ \
__asm__ __volatile__(I " %1, %0" \
: "=m" (*(C *)target) \

@ -147,7 +147,7 @@ CK_PR_FAS_S(8, uint8_t, "xchgb")
*/
#define CK_PR_LOAD(S, M, T, C, I) \
CK_CC_INLINE static T \
ck_pr_load_##S(const M *target) \
ck_pr_md_load_##S(const M *target) \
{ \
T r; \
__asm__ __volatile__(I " %1, %0" \
@ -195,7 +195,7 @@ ck_pr_load_ptr_2(const void *t, void *v)
#define CK_PR_LOAD_2(S, W, T) \
CK_CC_INLINE static void \
ck_pr_load_##S##_##W(const T t[2], T v[2]) \
ck_pr_md_load_##S##_##W(const T t[2], T v[2]) \
{ \
ck_pr_load_64_2((const uint64_t *)(const void *)t, \
(uint64_t *)(void *)v); \
@ -216,7 +216,7 @@ CK_PR_LOAD_2(8, 16, uint8_t)
*/
#define CK_PR_STORE_IMM(S, M, T, C, I, K) \
CK_CC_INLINE static void \
ck_pr_store_##S(M *target, T v) \
ck_pr_md_store_##S(M *target, T v) \
{ \
__asm__ __volatile__(I " %1, %0" \
: "=m" (*(C *)target) \
@ -227,7 +227,7 @@ CK_PR_LOAD_2(8, 16, uint8_t)
#define CK_PR_STORE(S, M, T, C, I) \
CK_CC_INLINE static void \
ck_pr_store_##S(M *target, T v) \
ck_pr_md_store_##S(M *target, T v) \
{ \
__asm__ __volatile__(I " %1, %0" \
: "=m" (*(C *)target) \

@ -584,8 +584,7 @@ ck_hs_gc(struct ck_hs *hs, unsigned long cycles, unsigned long seed)
if (first != NULL) {
const void *insert = ck_hs_marshal(hs->mode, entry, h);
ck_pr_store_ptr(first, insert);
ck_pr_store_ptr_unsafe(first, insert);
ck_hs_map_signal(map, h);
ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
}
@ -642,11 +641,11 @@ ck_hs_fas(struct ck_hs *hs,
insert = ck_hs_marshal(hs->mode, key, h);
if (first != NULL) {
ck_pr_store_ptr(first, insert);
ck_pr_store_ptr_unsafe(first, insert);
ck_hs_map_signal(map, h);
ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
} else {
ck_pr_store_ptr(slot, insert);
ck_pr_store_ptr_unsafe(slot, insert);
}
*previous = object;
@ -717,7 +716,7 @@ restart:
* This follows the same semantics as ck_hs_set, please refer to that
* function for documentation.
*/
ck_pr_store_ptr(first, insert);
ck_pr_store_ptr_unsafe(first, insert);
if (object != NULL) {
ck_hs_map_signal(map, h);
@ -728,7 +727,7 @@ restart:
* If we are storing into same slot, then atomic store is sufficient
* for replacement.
*/
ck_pr_store_ptr(slot, insert);
ck_pr_store_ptr_unsafe(slot, insert);
}
if (object == NULL)
@ -766,7 +765,7 @@ restart:
if (first != NULL) {
/* If an earlier bucket was found, then store entry there. */
ck_pr_store_ptr(first, insert);
ck_pr_store_ptr_unsafe(first, insert);
/*
* If a duplicate key was found, then delete it after
@ -784,7 +783,7 @@ restart:
* If we are storing into same slot, then atomic store is sufficient
* for replacement.
*/
ck_pr_store_ptr(slot, insert);
ck_pr_store_ptr_unsafe(slot, insert);
}
if (object == NULL)
@ -827,10 +826,10 @@ restart:
if (first != NULL) {
/* Insert key into first bucket in probe sequence. */
ck_pr_store_ptr(first, insert);
ck_pr_store_ptr_unsafe(first, insert);
} else {
/* An empty slot was found. */
ck_pr_store_ptr(slot, insert);
ck_pr_store_ptr_unsafe(slot, insert);
}
ck_hs_map_postinsert(hs, map);

@ -458,13 +458,13 @@ ck_ht_gc(struct ck_ht *ht, unsigned long cycles, unsigned long seed)
ck_pr_store_64(&priority->key_length, entry->key_length);
ck_pr_store_64(&priority->hash, entry->hash);
#endif
ck_pr_store_ptr(&priority->value, (void *)entry->value);
ck_pr_store_ptr_unsafe(&priority->value, (void *)entry->value);
ck_pr_fence_store();
ck_pr_store_ptr(&priority->key, (void *)entry->key);
ck_pr_store_ptr_unsafe(&priority->key, (void *)entry->key);
ck_pr_fence_store();
ck_pr_store_64(&map->deletions, map->deletions + 1);
ck_pr_fence_store();
ck_pr_store_ptr(&entry->key, (void *)CK_HT_KEY_TOMBSTONE);
ck_pr_store_ptr_unsafe(&entry->key, (void *)CK_HT_KEY_TOMBSTONE);
ck_pr_fence_store();
}
@ -643,7 +643,7 @@ ck_ht_reset_size_spmc(struct ck_ht *table, uint64_t size)
if (update == NULL)
return false;
ck_pr_store_ptr(&table->map, update);
ck_pr_store_ptr_unsafe(&table->map, update);
ck_ht_map_destroy(table->m, map, true);
return true;
}
@ -739,7 +739,7 @@ restart:
}
ck_pr_fence_store();
ck_pr_store_ptr(&table->map, update);
ck_pr_store_ptr_unsafe(&table->map, update);
ck_ht_map_destroy(table->m, map, true);
return true;
}
@ -770,7 +770,7 @@ ck_ht_remove_spmc(struct ck_ht *table,
*entry = snapshot;
ck_pr_store_ptr(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE);
ck_pr_store_ptr_unsafe(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE);
ck_pr_fence_store();
ck_pr_store_64(&map->n_entries, map->n_entries - 1);
return true;
@ -887,9 +887,9 @@ ck_ht_set_spmc(struct ck_ht *table,
ck_pr_fence_store();
}
ck_pr_store_ptr(&priority->value, (void *)entry->value);
ck_pr_store_ptr_unsafe(&priority->value, (void *)entry->value);
ck_pr_fence_store();
ck_pr_store_ptr(&priority->key, (void *)entry->key);
ck_pr_store_ptr_unsafe(&priority->key, (void *)entry->key);
ck_pr_fence_store();
/*
@ -899,7 +899,7 @@ ck_ht_set_spmc(struct ck_ht *table,
ck_pr_store_64(&map->deletions, map->deletions + 1);
ck_pr_fence_store();
ck_pr_store_ptr(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE);
ck_pr_store_ptr_unsafe(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE);
ck_pr_fence_store();
} else {
/*
@ -922,15 +922,15 @@ ck_ht_set_spmc(struct ck_ht *table,
}
#ifdef CK_HT_PP
ck_pr_store_ptr(&candidate->value, (void *)entry->value);
ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value);
ck_pr_fence_store();
ck_pr_store_ptr(&candidate->key, (void *)entry->key);
ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key);
#else
ck_pr_store_64(&candidate->key_length, entry->key_length);
ck_pr_store_64(&candidate->hash, entry->hash);
ck_pr_store_ptr(&candidate->value, (void *)entry->value);
ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value);
ck_pr_fence_store();
ck_pr_store_ptr(&candidate->key, (void *)entry->key);
ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key);
#endif
/*
@ -1008,15 +1008,15 @@ ck_ht_put_spmc(struct ck_ht *table,
ck_ht_map_bound_set(map, h, probes);
#ifdef CK_HT_PP
ck_pr_store_ptr(&candidate->value, (void *)entry->value);
ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value);
ck_pr_fence_store();
ck_pr_store_ptr(&candidate->key, (void *)entry->key);
ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key);
#else
ck_pr_store_64(&candidate->key_length, entry->key_length);
ck_pr_store_64(&candidate->hash, entry->hash);
ck_pr_store_ptr(&candidate->value, (void *)entry->value);
ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value);
ck_pr_fence_store();
ck_pr_store_ptr(&candidate->key, (void *)entry->key);
ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key);
#endif
ck_pr_store_64(&map->n_entries, map->n_entries + 1);

@ -1077,14 +1077,14 @@ restart:
goto restart;
else if (CK_CC_UNLIKELY(ret != 0))
return false;
ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
ck_pr_store_ptr_unsafe(ck_rhs_entry_addr(map, first), insert);
ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
ck_pr_fence_atomic_store();
desc2->probes = n_probes;
ck_rhs_add_wanted(hs, first, -1, h);
ck_rhs_do_backward_shift_delete(hs, slot);
} else {
ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
ck_pr_store_ptr_unsafe(ck_rhs_entry_addr(map, slot), insert);
ck_rhs_set_probes(map, slot, n_probes);
}
*previous = object;
@ -1173,7 +1173,7 @@ restart:
if (CK_CC_UNLIKELY(ret == -1))
return false;
/* If an earlier bucket was found, then store entry there. */
ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
ck_pr_store_ptr_unsafe(ck_rhs_entry_addr(map, first), insert);
desc2->probes = n_probes;
/*
* If a duplicate key was found, then delete it after
@ -1193,7 +1193,7 @@ restart:
* If we are storing into same slot, then atomic store is sufficient
* for replacement.
*/
ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
ck_pr_store_ptr_unsafe(ck_rhs_entry_addr(map, slot), insert);
ck_rhs_set_probes(map, slot, n_probes);
if (object == NULL)
ck_rhs_add_wanted(hs, slot, -1, h);
@ -1250,7 +1250,7 @@ restart:
if (CK_CC_UNLIKELY(ret == -1))
return false;
/* If an earlier bucket was found, then store entry there. */
ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
ck_pr_store_ptr_unsafe(ck_rhs_entry_addr(map, first), insert);
desc2->probes = n_probes;
/*
* If a duplicate key was found, then delete it after
@ -1271,7 +1271,7 @@ restart:
* If we are storing into same slot, then atomic store is sufficient
* for replacement.
*/
ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
ck_pr_store_ptr_unsafe(ck_rhs_entry_addr(map, slot), insert);
ck_rhs_set_probes(map, slot, n_probes);
if (object == NULL)
ck_rhs_add_wanted(hs, slot, -1, h);
@ -1327,12 +1327,12 @@ restart:
else if (CK_CC_UNLIKELY(ret == -1))
return false;
/* Insert key into first bucket in probe sequence. */
ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
ck_pr_store_ptr_unsafe(ck_rhs_entry_addr(map, first), insert);
desc->probes = n_probes;
ck_rhs_add_wanted(hs, first, -1, h);
} else {
/* An empty slot was found. */
ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
ck_pr_store_ptr_unsafe(ck_rhs_entry_addr(map, slot), insert);
ck_rhs_set_probes(map, slot, n_probes);
ck_rhs_add_wanted(hs, slot, -1, h);
}
