diff --git a/include/spinlock/anderson.h b/include/spinlock/anderson.h
index ae77429..9555564 100644
--- a/include/spinlock/anderson.h
+++ b/include/spinlock/anderson.h
@@ -137,7 +137,7 @@ ck_spinlock_anderson_lock(struct ck_spinlock_anderson *lock,
 
 	/* Prepare slot for potential re-use by another thread. */
 	ck_pr_store_uint(&lock->slots[position].locked, true);
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 
 	*slot = lock->slots + position;
 	return;
@@ -149,7 +149,7 @@ ck_spinlock_anderson_unlock(struct ck_spinlock_anderson *lock,
 {
 	unsigned int position;
 
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 
 	/* Mark next slot as available. */
 	if (lock->wrap == 0)
diff --git a/include/spinlock/cas.h b/include/spinlock/cas.h
index db893af..b2442c5 100644
--- a/include/spinlock/cas.h
+++ b/include/spinlock/cas.h
@@ -60,7 +60,7 @@ ck_spinlock_cas_trylock(struct ck_spinlock_cas *lock)
 
 	value = ck_pr_fas_uint(&lock->value, true);
 	if (value == false)
-		ck_pr_fence_memory();
+		ck_pr_fence_acquire();
 
 	return !value;
 }
@@ -82,7 +82,7 @@ ck_spinlock_cas_lock(struct ck_spinlock_cas *lock)
 			ck_pr_stall();
 	}
 
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
 
@@ -94,7 +94,7 @@ ck_spinlock_cas_lock_eb(struct ck_spinlock_cas *lock)
 
 	while (ck_pr_cas_uint(&lock->value, false, true) == false)
 		ck_backoff_eb(&backoff);
 
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
	return;
 }
@@ -103,7 +103,7 @@ ck_spinlock_cas_unlock(struct ck_spinlock_cas *lock)
 {
 
 	/* Set lock state to unlocked. */
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	ck_pr_store_uint(&lock->value, false);
 	return;
 }
diff --git a/include/spinlock/clh.h b/include/spinlock/clh.h
index b76e413..cec219c 100644
--- a/include/spinlock/clh.h
+++ b/include/spinlock/clh.h
@@ -82,6 +82,7 @@ ck_spinlock_clh_lock(struct ck_spinlock_clh **queue, struct ck_spinlock_clh *thr
 	while (ck_pr_load_uint(&previous->wait) == true)
 		ck_pr_stall();
 
+	ck_pr_fence_load();
 	return;
 }
 
@@ -100,7 +101,7 @@ ck_spinlock_clh_unlock(struct ck_spinlock_clh **thread)
 	previous = thread[0]->previous;
 
 	/* We have to pay this cost anyways, use it as a compiler barrier too. */
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	ck_pr_store_uint(&(*thread)->wait, false);
 
 	/*
diff --git a/include/spinlock/dec.h b/include/spinlock/dec.h
index 16a53d4..7874ae2 100644
--- a/include/spinlock/dec.h
+++ b/include/spinlock/dec.h
@@ -62,7 +62,7 @@ ck_spinlock_dec_trylock(struct ck_spinlock_dec *lock)
 
 	value = ck_pr_fas_uint(&lock->value, 0);
 	if (value == 1) {
-		ck_pr_fence_memory();
+		ck_pr_fence_acquire();
 		return true;
 	}
 
@@ -89,7 +89,6 @@ ck_spinlock_dec_lock(struct ck_spinlock_dec *lock)
 	 * UINT_MAX lock requests can happen while the lock is held.
 	 */
 	ck_pr_dec_uint_zero(&lock->value, &r);
-	ck_pr_fence_memory();
 	if (r == true)
 		break;
 
@@ -98,6 +97,7 @@
 			ck_pr_stall();
 	}
 
+	ck_pr_fence_acquire();
 	return;
 }
 
@@ -115,7 +115,7 @@
 		ck_backoff_eb(&backoff);
 	}
 
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
 
@@ -123,7 +123,7 @@ CK_CC_INLINE static void
 ck_spinlock_dec_unlock(struct ck_spinlock_dec *lock)
 {
 
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	/* Unconditionally set lock value to 1 so someone can decrement lock to 0. */
 	ck_pr_store_uint(&lock->value, 1);
diff --git a/include/spinlock/fas.h b/include/spinlock/fas.h
index b44a6e4..6803616 100644
--- a/include/spinlock/fas.h
+++ b/include/spinlock/fas.h
@@ -58,7 +58,7 @@ ck_spinlock_fas_trylock(struct ck_spinlock_fas *lock)
 
 	value = ck_pr_fas_uint(&lock->value, true);
 	if (value == false)
-		ck_pr_fence_memory();
+		ck_pr_fence_acquire();
 
 	return !value;
 }
@@ -80,7 +80,7 @@ ck_spinlock_fas_lock(struct ck_spinlock_fas *lock)
 			ck_pr_stall();
 	}
 
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
 
@@ -92,7 +92,7 @@ ck_spinlock_fas_lock_eb(struct ck_spinlock_fas *lock)
 
 	while (ck_pr_fas_uint(&lock->value, true) == true)
 		ck_backoff_eb(&backoff);
 
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
@@ -100,7 +100,7 @@ CK_CC_INLINE static void
 ck_spinlock_fas_unlock(struct ck_spinlock_fas *lock)
 {
 
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	ck_pr_store_uint(&lock->value, false);
 	return;
 }
diff --git a/include/spinlock/hclh.h b/include/spinlock/hclh.h
index c82ccd0..8e56c10 100644
--- a/include/spinlock/hclh.h
+++ b/include/spinlock/hclh.h
@@ -110,6 +110,7 @@ ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_queue,
 	while (ck_pr_load_uint(&previous->wait) == true)
 		ck_pr_stall();
 
+	ck_pr_fence_load();
 	return;
 }
 
@@ -128,7 +129,7 @@ ck_spinlock_hclh_unlock(struct ck_spinlock_hclh **thread)
 	previous = thread[0]->previous;
 
 	/* We have to pay this cost anyways, use it as a compiler barrier too. */
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	ck_pr_store_uint(&(*thread)->wait, false);
 
 	/*
diff --git a/include/spinlock/mcs.h b/include/spinlock/mcs.h
index 4c8e759..5ca51ae 100644
--- a/include/spinlock/mcs.h
+++ b/include/spinlock/mcs.h
@@ -61,7 +61,7 @@ ck_spinlock_mcs_trylock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *
 	ck_pr_fence_store_atomic();
 
 	if (ck_pr_cas_ptr(queue, NULL, node) == true) {
-		ck_pr_fence_load();
+		ck_pr_fence_acquire();
 		return true;
 	}
 
@@ -111,7 +111,7 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *n
 {
 	struct ck_spinlock_mcs *next;
 
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	next = ck_pr_load_ptr(&node->next);
 
 	if (next == NULL) {
diff --git a/include/spinlock/ticket.h b/include/spinlock/ticket.h
index 288f4a0..1402a50 100644
--- a/include/spinlock/ticket.h
+++ b/include/spinlock/ticket.h
@@ -115,7 +115,7 @@ ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
 		    CK_SPINLOCK_TICKET_MASK;
 	}
 
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
 
@@ -142,7 +142,7 @@ ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
 		ck_backoff_eb(&backoff);
 	}
 
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
 
@@ -163,7 +163,7 @@ ck_spinlock_ticket_trylock(struct ck_spinlock_ticket *ticket)
 		return false;
 	}
 
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return true;
 }
 
@@ -171,7 +171,7 @@ CK_CC_INLINE static void
 ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
 {
 
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	CK_SPINLOCK_TICKET_INC((CK_SPINLOCK_TICKET_TYPE_BASE *)(void *)&ticket->value);
 	return;
 }
@@ -235,7 +235,7 @@ ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
 
 	while (ck_pr_load_uint(&ticket->position) != request)
 		ck_pr_stall();
 
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
 
@@ -264,7 +264,7 @@ ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
 		ck_backoff_eb(&backoff);
 	}
 
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
 
@@ -273,7 +273,7 @@ ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
 {
 	unsigned int update;
 
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 
 	/*
 	 * Update current ticket value so next lock request can proceed.
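Note (not part of the patch): every hunk above follows one pattern. The full ck_pr_fence_memory() is relaxed to ck_pr_fence_acquire() once the lock has been won, and to ck_pr_fence_release() before the store that publishes the lock word; spin loops that merely observe a predecessor's flag gain a ck_pr_fence_load() before returning. The sketch below restates that fence placement in portable C11 atomics, assuming ck_pr_fence_acquire()/ck_pr_fence_release() behave like C11 acquire/release fences; the toy_spinlock, toy_lock, and toy_unlock names are hypothetical and illustrative only.

#include <stdatomic.h>

struct toy_spinlock {
	atomic_uint value;	/* 0 = unlocked, 1 = locked. */
};

static void
toy_lock(struct toy_spinlock *lock)
{

	/* Spin with relaxed exchanges; no ordering is needed while we fail. */
	while (atomic_exchange_explicit(&lock->value, 1,
	    memory_order_relaxed) == 1)
		;

	/*
	 * Acquire fence: critical-section accesses may not be reordered
	 * before the winning exchange. This mirrors ck_pr_fence_acquire()
	 * replacing ck_pr_fence_memory() in the lock paths above.
	 */
	atomic_thread_fence(memory_order_acquire);
	return;
}

static void
toy_unlock(struct toy_spinlock *lock)
{

	/*
	 * Release fence: critical-section accesses must be visible before
	 * the store below hands the lock to the next owner, mirroring
	 * ck_pr_fence_release() before ck_pr_store_uint() in the unlock
	 * paths above.
	 */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&lock->value, 0, memory_order_relaxed);
	return;
}

The practical payoff, presumably, is on the unlock side: a release fence only forbids prior accesses from moving past the releasing store, which on strongly ordered targets such as x86 typically costs no instruction, whereas the full ck_pr_fence_memory() it replaces also orders the store against later loads and generally requires a serializing instruction.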