From e693cb9afeb2e64822d7a27f85d382d3bd05eca8 Mon Sep 17 00:00:00 2001
From: Samy Al Bahra
Date: Sun, 28 Jun 2015 15:11:17 -0400
Subject: [PATCH] ck_*lock: Migrate all locking primitives to lock / unlock.

This only affects architectures with relaxed memory ordering (RMO). The
lock / unlock fences add stricter semantics for critical section
serialization. In addition to this, asymmetric synchronization
primitives will now provide load ordering with respect to readers.

This also modifies locked operations to have acquire semantics. These
operations exist as elision predicates, and the stronger ordering does
not affect that use in any way.

Several performance improvements are included as well: a redundant
fence, left over from the days of wanting to support Alpha, has been
removed.
---
 include/ck_brlock.h         | 12 ++++++------
 include/ck_bytelock.h       |  9 +++++----
 include/ck_pflock.h         |  8 ++++----
 include/ck_rwlock.h         | 27 ++++++++++++++-----------
 include/ck_swlock.h         | 39 +++++++++++++++++++------------------
 include/ck_tflock.h         | 17 +++++++++-------
 include/spinlock/anderson.h | 17 +++++++++-------
 include/spinlock/cas.h      | 15 +++++++-------
 include/spinlock/clh.h      | 20 ++++++++++++-------
 include/spinlock/dec.h      | 25 ++++++++++++------------
 include/spinlock/fas.h      | 15 +++++++-------
 include/spinlock/hclh.h     | 11 ++++++-----
 include/spinlock/mcs.h      | 20 +++++++++----------
 include/spinlock/ticket.h   | 26 ++++++++++++------------
 14 files changed, 141 insertions(+), 120 deletions(-)

diff --git a/include/ck_brlock.h b/include/ck_brlock.h
index e2f910a..2a5200c 100644
--- a/include/ck_brlock.h
+++ b/include/ck_brlock.h
@@ -91,7 +91,7 @@ ck_brlock_write_lock(struct ck_brlock *br)
 			ck_pr_stall();
 	}
 
-	/* Already acquired with respect to other writers. */
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -99,7 +99,7 @@ CK_CC_INLINE static void
 ck_brlock_write_unlock(struct ck_brlock *br)
 {
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 	ck_pr_store_uint(&br->writer, false);
 	return;
 }
@@ -134,7 +134,7 @@ ck_brlock_write_trylock(struct ck_brlock *br, unsigned int factor)
 		}
 	}
 
-	/* Already acquired with respect to other writers. */
+	ck_pr_fence_lock();
 	return true;
 }
 
@@ -212,7 +212,7 @@ ck_brlock_read_lock(struct ck_brlock *br, struct ck_brlock_reader *reader)
 			ck_pr_store_uint(&reader->n_readers, 0);
 	}
 
-	ck_pr_fence_load();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -263,7 +263,7 @@ ck_brlock_read_trylock(struct ck_brlock *br,
 			return false;
 	}
 
-	ck_pr_fence_load();
+	ck_pr_fence_lock();
 	return true;
 }
 
@@ -271,7 +271,7 @@ CK_CC_INLINE static void
 ck_brlock_read_unlock(struct ck_brlock_reader *reader)
 {
 
-	ck_pr_fence_load_store();
+	ck_pr_fence_unlock();
 	ck_pr_store_uint(&reader->n_readers, reader->n_readers - 1);
 	return;
 }
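[Illustration, not part of this patch: a minimal brlock usage sketch under
the new semantics. With ck_pr_fence_lock() terminating both acquisition
paths, the critical section loads below cannot be reordered above lock
acquisition on RMO targets. This assumes the reader structure was
registered beforehand with ck_brlock_read_register.]

    #include <ck_brlock.h>

    static ck_brlock_t br = CK_BRLOCK_INITIALIZER;
    static unsigned int shared;

    /* self must already be registered: ck_brlock_read_register(&br, self). */
    static unsigned int
    read_shared(ck_brlock_reader_t *self)
    {
            unsigned int v;

            ck_brlock_read_lock(&br, self);
            v = shared;     /* Ordered after the fence in read_lock. */
            ck_brlock_read_unlock(self);
            return v;
    }

    static void
    write_shared(unsigned int v)
    {

            ck_brlock_write_lock(&br);
            shared = v;     /* Serialized against all registered readers. */
            ck_brlock_write_unlock(&br);
            return;
    }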
diff --git a/include/ck_bytelock.h b/include/ck_bytelock.h
index 771990c..c72bbc1 100644
--- a/include/ck_bytelock.h
+++ b/include/ck_bytelock.h
@@ -107,6 +107,7 @@ ck_bytelock_write_lock(struct ck_bytelock *bytelock, unsigned int slot)
 	while (ck_pr_load_uint(&bytelock->n_readers) != 0)
 		ck_pr_stall();
 
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -118,7 +119,7 @@ CK_CC_INLINE static void
 ck_bytelock_write_unlock(struct ck_bytelock *bytelock)
 {
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 	ck_pr_store_uint(&bytelock->owner, 0);
 	return;
 }
@@ -147,7 +148,7 @@ ck_bytelock_read_lock(struct ck_bytelock *bytelock, unsigned int slot)
 			ck_pr_stall();
 	}
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -169,7 +170,7 @@ ck_bytelock_read_lock(struct ck_bytelock *bytelock, unsigned int slot)
 			ck_pr_stall();
 	}
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -177,7 +178,7 @@ CK_CC_INLINE static void
 ck_bytelock_read_unlock(struct ck_bytelock *bytelock, unsigned int slot)
 {
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 
 	if (slot > sizeof bytelock->readers)
 		ck_pr_dec_uint(&bytelock->n_readers);
diff --git a/include/ck_pflock.h b/include/ck_pflock.h
index 2809ad1..61b42bd 100644
--- a/include/ck_pflock.h
+++ b/include/ck_pflock.h
@@ -71,7 +71,7 @@ CK_CC_INLINE static void
 ck_pflock_write_unlock(ck_pflock_t *pf)
 {
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 
 	/* Migrate from write phase to read phase. */
 	ck_pr_and_32(&pf->rin, CK_PFLOCK_LSB);
@@ -103,7 +103,7 @@ ck_pflock_write_lock(ck_pflock_t *pf)
 	while (ck_pr_load_32(&pf->rout) != ticket)
 		ck_pr_stall();
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -111,7 +111,7 @@ CK_CC_INLINE static void
 ck_pflock_read_unlock(ck_pflock_t *pf)
 {
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 	ck_pr_faa_32(&pf->rout, CK_PFLOCK_RINC);
 	return;
 }
@@ -135,7 +135,7 @@ ck_pflock_read_lock(ck_pflock_t *pf)
 
 leave:
 	/* Acquire semantics with respect to readers. */
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
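[Illustration, not part of this patch: phase-fair reader-writer usage. The
fence names make the intent explicit: ck_pr_fence_lock() keeps the table
accesses inside the critical section on RMO, and ck_pr_fence_unlock() keeps
them from sinking past the phase migration.]

    #include <ck_pflock.h>

    static ck_pflock_t pf = CK_PFLOCK_INITIALIZER;
    static int table[64];

    static int
    lookup(unsigned int i)
    {
            int v;

            ck_pflock_read_lock(&pf);
            v = table[i & 63];
            ck_pflock_read_unlock(&pf);
            return v;
    }

    static void
    update(unsigned int i, int v)
    {

            ck_pflock_write_lock(&pf);
            table[i & 63] = v;
            ck_pflock_write_unlock(&pf);
            return;
    }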
diff --git a/include/ck_rwlock.h b/include/ck_rwlock.h
index 4488117..ea05d03 100644
--- a/include/ck_rwlock.h
+++ b/include/ck_rwlock.h
@@ -54,7 +54,7 @@ CK_CC_INLINE static void
 ck_rwlock_write_unlock(ck_rwlock_t *rw)
 {
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 	ck_pr_store_uint(&rw->writer, 0);
 	return;
 }
@@ -62,9 +62,11 @@ ck_rwlock_write_unlock(ck_rwlock_t *rw)
 CK_CC_INLINE static bool
 ck_rwlock_locked_writer(ck_rwlock_t *rw)
 {
+	bool r;
 
-	ck_pr_fence_load();
-	return ck_pr_load_uint(&rw->writer);
+	r = ck_pr_load_uint(&rw->writer);
+	ck_pr_fence_acquire();
+	return r;
 }
 
 CK_CC_INLINE static void
@@ -79,13 +81,12 @@ ck_rwlock_write_downgrade(ck_rwlock_t *rw)
 CK_CC_INLINE static bool
 ck_rwlock_locked(ck_rwlock_t *rw)
 {
-	unsigned int r;
-
-	ck_pr_fence_load();
-	r = ck_pr_load_uint(&rw->writer);
-	ck_pr_fence_load();
+	bool l;
 
-	return ck_pr_load_uint(&rw->n_readers) | r;
+	l = ck_pr_load_uint(&rw->n_readers) |
+	    ck_pr_load_uint(&rw->writer);
+	ck_pr_fence_acquire();
+	return l;
 }
 
 CK_CC_INLINE static bool
@@ -102,6 +103,7 @@ ck_rwlock_write_trylock(ck_rwlock_t *rw)
 		return false;
 	}
 
+	ck_pr_fence_lock();
 	return true;
 }
 
@@ -120,6 +122,7 @@ ck_rwlock_write_lock(ck_rwlock_t *rw)
 	while (ck_pr_load_uint(&rw->n_readers) != 0)
 		ck_pr_stall();
 
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -143,7 +146,7 @@ ck_rwlock_read_trylock(ck_rwlock_t *rw)
 	ck_pr_fence_atomic_load();
 
 	if (ck_pr_load_uint(&rw->writer) == 0) {
-		ck_pr_fence_load();
+		ck_pr_fence_lock();
 		return true;
 	}
 
@@ -230,6 +233,7 @@ ck_rwlock_recursive_write_lock(ck_rwlock_recursive_t *rw, unsigned int tid)
 	while (ck_pr_load_uint(&rw->rw.n_readers) != 0)
 		ck_pr_stall();
 
+	ck_pr_fence_lock();
 leave:
 	rw->wc++;
 	return;
@@ -254,6 +258,7 @@ ck_rwlock_recursive_write_trylock(ck_rwlock_recursive_t *rw, unsigned int tid)
 		return false;
 	}
 
+	ck_pr_fence_lock();
 leave:
 	rw->wc++;
 	return true;
@@ -264,7 +269,7 @@ ck_rwlock_recursive_write_unlock(ck_rwlock_recursive_t *rw)
 {
 
 	if (--rw->wc == 0) {
-		ck_pr_fence_release();
+		ck_pr_fence_unlock();
 		ck_pr_store_uint(&rw->rw.writer, 0);
 	}
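[Illustration, not part of this patch: the rwlock write path with trylock
and downgrade. The trylock now executes ck_pr_fence_lock() on success, so
the store below cannot be reordered above the acquisition on RMO.]

    #include <ck_rwlock.h>
    #include <stdbool.h>

    static ck_rwlock_t rw = CK_RWLOCK_INITIALIZER;
    static int value;

    static bool
    try_publish(int v)
    {

            if (ck_rwlock_write_trylock(&rw) == false)
                    return false;

            value = v;
            ck_rwlock_write_downgrade(&rw); /* Writer becomes a reader. */
            /* ... read-side validation ... */
            ck_rwlock_read_unlock(&rw);
            return true;
    }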
diff --git a/include/ck_swlock.h b/include/ck_swlock.h
index c2c99e6..8f8c67c 100644
--- a/include/ck_swlock.h
+++ b/include/ck_swlock.h
@@ -58,7 +58,7 @@ CK_CC_INLINE static void
 ck_swlock_write_unlock(ck_swlock_t *rw)
 {
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 	ck_pr_and_32(&rw->value, CK_SWLOCK_READER_MASK);
 	return;
 }
@@ -66,9 +66,11 @@ ck_swlock_write_unlock(ck_swlock_t *rw)
 CK_CC_INLINE static bool
 ck_swlock_locked_writer(ck_swlock_t *rw)
 {
+	bool r;
 
-	ck_pr_fence_load();
-	return ck_pr_load_32(&rw->value) & CK_SWLOCK_WRITER_BIT;
+	r = ck_pr_load_32(&rw->value) & CK_SWLOCK_WRITER_BIT;
+	ck_pr_fence_acquire();
+	return r;
 }
 
 CK_CC_INLINE static void
@@ -83,17 +85,21 @@ ck_swlock_write_downgrade(ck_swlock_t *rw)
 CK_CC_INLINE static bool
 ck_swlock_locked(ck_swlock_t *rw)
 {
+	bool r;
 
-	ck_pr_fence_load();
-	return ck_pr_load_32(&rw->value);
+	r = ck_pr_load_32(&rw->value);
+	ck_pr_fence_acquire();
+	return r;
 }
 
 CK_CC_INLINE static bool
 ck_swlock_write_trylock(ck_swlock_t *rw)
 {
+	bool r;
 
-	ck_pr_fence_acquire();
-	return ck_pr_cas_32(&rw->value, 0, CK_SWLOCK_WRITER_BIT);
+	r = ck_pr_cas_32(&rw->value, 0, CK_SWLOCK_WRITER_BIT);
+	ck_pr_fence_lock();
+	return r;
 }
 
 CK_ELIDE_TRYLOCK_PROTOTYPE(ck_swlock_write, ck_swlock_t,
@@ -107,7 +113,7 @@ ck_swlock_write_lock(ck_swlock_t *rw)
 	while (ck_pr_load_32(&rw->value) & CK_SWLOCK_READER_MASK)
 		ck_pr_stall();
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -126,7 +132,7 @@ ck_swlock_write_latch(ck_swlock_t *rw)
 		} while (ck_pr_load_32(&rw->value) != CK_SWLOCK_WRITER_BIT);
 	}
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -134,7 +140,7 @@ CK_CC_INLINE static void
 ck_swlock_write_unlatch(ck_swlock_t *rw)
 {
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 	ck_pr_store_32(&rw->value, 0);
 	return;
 }
@@ -155,15 +161,11 @@ ck_swlock_read_trylock(ck_swlock_t *rw)
 		return false;
 
 	l = ck_pr_faa_32(&rw->value, 1) & CK_SWLOCK_WRITER_MASK;
-	if (l == 0) {
-		ck_pr_fence_acquire();
-		return true;
-	}
-
 	if (l == CK_SWLOCK_WRITER_BIT)
 		ck_pr_dec_32(&rw->value);
 
-	return false;
+	ck_pr_fence_lock();
+	return l == 0;
 }
 
 CK_CC_INLINE static void
@@ -188,11 +190,10 @@ ck_swlock_read_lock(ck_swlock_t *rw)
 		ck_pr_dec_32(&rw->value);
 	}
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
-
 CK_CC_INLINE static bool
 ck_swlock_locked_reader(ck_swlock_t *rw)
 {
@@ -205,7 +206,7 @@ CK_CC_INLINE static void
 ck_swlock_read_unlock(ck_swlock_t *rw)
 {
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 	ck_pr_dec_32(&rw->value);
 	return;
 }
diff --git a/include/ck_tflock.h b/include/ck_tflock.h
index 7f15040..a1872ae 100644
--- a/include/ck_tflock.h
+++ b/include/ck_tflock.h
@@ -89,7 +89,7 @@ ck_tflock_ticket_write_lock(struct ck_tflock_ticket *lock)
 	while (ck_pr_load_32(&lock->completion) != previous)
 		ck_pr_stall();
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -97,7 +97,7 @@ CK_CC_INLINE static void
 ck_tflock_ticket_write_unlock(struct ck_tflock_ticket *lock)
 {
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 	ck_tflock_ticket_fca_32(&lock->completion, CK_TFLOCK_TICKET_WC_TOPMSK,
 	    CK_TFLOCK_TICKET_WC_INCR);
 	return;
@@ -108,15 +108,18 @@ ck_tflock_ticket_read_lock(struct ck_tflock_ticket *lock)
 {
 	uint32_t previous;
 
-	previous = ck_tflock_ticket_fca_32(&lock->request, CK_TFLOCK_TICKET_RC_TOPMSK,
-	    CK_TFLOCK_TICKET_RC_INCR) & CK_TFLOCK_TICKET_W_MASK;
+	previous = ck_tflock_ticket_fca_32(&lock->request,
+	    CK_TFLOCK_TICKET_RC_TOPMSK, CK_TFLOCK_TICKET_RC_INCR) &
+	    CK_TFLOCK_TICKET_W_MASK;
 
 	ck_pr_fence_atomic_load();
 
-	while ((ck_pr_load_uint(&lock->completion) & CK_TFLOCK_TICKET_W_MASK) != previous)
+	while ((ck_pr_load_32(&lock->completion) &
+	    CK_TFLOCK_TICKET_W_MASK) != previous) {
 		ck_pr_stall();
+	}
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -124,7 +127,7 @@ CK_CC_INLINE static void
 ck_tflock_ticket_read_unlock(struct ck_tflock_ticket *lock)
 {
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 	ck_tflock_ticket_fca_32(&lock->completion, CK_TFLOCK_TICKET_RC_TOPMSK,
 	    CK_TFLOCK_TICKET_RC_INCR);
 	return;
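[Illustration, not part of this patch: the swlock latch path, which is
single-writer by contract. write_latch drains readers and keeps them out
until unlatch; both now pair through the lock / unlock fences.]

    #include <ck_swlock.h>

    static ck_swlock_t sw = CK_SWLOCK_INITIALIZER;
    static int config;

    /* Only the designated writer thread may call this. */
    static void
    reconfigure(int v)
    {

            ck_swlock_write_latch(&sw);
            config = v;     /* No reader can observe a torn update. */
            ck_swlock_write_unlatch(&sw);
            return;
    }

    static int
    read_config(void)
    {
            int v;

            ck_swlock_read_lock(&sw);
            v = config;
            ck_swlock_read_unlock(&sw);
            return v;
    }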
diff --git a/include/spinlock/anderson.h b/include/spinlock/anderson.h
index e550583..49f349f 100644
--- a/include/spinlock/anderson.h
+++ b/include/spinlock/anderson.h
@@ -91,12 +91,12 @@ CK_CC_INLINE static bool
 ck_spinlock_anderson_locked(struct ck_spinlock_anderson *lock)
 {
 	unsigned int position;
+	bool r;
 
-	ck_pr_fence_load();
 	position = ck_pr_load_uint(&lock->next) & lock->mask;
-	ck_pr_fence_load();
-
-	return ck_pr_load_uint(&lock->slots[position].locked);
+	r = ck_pr_load_uint(&lock->slots[position].locked);
+	ck_pr_fence_acquire();
+	return r;
 }
 
 CK_CC_INLINE static void
@@ -131,13 +131,16 @@ ck_spinlock_anderson_lock(struct ck_spinlock_anderson *lock,
 	/* Serialize with respect to previous thread's store. */
 	ck_pr_fence_load();
 
-	/* Spin until slot is marked as unlocked. First slot is initialized to false. */
+	/*
+	 * Spin until slot is marked as unlocked. First slot is initialized to
+	 * false.
+	 */
 	while (ck_pr_load_uint(&lock->slots[position].locked) == true)
 		ck_pr_stall();
 
 	/* Prepare slot for potential re-use by another thread. */
 	ck_pr_store_uint(&lock->slots[position].locked, true);
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 
 	*slot = lock->slots + position;
 	return;
@@ -149,7 +152,7 @@ ck_spinlock_anderson_unlock(struct ck_spinlock_anderson *lock,
 {
 	unsigned int position;
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 
 	/* Mark next slot as available. */
 	if (lock->wrap == 0)
diff --git a/include/spinlock/cas.h b/include/spinlock/cas.h
index 25ef896..8d07f6e 100644
--- a/include/spinlock/cas.h
+++ b/include/spinlock/cas.h
@@ -60,18 +60,17 @@ ck_spinlock_cas_trylock(struct ck_spinlock_cas *lock)
 	unsigned int value;
 
 	value = ck_pr_fas_uint(&lock->value, true);
-	if (value == false)
-		ck_pr_fence_acquire();
-
+	ck_pr_fence_lock();
 	return !value;
 }
 
 CK_CC_INLINE static bool
 ck_spinlock_cas_locked(struct ck_spinlock_cas *lock)
 {
+	bool r = ck_pr_load_uint(&lock->value);
 
-	ck_pr_fence_load();
-	return ck_pr_load_uint(&lock->value);
+	ck_pr_fence_acquire();
+	return r;
 }
 
 CK_CC_INLINE static void
@@ -83,7 +82,7 @@ ck_spinlock_cas_lock(struct ck_spinlock_cas *lock)
 		ck_pr_stall();
 	}
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -95,7 +94,7 @@ ck_spinlock_cas_lock_eb(struct ck_spinlock_cas *lock)
 	while (ck_pr_cas_uint(&lock->value, false, true) == false)
 		ck_backoff_eb(&backoff);
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -104,7 +103,7 @@ ck_spinlock_cas_unlock(struct ck_spinlock_cas *lock)
 {
 
 	/* Set lock state to unlocked. */
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 	ck_pr_store_uint(&lock->value, false);
 	return;
 }
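[Illustration, not part of this patch: why the trylock paths become
branch-free. The fence is now issued unconditionally; on TSO targets such
as x86 it compiles down to a compiler barrier, and on RMO it is cheap
relative to the atomic operation, so the failed path loses nothing.]

    #include <ck_spinlock.h>
    #include <stdbool.h>

    static ck_spinlock_cas_t lock = CK_SPINLOCK_CAS_INITIALIZER;
    static unsigned long counter;

    static bool
    try_bump(void)
    {

            if (ck_spinlock_cas_trylock(&lock) == false)
                    return false;

            counter++;  /* Ordered after the unconditional lock fence. */
            ck_spinlock_cas_unlock(&lock);
            return true;
    }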
diff --git a/include/spinlock/clh.h b/include/spinlock/clh.h
index f6950f0..1a3067c 100644
--- a/include/spinlock/clh.h
+++ b/include/spinlock/clh.h
@@ -57,11 +57,12 @@ CK_CC_INLINE static bool
 ck_spinlock_clh_locked(struct ck_spinlock_clh **queue)
 {
 	struct ck_spinlock_clh *head;
+	bool r;
 
-	ck_pr_fence_load();
 	head = ck_pr_load_ptr(queue);
-	ck_pr_fence_load();
-	return ck_pr_load_uint(&head->wait);
+	r = ck_pr_load_uint(&head->wait);
+	ck_pr_fence_acquire();
+	return r;
 }
 
 CK_CC_INLINE static void
@@ -73,7 +74,10 @@ ck_spinlock_clh_lock(struct ck_spinlock_clh **queue, struct ck_spinlock_clh *thr
 	thread->wait = true;
 	ck_pr_fence_store_atomic();
 
-	/* Mark current request as last request. Save reference to previous request. */
+	/*
+	 * Mark current request as last request. Save reference to previous
+	 * request.
+	 */
 	previous = ck_pr_fas_ptr(queue, thread);
 	thread->previous = previous;
 
@@ -82,7 +86,7 @@ ck_spinlock_clh_lock(struct ck_spinlock_clh **queue, struct ck_spinlock_clh *thr
 	while (ck_pr_load_uint(&previous->wait) == true)
 		ck_pr_stall();
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -100,8 +104,10 @@ ck_spinlock_clh_unlock(struct ck_spinlock_clh **thread)
 	 */
 	previous = thread[0]->previous;
 
-	/* We have to pay this cost anyways, use it as a compiler barrier too. */
-	ck_pr_fence_release();
+	/*
+	 * We have to pay this cost anyways, use it as a compiler barrier too.
+	 */
+	ck_pr_fence_unlock();
 	ck_pr_store_uint(&(*thread)->wait, false);
 
 	/*
diff --git a/include/spinlock/dec.h b/include/spinlock/dec.h
index bc9f263..1a5ed51 100644
--- a/include/spinlock/dec.h
+++ b/include/spinlock/dec.h
@@ -62,20 +62,18 @@ ck_spinlock_dec_trylock(struct ck_spinlock_dec *lock)
 	unsigned int value;
 
 	value = ck_pr_fas_uint(&lock->value, 0);
-	if (value == 1) {
-		ck_pr_fence_acquire();
-		return true;
-	}
-
-	return false;
+	ck_pr_fence_lock();
+	return value == 1;
 }
 
 CK_CC_INLINE static bool
 ck_spinlock_dec_locked(struct ck_spinlock_dec *lock)
 {
+	bool r;
 
-	ck_pr_fence_load();
-	return ck_pr_load_uint(&lock->value) != 1;
+	r = ck_pr_load_uint(&lock->value) != 1;
+	ck_pr_fence_acquire();
+	return r;
 }
 
 CK_CC_INLINE static void
@@ -98,7 +96,7 @@ ck_spinlock_dec_lock(struct ck_spinlock_dec *lock)
 		ck_pr_stall();
 	}
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -116,7 +114,7 @@ ck_spinlock_dec_lock_eb(struct ck_spinlock_dec *lock)
 		ck_backoff_eb(&backoff);
 	}
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -124,9 +122,12 @@ CK_CC_INLINE static void
 ck_spinlock_dec_unlock(struct ck_spinlock_dec *lock)
 {
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 
-	/* Unconditionally set lock value to 1 so someone can decrement lock to 0. */
+	/*
+	 * Unconditionally set lock value to 1 so someone can decrement lock
+	 * to 0.
+	 */
 	ck_pr_store_uint(&lock->value, 1);
 	return;
 }
diff --git a/include/spinlock/fas.h b/include/spinlock/fas.h
index 4841a7f..184cdef 100644
--- a/include/spinlock/fas.h
+++ b/include/spinlock/fas.h
@@ -58,8 +58,7 @@ ck_spinlock_fas_trylock(struct ck_spinlock_fas *lock)
 	bool value;
 
 	value = ck_pr_fas_uint(&lock->value, true);
-	if (value == false)
-		ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 
 	return !value;
 }
@@ -67,9 +66,11 @@ ck_spinlock_fas_trylock(struct ck_spinlock_fas *lock)
 CK_CC_INLINE static bool
 ck_spinlock_fas_locked(struct ck_spinlock_fas *lock)
 {
+	bool r;
 
-	ck_pr_fence_load();
-	return ck_pr_load_uint(&lock->value);
+	r = ck_pr_load_uint(&lock->value);
+	ck_pr_fence_acquire();
+	return r;
 }
 
 CK_CC_INLINE static void
@@ -81,7 +82,7 @@ ck_spinlock_fas_lock(struct ck_spinlock_fas *lock)
 		ck_pr_stall();
 	}
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -93,7 +94,7 @@ ck_spinlock_fas_lock_eb(struct ck_spinlock_fas *lock)
 	while (ck_pr_fas_uint(&lock->value, true) == true)
 		ck_backoff_eb(&backoff);
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -101,7 +102,7 @@ CK_CC_INLINE static void
 ck_spinlock_fas_unlock(struct ck_spinlock_fas *lock)
 {
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 	ck_pr_store_uint(&lock->value, false);
 	return;
 }
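[Illustration, not part of this patch: a plain test-and-set spinlock with
exponential backoff guarding a trivial structure. ck_pr_fence_unlock()
precedes the release store in fas_unlock, so the update is visible before
the lock is observed as free.]

    #include <ck_spinlock.h>

    static ck_spinlock_fas_t flock = CK_SPINLOCK_FAS_INITIALIZER;
    static struct { unsigned int head, tail; } q;

    static void
    produce(void)
    {

            ck_spinlock_fas_lock_eb(&flock);  /* Spin with exponential backoff. */
            q.tail++;
            ck_spinlock_fas_unlock(&flock);
            return;
    }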
diff --git a/include/spinlock/hclh.h b/include/spinlock/hclh.h
index 3b61ec1..ad6ef16 100644
--- a/include/spinlock/hclh.h
+++ b/include/spinlock/hclh.h
@@ -62,11 +62,12 @@ CK_CC_INLINE static bool
 ck_spinlock_hclh_locked(struct ck_spinlock_hclh **queue)
 {
 	struct ck_spinlock_hclh *head;
+	bool r;
 
-	ck_pr_fence_load();
 	head = ck_pr_load_ptr(queue);
-	ck_pr_fence_load();
-	return ck_pr_load_uint(&head->wait);
+	r = ck_pr_load_uint(&head->wait);
+	ck_pr_fence_acquire();
+	return r;
 }
 
 CK_CC_INLINE static void
@@ -110,7 +111,7 @@ ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_queue,
 	while (ck_pr_load_uint(&previous->wait) == true)
 		ck_pr_stall();
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -129,7 +130,7 @@ ck_spinlock_hclh_unlock(struct ck_spinlock_hclh **thread)
 	previous = thread[0]->previous;
 
 	/* We have to pay this cost anyways, use it as a compiler barrier too. */
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 	ck_pr_store_uint(&(*thread)->wait, false);
 
 	/*
diff --git a/include/spinlock/mcs.h b/include/spinlock/mcs.h
index c876710..bc92e6e 100644
--- a/include/spinlock/mcs.h
+++ b/include/spinlock/mcs.h
@@ -57,25 +57,25 @@ CK_CC_INLINE static bool
 ck_spinlock_mcs_trylock(struct ck_spinlock_mcs **queue,
     struct ck_spinlock_mcs *node)
 {
+	bool r;
 
 	node->locked = true;
 	node->next = NULL;
 	ck_pr_fence_store_atomic();
 
-	if (ck_pr_cas_ptr(queue, NULL, node) == true) {
-		ck_pr_fence_acquire();
-		return true;
-	}
-
-	return false;
+	r = ck_pr_cas_ptr(queue, NULL, node);
+	ck_pr_fence_lock();
+	return r;
 }
 
 CK_CC_INLINE static bool
 ck_spinlock_mcs_locked(struct ck_spinlock_mcs **queue)
 {
+	bool r;
 
-	ck_pr_fence_load();
-	return ck_pr_load_ptr(queue) != NULL;
+	r = ck_pr_load_ptr(queue) != NULL;
+	ck_pr_fence_acquire();
+	return r;
 }
 
 CK_CC_INLINE static void
@@ -108,7 +108,7 @@ ck_spinlock_mcs_lock(struct ck_spinlock_mcs **queue,
 		ck_pr_stall();
 	}
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -118,7 +118,7 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue,
 {
 	struct ck_spinlock_mcs *next;
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 
 	next = ck_pr_load_ptr(&node->next);
 	if (next == NULL) {
diff --git a/include/spinlock/ticket.h b/include/spinlock/ticket.h
index d0286d5..806006f 100644
--- a/include/spinlock/ticket.h
+++ b/include/spinlock/ticket.h
@@ -88,12 +88,11 @@ ck_spinlock_ticket_locked(struct ck_spinlock_ticket *ticket)
 {
 	CK_SPINLOCK_TICKET_TYPE request, position;
 
-	ck_pr_fence_load();
-
 	request = CK_SPINLOCK_TICKET_LOAD(&ticket->value);
 	position = request & CK_SPINLOCK_TICKET_MASK;
 	request >>= CK_SPINLOCK_TICKET_SHIFT;
 
+	ck_pr_fence_acquire();
 	return request != position;
 }
 
@@ -115,7 +114,7 @@ ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
 		    CK_SPINLOCK_TICKET_MASK;
 	}
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -142,7 +141,7 @@ ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
 		ck_backoff_eb(&backoff);
 	}
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -163,7 +162,7 @@ ck_spinlock_ticket_trylock(struct ck_spinlock_ticket *ticket)
 		return false;
 	}
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return true;
 }
 
@@ -171,7 +170,7 @@ CK_CC_INLINE static void
 ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
 {
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 	CK_SPINLOCK_TICKET_INC((CK_SPINLOCK_TICKET_TYPE_BASE *)(void *)&ticket->value);
 	return;
 }
@@ -212,11 +211,12 @@ CK_CC_INLINE static bool
 ck_spinlock_ticket_locked(struct ck_spinlock_ticket *ticket)
 {
 	unsigned int request;
+	bool r;
 
-	ck_pr_fence_load();
-	request = ck_pr_load_uint(&ticket->next);
-	ck_pr_fence_load();
-	return ck_pr_load_uint(&ticket->position) != request;
+	r = ck_pr_load_uint(&ticket->position) !=
+	    ck_pr_load_uint(&ticket->next);
+	ck_pr_fence_acquire();
+	return r;
 }
 
 CK_CC_INLINE static void
@@ -235,7 +235,7 @@ ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
 	while (ck_pr_load_uint(&ticket->position) != request)
 		ck_pr_stall();
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -264,7 +264,7 @@ ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
 		ck_backoff_eb(&backoff);
 	}
 
-	ck_pr_fence_acquire();
+	ck_pr_fence_lock();
 	return;
 }
 
@@ -273,7 +273,7 @@ ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
 {
 	unsigned int update;
 
-	ck_pr_fence_release();
+	ck_pr_fence_unlock();
 
 	/*
	 * Update current ticket value so next lock request can proceed.