From 2ba3f5937447133391199b3ff1bbc0cffde001c0 Mon Sep 17 00:00:00 2001
From: Samy Al Bahra
Date: Mon, 13 May 2013 16:24:22 -0400
Subject: [PATCH] ck_spinlock: Migrate MCS to ck_pr_fence_X_Y.

This includes fixing acquire semantics on the mcs_lock fast path.
This adds one additional fence to the fast path: acquire semantics
are now enforced by a fence executed post-acquisition.
---
 include/ck_spinlock.h | 31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

diff --git a/include/ck_spinlock.h b/include/ck_spinlock.h
index 6900c09..323de5c 100644
--- a/include/ck_spinlock.h
+++ b/include/ck_spinlock.h
@@ -658,9 +658,9 @@ CK_CC_INLINE static bool
 ck_spinlock_mcs_trylock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *node)
 {
 
-	ck_pr_store_uint(&node->locked, true);
-	ck_pr_store_ptr(&node->next, NULL);
-	ck_pr_fence_store();
+	node->locked = true;
+	node->next = NULL;
+	ck_pr_fence_store_atomic();
 
 	if (ck_pr_cas_ptr(queue, NULL, node) == true) {
 		ck_pr_fence_load();
@@ -686,24 +686,24 @@ ck_spinlock_mcs_lock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *nod
 	 * In the case that there is a successor, let them know they must wait
 	 * for us to unlock.
 	 */
-	ck_pr_store_uint(&node->locked, true);
-	ck_pr_store_ptr(&node->next, NULL);
+	node->locked = true;
+	node->next = NULL;
+	ck_pr_fence_store_atomic();
 
 	/*
 	 * Swap current tail with current lock request. If the swap operation
 	 * returns NULL, it means the queue was empty. If the queue was empty,
 	 * then the operation is complete.
 	 */
-	ck_pr_fence_memory();
 	previous = ck_pr_fas_ptr(queue, node);
-	if (previous == NULL)
-		return;
-
-	/* Let the previous lock holder know that we are waiting on them. */
-	ck_pr_store_ptr(&previous->next, node);
-	while (ck_pr_load_uint(&node->locked) == true)
-		ck_pr_stall();
+	if (previous != NULL) {
+		/* Let the previous lock holder know that we are waiting on them. */
+		ck_pr_store_ptr(&previous->next, node);
+		while (ck_pr_load_uint(&node->locked) == true)
+			ck_pr_stall();
+	}
 
+	ck_pr_fence_load();
 	return;
 }
 
@@ -712,6 +712,8 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *n
 {
 	struct ck_spinlock_mcs *next;
 
+	ck_pr_fence_memory();
+
 	next = ck_pr_load_ptr(&node->next);
 	if (next == NULL) {
 		/*
@@ -721,7 +723,6 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *n
 		 */
 		if (ck_pr_load_ptr(queue) == node &&
 		    ck_pr_cas_ptr(queue, node, NULL) == true) {
-			ck_pr_fence_memory();
 			return;
 		}
 
@@ -740,9 +741,7 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *n
 	}
 
 	/* Allow the next lock operation to complete. */
-	ck_pr_fence_memory();
 	ck_pr_store_uint(&next->locked, false);
-
 	return;
 }
 #endif /* CK_F_SPINLOCK_MCS */
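
For reference, below is a minimal usage sketch of the MCS interface touched
by this patch. It is illustrative only and not part of the change: the names
queue, node, counter and counter_increment are hypothetical, and only the
types and functions visible in the diff above are assumed.

#include <ck_spinlock.h>

/* Tail of the MCS queue; NULL means the lock is currently not held. */
static struct ck_spinlock_mcs *queue = NULL;
static unsigned long counter;

void
counter_increment(void)
{
	/*
	 * Per-acquisition queue node (hypothetical usage). Stack
	 * allocation is safe here because the node is released by
	 * the unlock call before the frame is torn down.
	 */
	struct ck_spinlock_mcs node;

	ck_spinlock_mcs_lock(&queue, &node);
	counter++;
	ck_spinlock_mcs_unlock(&queue, &node);
}

With this patch, the lock operation ends in ck_pr_fence_load() and the unlock
operation begins with ck_pr_fence_memory(), giving the acquire/release
pairing described in the commit message.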