ck_spinlock: Migrate MCS to ck_pr_fence_X_Y.

This includes fixing acquire semantics on mcs_lock fast path.
This adds one additional fence to the fast path in order to
provide acquire semantics after the lock has been acquired.
ck_pring
Samy Al Bahra 12 years ago
parent 8540821f3f
commit 2ba3f59374

@ -658,9 +658,9 @@ CK_CC_INLINE static bool
ck_spinlock_mcs_trylock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *node) ck_spinlock_mcs_trylock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *node)
{ {
ck_pr_store_uint(&node->locked, true); node->locked = true;
ck_pr_store_ptr(&node->next, NULL); node->next = NULL;
ck_pr_fence_store(); ck_pr_fence_store_atomic();
if (ck_pr_cas_ptr(queue, NULL, node) == true) { if (ck_pr_cas_ptr(queue, NULL, node) == true) {
ck_pr_fence_load(); ck_pr_fence_load();
@ -686,24 +686,24 @@ ck_spinlock_mcs_lock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *nod
* In the case that there is a successor, let them know they must wait * In the case that there is a successor, let them know they must wait
* for us to unlock. * for us to unlock.
*/ */
ck_pr_store_uint(&node->locked, true); node->locked = true;
ck_pr_store_ptr(&node->next, NULL); node->next = NULL;
ck_pr_fence_store_atomic();
/* /*
* Swap current tail with current lock request. If the swap operation * Swap current tail with current lock request. If the swap operation
* returns NULL, it means the queue was empty. If the queue was empty, * returns NULL, it means the queue was empty. If the queue was empty,
* then the operation is complete. * then the operation is complete.
*/ */
ck_pr_fence_memory();
previous = ck_pr_fas_ptr(queue, node); previous = ck_pr_fas_ptr(queue, node);
if (previous == NULL) if (previous != NULL) {
return; /* Let the previous lock holder know that we are waiting on them. */
ck_pr_store_ptr(&previous->next, node);
/* Let the previous lock holder know that we are waiting on them. */ while (ck_pr_load_uint(&node->locked) == true)
ck_pr_store_ptr(&previous->next, node); ck_pr_stall();
while (ck_pr_load_uint(&node->locked) == true) }
ck_pr_stall();
ck_pr_fence_load();
return; return;
} }
@ -712,6 +712,8 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *n
{ {
struct ck_spinlock_mcs *next; struct ck_spinlock_mcs *next;
ck_pr_fence_memory();
next = ck_pr_load_ptr(&node->next); next = ck_pr_load_ptr(&node->next);
if (next == NULL) { if (next == NULL) {
/* /*
@ -721,7 +723,6 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *n
*/ */
if (ck_pr_load_ptr(queue) == node && if (ck_pr_load_ptr(queue) == node &&
ck_pr_cas_ptr(queue, node, NULL) == true) { ck_pr_cas_ptr(queue, node, NULL) == true) {
ck_pr_fence_memory();
return; return;
} }
@ -740,9 +741,7 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *n
} }
/* Allow the next lock operation to complete. */ /* Allow the next lock operation to complete. */
ck_pr_fence_memory();
ck_pr_store_uint(&next->locked, false); ck_pr_store_uint(&next->locked, false);
return; return;
} }
#endif /* CK_F_SPINLOCK_MCS */ #endif /* CK_F_SPINLOCK_MCS */

Loading…
Cancel
Save