ck_spinlock: Migrate MCS to ck_pr_fence_X_Y.

This includes fixing acquire semantics on the mcs_lock fast path.
The fix adds one additional fence to the fast path, executed
post-acquisition, to provide acquire semantics.
Samy Al Bahra 12 years ago
parent 8540821f3f
commit 2ba3f59374
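
For context: a ck_pr_fence_X_Y operation orders prior operations of type X before subsequent operations of type Y, which lets the MCS code replace the heavier ck_pr_fence_memory()/ck_pr_fence_store() calls with targeted fences. Below is a minimal sketch (not part of this diff) of the two pairings the MCS paths rely on; the publish/consume split and the ready/payload variables are illustrative:

#include <ck_pr.h>

static unsigned int ready;
static unsigned int payload;

static void
publish(void)
{

	payload = 42;

	/* Order the plain store above before the atomic RMW below. */
	ck_pr_fence_store_atomic();
	ck_pr_fas_uint(&ready, 1);
	return;
}

static unsigned int
consume(void)
{

	while (ck_pr_load_uint(&ready) == 0)
		ck_pr_stall();

	/* Acquire: order the flag load above before the payload load below. */
	ck_pr_fence_load();
	return payload;
}

On TSO architectures such as x86 these fences typically reduce to compiler barriers; on weakly ordered architectures they emit real barrier instructions.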

@@ -658,9 +658,9 @@ CK_CC_INLINE static bool
 ck_spinlock_mcs_trylock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *node)
 {
 
-	ck_pr_store_uint(&node->locked, true);
-	ck_pr_store_ptr(&node->next, NULL);
-	ck_pr_fence_store();
+	node->locked = true;
+	node->next = NULL;
+	ck_pr_fence_store_atomic();
 
 	if (ck_pr_cas_ptr(queue, NULL, node) == true) {
 		ck_pr_fence_load();
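
A usage sketch for the trylock path above, assuming the ck_spinlock_mcs_t typedef and CK_SPINLOCK_MCS_INITIALIZER defined elsewhere in this header; the counter and function name are illustrative:

#include <stdbool.h>
#include <ck_spinlock.h>

static ck_spinlock_mcs_t lock = CK_SPINLOCK_MCS_INITIALIZER;
static unsigned long counter;

static bool
try_increment(void)
{
	struct ck_spinlock_mcs node;

	/* Fails immediately if the queue tail is non-NULL. */
	if (ck_spinlock_mcs_trylock(&lock, &node) == false)
		return false;

	counter++;
	ck_spinlock_mcs_unlock(&lock, &node);
	return true;
}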
@@ -686,24 +686,24 @@ ck_spinlock_mcs_lock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *node)
 	 * In the case that there is a successor, let them know they must wait
 	 * for us to unlock.
 	 */
-	ck_pr_store_uint(&node->locked, true);
-	ck_pr_store_ptr(&node->next, NULL);
+	node->locked = true;
+	node->next = NULL;
+	ck_pr_fence_store_atomic();
 
 	/*
 	 * Swap current tail with current lock request. If the swap operation
 	 * returns NULL, it means the queue was empty. If the queue was empty,
 	 * then the operation is complete.
 	 */
-	ck_pr_fence_memory();
 	previous = ck_pr_fas_ptr(queue, node);
-	if (previous == NULL)
-		return;
-
-	/* Let the previous lock holder know that we are waiting on them. */
-	ck_pr_store_ptr(&previous->next, node);
-	while (ck_pr_load_uint(&node->locked) == true)
-		ck_pr_stall();
+	if (previous != NULL) {
+		/* Let the previous lock holder know that we are waiting on them. */
+		ck_pr_store_ptr(&previous->next, node);
+		while (ck_pr_load_uint(&node->locked) == true)
+			ck_pr_stall();
+	}
 
+	ck_pr_fence_load();
 	return;
 }
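
The fence the commit message refers to is the unconditional ck_pr_fence_load() above: previously the previous == NULL fast path returned straight after the fetch-and-store, so on weakly ordered architectures a load inside the critical section could be satisfied before the lock was observed as held. A sketch of the kind of reader this protects; shared_data and the function name are illustrative:

#include <ck_spinlock.h>

static unsigned int shared_data;

static unsigned int
read_under_lock(ck_spinlock_mcs_t *lock)
{
	struct ck_spinlock_mcs node;
	unsigned int snapshot;

	ck_spinlock_mcs_lock(lock, &node);

	/*
	 * With the fast-path ck_pr_fence_load(), this load cannot be
	 * reordered before lock acquisition.
	 */
	snapshot = shared_data;
	ck_spinlock_mcs_unlock(lock, &node);
	return snapshot;
}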
@@ -712,6 +712,8 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *node)
 {
 	struct ck_spinlock_mcs *next;
 
+	ck_pr_fence_memory();
+
 	next = ck_pr_load_ptr(&node->next);
 	if (next == NULL) {
 		/*
@@ -721,7 +723,6 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *node)
 	 */
 	if (ck_pr_load_ptr(queue) == node &&
 	    ck_pr_cas_ptr(queue, node, NULL) == true) {
-		ck_pr_fence_memory();
 		return;
 	}
 
@@ -740,9 +741,7 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *node)
 	}
 
 	/* Allow the next lock operation to complete. */
-	ck_pr_fence_memory();
 	ck_pr_store_uint(&next->locked, false);
-
 	return;
 }
 #endif /* CK_F_SPINLOCK_MCS */
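
Finally, a self-contained usage sketch exercising the lock/unlock pair this diff touches; the pthread driver, thread count, and counter are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <ck_spinlock.h>

#define THREADS 4
#define ITERATIONS 100000

static ck_spinlock_mcs_t lock = CK_SPINLOCK_MCS_INITIALIZER;
static unsigned long counter;

static void *
worker(void *arg)
{
	struct ck_spinlock_mcs node;
	int i;

	(void)arg;
	for (i = 0; i < ITERATIONS; i++) {
		/*
		 * A stack node is safe: no other thread references it
		 * once ck_spinlock_mcs_unlock returns.
		 */
		ck_spinlock_mcs_lock(&lock, &node);
		counter++;
		ck_spinlock_mcs_unlock(&lock, &node);
	}

	return NULL;
}

int
main(void)
{
	pthread_t thread[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
		pthread_create(&thread[i], NULL, worker, NULL);

	for (i = 0; i < THREADS; i++)
		pthread_join(thread[i], NULL);

	/* Expect THREADS * ITERATIONS with correct fence placement. */
	printf("%lu\n", counter);
	return 0;
}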
