spinlock: Migrate to acquire / release interface.

ck_pring
Samy Al Bahra 11 years ago
parent 661f3948ed
commit e57bfd5865
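This commit replaces the full ck_pr_fence_memory() barrier with ck_pr_fence_acquire() on the lock/trylock paths and ck_pr_fence_release() on the unlock paths of every spinlock implementation. Acquire/release ordering is all mutual exclusion needs, and on total-store-order targets such as x86-64 those fences can degrade to compiler barriers, whereas a full memory fence always implies a serializing instruction. A rough illustration of why the weaker interface is cheaper; the toy_* macros below are illustrative only and are not the ck_pr implementation:

/*
 * Toy macros only, not ck_pr: on a TSO target such as x86-64 an acquire
 * or release fence merely has to stop compiler re-ordering, while a full
 * memory fence needs a serializing instruction.
 */
#if defined(__x86_64__)
#define toy_fence_acquire() __asm__ __volatile__("" ::: "memory")
#define toy_fence_release() __asm__ __volatile__("" ::: "memory")
#define toy_fence_memory()  __asm__ __volatile__("mfence" ::: "memory")
#else
/* A weakly ordered target would emit real barrier instructions here. */
#define toy_fence_acquire() __sync_synchronize()
#define toy_fence_release() __sync_synchronize()
#define toy_fence_memory()  __sync_synchronize()
#endif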

@@ -137,7 +137,7 @@ ck_spinlock_anderson_lock(struct ck_spinlock_anderson *lock,
/* Prepare slot for potential re-use by another thread. */
ck_pr_store_uint(&lock->slots[position].locked, true);
-ck_pr_fence_memory();
+ck_pr_fence_acquire();
*slot = lock->slots + position;
return;
@@ -149,7 +149,7 @@ ck_spinlock_anderson_unlock(struct ck_spinlock_anderson *lock,
{
unsigned int position;
-ck_pr_fence_memory();
+ck_pr_fence_release();
/* Mark next slot as available. */
if (lock->wrap == 0)

@@ -60,7 +60,7 @@ ck_spinlock_cas_trylock(struct ck_spinlock_cas *lock)
value = ck_pr_fas_uint(&lock->value, true);
if (value == false)
-ck_pr_fence_memory();
+ck_pr_fence_acquire();
return !value;
}
@@ -82,7 +82,7 @@ ck_spinlock_cas_lock(struct ck_spinlock_cas *lock)
ck_pr_stall();
}
-ck_pr_fence_memory();
+ck_pr_fence_acquire();
return;
}
@@ -94,7 +94,7 @@ ck_spinlock_cas_lock_eb(struct ck_spinlock_cas *lock)
while (ck_pr_cas_uint(&lock->value, false, true) == false)
ck_backoff_eb(&backoff);
-ck_pr_fence_memory();
+ck_pr_fence_acquire();
return;
}
@@ -103,7 +103,7 @@ ck_spinlock_cas_unlock(struct ck_spinlock_cas *lock)
{
/* Set lock state to unlocked. */
-ck_pr_fence_memory();
+ck_pr_fence_release();
ck_pr_store_uint(&lock->value, false);
return;
}

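For callers nothing changes except cost: lock and trylock still provide acquire semantics once the lock is held, and unlock still provides release semantics before the unlocking store. A minimal usage sketch, assuming the public ck_spinlock.h header and the CK_SPINLOCK_CAS_INITIALIZER initializer:

#include <ck_spinlock.h>

/* Assumed initializer name; a zeroed lock is also unlocked. */
static ck_spinlock_cas_t lock = CK_SPINLOCK_CAS_INITIALIZER;
static unsigned long counter;

void
bump_counter(void)
{

	ck_spinlock_cas_lock(&lock);	/* acquire fence once the lock is held */
	counter++;			/* ordered after the acquire ... */
	ck_spinlock_cas_unlock(&lock);	/* ... and before the release */

	if (ck_spinlock_cas_trylock(&lock) == true) {
		/* Only the successful path pays the acquire fence. */
		ck_spinlock_cas_unlock(&lock);
	}
}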
@@ -82,6 +82,7 @@ ck_spinlock_clh_lock(struct ck_spinlock_clh **queue, struct ck_spinlock_clh *thr
while (ck_pr_load_uint(&previous->wait) == true)
ck_pr_stall();
+ck_pr_fence_load();
return;
}
@@ -100,7 +101,7 @@ ck_spinlock_clh_unlock(struct ck_spinlock_clh **thread)
previous = thread[0]->previous;
/* We have to pay this cost anyways, use it as a compiler barrier too. */
-ck_pr_fence_memory();
+ck_pr_fence_release();
ck_pr_store_uint(&(*thread)->wait, false);
/*

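In the CLH variants the acquiring operation is the load of previous->wait, so a ck_pr_fence_load() after the spin loop is enough to keep critical-section accesses from being hoisted above it, while a release fence before clearing wait replaces the full barrier on unlock. A usage sketch, assuming the queue/thread calling convention visible in the hunks and a ck_spinlock_clh_init(&queue, &unowned) initializer:

#include <ck_spinlock.h>

static struct ck_spinlock_clh *queue;	/* tail of the CLH queue */
static struct ck_spinlock_clh unowned;	/* initial, unowned node */

void
setup(void)
{

	/* Assumed init signature: hand the queue its initial node. */
	ck_spinlock_clh_init(&queue, &unowned);
}

void
with_lock(struct ck_spinlock_clh **node)
{

	/*
	 * Each thread supplies its own node; unlock hands back the
	 * predecessor's node for re-use, hence the pointer-to-pointer.
	 */
	ck_spinlock_clh_lock(&queue, *node);
	/* critical section, ordered by the load fence added above */
	ck_spinlock_clh_unlock(node);
}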
@@ -62,7 +62,7 @@ ck_spinlock_dec_trylock(struct ck_spinlock_dec *lock)
value = ck_pr_fas_uint(&lock->value, 0);
if (value == 1) {
-ck_pr_fence_memory();
+ck_pr_fence_acquire();
return true;
}
@@ -89,7 +89,6 @@ ck_spinlock_dec_lock(struct ck_spinlock_dec *lock)
* UINT_MAX lock requests can happen while the lock is held.
*/
ck_pr_dec_uint_zero(&lock->value, &r);
-ck_pr_fence_memory();
if (r == true)
break;
@@ -98,6 +97,7 @@ ck_spinlock_dec_lock(struct ck_spinlock_dec *lock)
ck_pr_stall();
}
+ck_pr_fence_acquire();
return;
}
@@ -115,7 +115,7 @@ ck_spinlock_dec_lock_eb(struct ck_spinlock_dec *lock)
ck_backoff_eb(&backoff);
}
-ck_pr_fence_memory();
+ck_pr_fence_acquire();
return;
}
@@ -123,7 +123,7 @@ CK_CC_INLINE static void
ck_spinlock_dec_unlock(struct ck_spinlock_dec *lock)
{
-ck_pr_fence_memory();
+ck_pr_fence_release();
/* Unconditionally set lock value to 1 so someone can decrement lock to 0. */
ck_pr_store_uint(&lock->value, 1);

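For the decrement-based lock the barrier is also hoisted out of the retry loop: instead of a full fence on every attempt, a single ck_pr_fence_acquire() is issued once the decrement that actually takes the lock succeeds. A sketch of the resulting shape; toy_dec_lock is illustrative only, the real routine is ck_spinlock_dec_lock above:

#include <ck_pr.h>
#include <stdbool.h>

static void
toy_dec_lock(unsigned int *value)
{
	bool r;

	for (;;) {
		/* Attempt to take the lock by decrementing 1 -> 0. */
		ck_pr_dec_uint_zero(value, &r);
		if (r == true)
			break;

		/* Lost the race; wait until the lock looks free again. */
		while (ck_pr_load_uint(value) != 1)
			ck_pr_stall();
	}

	/* One acquire fence after acquisition instead of a fence per attempt. */
	ck_pr_fence_acquire();
	return;
}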
@@ -58,7 +58,7 @@ ck_spinlock_fas_trylock(struct ck_spinlock_fas *lock)
value = ck_pr_fas_uint(&lock->value, true);
if (value == false)
-ck_pr_fence_memory();
+ck_pr_fence_acquire();
return !value;
}
@@ -80,7 +80,7 @@ ck_spinlock_fas_lock(struct ck_spinlock_fas *lock)
ck_pr_stall();
}
-ck_pr_fence_memory();
+ck_pr_fence_acquire();
return;
}
@@ -92,7 +92,7 @@ ck_spinlock_fas_lock_eb(struct ck_spinlock_fas *lock)
while (ck_pr_fas_uint(&lock->value, true) == true)
ck_backoff_eb(&backoff);
-ck_pr_fence_memory();
+ck_pr_fence_acquire();
return;
}
@@ -100,7 +100,7 @@ CK_CC_INLINE static void
ck_spinlock_fas_unlock(struct ck_spinlock_fas *lock)
{
-ck_pr_fence_memory();
+ck_pr_fence_release();
ck_pr_store_uint(&lock->value, false);
return;
}

@@ -110,6 +110,7 @@ ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_queue,
while (ck_pr_load_uint(&previous->wait) == true)
ck_pr_stall();
+ck_pr_fence_load();
return;
}
@@ -128,7 +129,7 @@ ck_spinlock_hclh_unlock(struct ck_spinlock_hclh **thread)
previous = thread[0]->previous;
/* We have to pay this cost anyways, use it as a compiler barrier too. */
-ck_pr_fence_memory();
+ck_pr_fence_release();
ck_pr_store_uint(&(*thread)->wait, false);
/*

@@ -61,7 +61,7 @@ ck_spinlock_mcs_trylock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *
ck_pr_fence_store_atomic();
if (ck_pr_cas_ptr(queue, NULL, node) == true) {
-ck_pr_fence_load();
+ck_pr_fence_acquire();
return true;
}
@@ -111,7 +111,7 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *n
{
struct ck_spinlock_mcs *next;
-ck_pr_fence_memory();
+ck_pr_fence_release();
next = ck_pr_load_ptr(&node->next);
if (next == NULL) {

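The MCS trylock previously relied on ck_pr_fence_load() after the winning compare-and-swap; ck_pr_fence_acquire() states the intent directly and also orders subsequent stores in the critical section. A usage sketch, assuming the queue/node convention shown in the hunks above:

#include <ck_spinlock.h>

static struct ck_spinlock_mcs *queue = NULL;	/* shared tail pointer */
static unsigned long shared_state;

void
touch_shared_state(void)
{
	struct ck_spinlock_mcs node;	/* per-acquisition queue node */

	ck_spinlock_mcs_lock(&queue, &node);
	shared_state++;		/* protected by acquire/release ordering */
	ck_spinlock_mcs_unlock(&queue, &node);
}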
@@ -115,7 +115,7 @@ ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
CK_SPINLOCK_TICKET_MASK;
}
-ck_pr_fence_memory();
+ck_pr_fence_acquire();
return;
}
@@ -142,7 +142,7 @@ ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
ck_backoff_eb(&backoff);
}
-ck_pr_fence_memory();
+ck_pr_fence_acquire();
return;
}
@@ -163,7 +163,7 @@ ck_spinlock_ticket_trylock(struct ck_spinlock_ticket *ticket)
return false;
}
-ck_pr_fence_memory();
+ck_pr_fence_acquire();
return true;
}
@@ -171,7 +171,7 @@ CK_CC_INLINE static void
ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
{
-ck_pr_fence_memory();
+ck_pr_fence_release();
CK_SPINLOCK_TICKET_INC((CK_SPINLOCK_TICKET_TYPE_BASE *)(void *)&ticket->value);
return;
}
@@ -235,7 +235,7 @@ ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
while (ck_pr_load_uint(&ticket->position) != request)
ck_pr_stall();
-ck_pr_fence_memory();
+ck_pr_fence_acquire();
return;
}
@@ -264,7 +264,7 @@ ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
ck_backoff_eb(&backoff);
}
-ck_pr_fence_memory();
+ck_pr_fence_acquire();
return;
}
@@ -273,7 +273,7 @@ ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
{
unsigned int update;
-ck_pr_fence_memory();
+ck_pr_fence_release();
/*
* Update current ticket value so next lock request can proceed.

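Both ticket-lock implementations in this header are updated: the fetch-and-add fast path (hunks around source line 115) and the fallback that spins on ticket->position (hunks around source line 235). A caller-level sketch, assuming ck_spinlock_ticket_t and the CK_SPINLOCK_TICKET_INITIALIZER name from ck_spinlock.h:

#include <ck_spinlock.h>

/* Assumed initializer name; a zeroed ticket lock is also unlocked. */
static ck_spinlock_ticket_t lk = CK_SPINLOCK_TICKET_INITIALIZER;
static unsigned long shared_counter;

void
bump_shared_counter(void)
{

	ck_spinlock_ticket_lock(&lk);
	shared_counter++;	/* cannot leak past the release fence in unlock */
	ck_spinlock_ticket_unlock(&lk);
}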