spinlock: Migrate to acquire / release interface.

ck_pring
Samy Al Bahra 12 years ago
parent 661f3948ed
commit e57bfd5865
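
The change replaces full memory fences (ck_pr_fence_memory) with the weaker acquire/release fences across the spinlock implementations: each lock path now ends with ck_pr_fence_acquire() once ownership is established (or, for the CLH-style queue locks, ck_pr_fence_load()), and each unlock path issues ck_pr_fence_release() before the store that publishes the lock as free. A minimal sketch of the resulting shape, using a hypothetical test-and-set lock (toy_spinlock) rather than any of the types touched by this patch:

#include <ck_pr.h>
#include <stdbool.h>

/* Hypothetical lock type, for illustration only. */
struct toy_spinlock {
	unsigned int value;
};

static void
toy_spinlock_lock(struct toy_spinlock *lock)
{

	/* Spin until the swap observes an unlocked value. */
	while (ck_pr_fas_uint(&lock->value, true) == true)
		ck_pr_stall();

	/*
	 * Acquire fence: the critical section's loads and stores may not be
	 * reordered before the swap that took the lock.
	 */
	ck_pr_fence_acquire();
	return;
}

static void
toy_spinlock_unlock(struct toy_spinlock *lock)
{

	/*
	 * Release fence: the critical section's loads and stores complete
	 * before the store that makes the lock visible as free.
	 */
	ck_pr_fence_release();
	ck_pr_store_uint(&lock->value, false);
	return;
}

The acquire fence keeps the critical section from moving above the operation that took the lock; the release fence keeps it from moving below the store that hands the lock off.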

@@ -137,7 +137,7 @@ ck_spinlock_anderson_lock(struct ck_spinlock_anderson *lock,
 	/* Prepare slot for potential re-use by another thread. */
 	ck_pr_store_uint(&lock->slots[position].locked, true);
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	*slot = lock->slots + position;
 	return;
@@ -149,7 +149,7 @@ ck_spinlock_anderson_unlock(struct ck_spinlock_anderson *lock,
 {
 	unsigned int position;
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	/* Mark next slot as available. */
 	if (lock->wrap == 0)

@@ -60,7 +60,7 @@ ck_spinlock_cas_trylock(struct ck_spinlock_cas *lock)
 	value = ck_pr_fas_uint(&lock->value, true);
 	if (value == false)
-		ck_pr_fence_memory();
+		ck_pr_fence_acquire();
 	return !value;
 }
@@ -82,7 +82,7 @@ ck_spinlock_cas_lock(struct ck_spinlock_cas *lock)
 		ck_pr_stall();
 	}
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
@@ -94,7 +94,7 @@ ck_spinlock_cas_lock_eb(struct ck_spinlock_cas *lock)
 	while (ck_pr_cas_uint(&lock->value, false, true) == false)
 		ck_backoff_eb(&backoff);
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
@@ -103,7 +103,7 @@ ck_spinlock_cas_unlock(struct ck_spinlock_cas *lock)
 {
 	/* Set lock state to unlocked. */
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	ck_pr_store_uint(&lock->value, false);
 	return;
 }

@@ -82,6 +82,7 @@ ck_spinlock_clh_lock(struct ck_spinlock_clh **queue, struct ck_spinlock_clh *thr
 	while (ck_pr_load_uint(&previous->wait) == true)
 		ck_pr_stall();
+	ck_pr_fence_load();
 	return;
 }
@@ -100,7 +101,7 @@ ck_spinlock_clh_unlock(struct ck_spinlock_clh **thread)
 	previous = thread[0]->previous;
 	/* We have to pay this cost anyways, use it as a compiler barrier too. */
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	ck_pr_store_uint(&(*thread)->wait, false);
 	/*

@@ -62,7 +62,7 @@ ck_spinlock_dec_trylock(struct ck_spinlock_dec *lock)
 	value = ck_pr_fas_uint(&lock->value, 0);
 	if (value == 1) {
-		ck_pr_fence_memory();
+		ck_pr_fence_acquire();
 		return true;
 	}
@@ -89,7 +89,6 @@ ck_spinlock_dec_lock(struct ck_spinlock_dec *lock)
 		 * UINT_MAX lock requests can happen while the lock is held.
 		 */
 		ck_pr_dec_uint_zero(&lock->value, &r);
-		ck_pr_fence_memory();
 		if (r == true)
 			break;
@@ -98,6 +97,7 @@ ck_spinlock_dec_lock(struct ck_spinlock_dec *lock)
 			ck_pr_stall();
 	}
+	ck_pr_fence_acquire();
 	return;
 }
@@ -115,7 +115,7 @@ ck_spinlock_dec_lock_eb(struct ck_spinlock_dec *lock)
 		ck_backoff_eb(&backoff);
 	}
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
@@ -123,7 +123,7 @@ CK_CC_INLINE static void
 ck_spinlock_dec_unlock(struct ck_spinlock_dec *lock)
 {
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	/* Unconditionally set lock value to 1 so someone can decrement lock to 0. */
 	ck_pr_store_uint(&lock->value, 1);
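
In ck_spinlock_dec_lock, the full fence that used to run on every decrement attempt is removed from the retry loop, and a single ck_pr_fence_acquire() is issued after the loop, so only the successful acquisition pays for the ordering. A sketch of that shape, with a hypothetical lock type (toy_dec_lock) and a simplified wait condition:

#include <ck_pr.h>
#include <stdbool.h>

/* Hypothetical stand-in: a value of 1 means unlocked, 0 means held. */
struct toy_dec_lock {
	unsigned int value;
};

static void
toy_dec_lock_acquire(struct toy_dec_lock *lock)
{
	bool r;

	for (;;) {
		/* At most one contender observes the decrement reaching zero. */
		ck_pr_dec_uint_zero(&lock->value, &r);
		if (r == true)
			break;

		/* Wait for the holder to restore the value to 1, then retry. */
		while (ck_pr_load_uint(&lock->value) != 1)
			ck_pr_stall();
	}

	/* The acquire fence is now paid once, on the successful path only. */
	ck_pr_fence_acquire();
	return;
}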

@@ -58,7 +58,7 @@ ck_spinlock_fas_trylock(struct ck_spinlock_fas *lock)
 	value = ck_pr_fas_uint(&lock->value, true);
 	if (value == false)
-		ck_pr_fence_memory();
+		ck_pr_fence_acquire();
 	return !value;
 }
@@ -80,7 +80,7 @@ ck_spinlock_fas_lock(struct ck_spinlock_fas *lock)
 		ck_pr_stall();
 	}
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
@@ -92,7 +92,7 @@ ck_spinlock_fas_lock_eb(struct ck_spinlock_fas *lock)
 	while (ck_pr_fas_uint(&lock->value, true) == true)
 		ck_backoff_eb(&backoff);
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
@@ -100,7 +100,7 @@ CK_CC_INLINE static void
 ck_spinlock_fas_unlock(struct ck_spinlock_fas *lock)
 {
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	ck_pr_store_uint(&lock->value, false);
 	return;
 }

@@ -110,6 +110,7 @@ ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_queue,
 	while (ck_pr_load_uint(&previous->wait) == true)
 		ck_pr_stall();
+	ck_pr_fence_load();
 	return;
 }
@@ -128,7 +129,7 @@ ck_spinlock_hclh_unlock(struct ck_spinlock_hclh **thread)
 	previous = thread[0]->previous;
 	/* We have to pay this cost anyways, use it as a compiler barrier too. */
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	ck_pr_store_uint(&(*thread)->wait, false);
 	/*

@@ -61,7 +61,7 @@ ck_spinlock_mcs_trylock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *
 	ck_pr_fence_store_atomic();
 	if (ck_pr_cas_ptr(queue, NULL, node) == true) {
-		ck_pr_fence_load();
+		ck_pr_fence_acquire();
 		return true;
 	}
@@ -111,7 +111,7 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *n
 {
 	struct ck_spinlock_mcs *next;
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	next = ck_pr_load_ptr(&node->next);
 	if (next == NULL) {
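
For the MCS trylock, the load fence that followed a successful compare-and-swap on the queue tail becomes an acquire fence, again only on the path that actually obtains the lock. A sketch of that trylock shape with a hypothetical node type (toy_mcs_node):

#include <ck_pr.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for a queue-lock node. */
struct toy_mcs_node {
	unsigned int locked;
	struct toy_mcs_node *next;
};

static bool
toy_mcs_trylock(struct toy_mcs_node **queue, struct toy_mcs_node *node)
{

	node->locked = true;
	node->next = NULL;

	/* Order node initialization before the compare-and-swap on the tail. */
	ck_pr_fence_store_atomic();

	/* The lock can only be taken without queueing if the tail is empty. */
	if (ck_pr_cas_ptr(queue, NULL, node) == true) {
		/* Acquire fence on the successful path only. */
		ck_pr_fence_acquire();
		return true;
	}

	return false;
}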

@@ -115,7 +115,7 @@ ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
 		    CK_SPINLOCK_TICKET_MASK;
 	}
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
@@ -142,7 +142,7 @@ ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
 		ck_backoff_eb(&backoff);
 	}
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
@@ -163,7 +163,7 @@ ck_spinlock_ticket_trylock(struct ck_spinlock_ticket *ticket)
 		return false;
 	}
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return true;
 }
@@ -171,7 +171,7 @@ CK_CC_INLINE static void
 ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
 {
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	CK_SPINLOCK_TICKET_INC((CK_SPINLOCK_TICKET_TYPE_BASE *)(void *)&ticket->value);
 	return;
 }
@@ -235,7 +235,7 @@ ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
 	while (ck_pr_load_uint(&ticket->position) != request)
 		ck_pr_stall();
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
@@ -264,7 +264,7 @@ ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
 		ck_backoff_eb(&backoff);
 	}
-	ck_pr_fence_memory();
+	ck_pr_fence_acquire();
 	return;
 }
@@ -273,7 +273,7 @@ ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
 {
 	unsigned int update;
-	ck_pr_fence_memory();
+	ck_pr_fence_release();
 	/*
 	 * Update current ticket value so next lock request can proceed.
