@@ -83,6 +83,8 @@ ck_brlock_write_lock(struct ck_brlock *br)
 	while (ck_pr_fas_uint(&br->writer, true) == true)
 		ck_pr_stall();
 
+	ck_pr_fence_memory();
+
 	/* The reader list is protected under the writer br. */
 	for (cursor = br->readers; cursor != NULL; cursor = cursor->next) {
 		while (ck_pr_load_uint(&cursor->n_readers) != 0)
@@ -115,6 +117,12 @@ ck_brlock_write_trylock(struct ck_brlock *br, unsigned int factor)
 		ck_pr_stall();
 	}
 
+	/*
+	 * We do not require a strict fence here as atomic RMW operations
+	 * are serializing.
+	 */
+	ck_pr_fence_memory();
+
 	for (cursor = br->readers; cursor != NULL; cursor = cursor->next) {
 		while (ck_pr_load_uint(&cursor->n_readers) != 0) {
 			if (++steps >= factor) {
@@ -180,6 +188,8 @@ ck_brlock_read_lock(struct ck_brlock *br, struct ck_brlock_reader *reader)
 			ck_pr_stall();
 
 		ck_pr_store_uint(&reader->n_readers, 1);
+		/* Loads are re-ordered with respect to prior stores. */
+		ck_pr_fence_strict_memory();
 
 		if (ck_pr_load_uint(&br->writer) == false)
 			break;
@@ -212,6 +222,8 @@ ck_brlock_read_trylock(struct ck_brlock *br,
 		}
 
 		ck_pr_store_uint(&reader->n_readers, 1);
+		/* Loads are re-ordered with respect to prior stores. */
+		ck_pr_fence_strict_memory();
 
 		if (ck_pr_load_uint(&br->writer) == false)
 			break;
@@ -230,6 +242,7 @@ CK_CC_INLINE static void
 ck_brlock_read_unlock(struct ck_brlock_reader *reader)
 {
 
+	ck_pr_fence_load();
 	ck_pr_store_uint(&reader->n_readers, reader->n_readers - 1);
 	return;
 }
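
For reference, the hunks above all land in the acquisition and release paths of Concurrency Kit's big-reader lock. Below is a minimal usage sketch of how these entry points fit together; any name not visible in this diff (the initializer, ck_brlock_read_register, ck_brlock_read_unregister, ck_brlock_write_unlock) is assumed from the surrounding ck_brlock API and may differ across versions.

#include <ck_brlock.h>

static ck_brlock_t lock = CK_BRLOCK_INITIALIZER;	/* assumed initializer, not in this diff */
static int shared_value;

/* Read side: each reader thread owns a record linked into the lock. */
static void
reader(void)
{
	ck_brlock_reader_t self;
	int snapshot;

	ck_brlock_read_register(&lock, &self);	/* assumed API, not in this diff */

	/* Sets self.n_readers, fences, then re-checks br->writer (per the hunks above). */
	ck_brlock_read_lock(&lock, &self);
	snapshot = shared_value;
	ck_brlock_read_unlock(&self);		/* load fence, then decrement n_readers */

	(void)snapshot;
	ck_brlock_read_unregister(&lock, &self);	/* assumed API, not in this diff */
}

/* Write side: serialize on br->writer, then wait for every reader to drain. */
static void
writer(void)
{

	ck_brlock_write_lock(&lock);
	shared_value++;
	ck_brlock_write_unlock(&lock);		/* assumed API, not in this diff */
}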