@@ -74,7 +74,8 @@ ck_rwlock_write_trylock(ck_rwlock_t *rw)
 	if (ck_pr_fas_uint(&rw->writer, 1) != 0)
 		return false;
 
-	ck_pr_fence_memory();
+	ck_pr_fence_atomic_load();
+
 	if (ck_pr_load_uint(&rw->n_readers) != 0) {
 		ck_rwlock_write_unlock(rw);
 		return false;
@@ -90,7 +91,7 @@ ck_rwlock_write_lock(ck_rwlock_t *rw)
 	while (ck_pr_fas_uint(&rw->writer, 1) != 0)
 		ck_pr_stall();
 
-	ck_pr_fence_memory();
+	ck_pr_fence_atomic_load();
 
 	while (ck_pr_load_uint(&rw->n_readers) != 0)
 		ck_pr_stall();
@@ -111,18 +112,17 @@ ck_rwlock_read_trylock(ck_rwlock_t *rw)
 	 * Serialize with respect to concurrent write
 	 * lock operation.
 	 */
-	ck_pr_fence_memory();
-
-	if (ck_pr_load_uint(&rw->writer) == 0)
-		goto leave;
-
-	ck_pr_dec_uint(&rw->n_readers);
-	return false;
-
-leave:
-	ck_pr_fence_load();
-	return true;
+	ck_pr_fence_atomic_load();
+
+	if (ck_pr_load_uint(&rw->writer) == 0) {
+		/* Acquire semantics are necessary. */
+		ck_pr_fence_load();
+		return true;
+	}
+
+	ck_pr_dec_uint(&rw->n_readers);
+	return false;
 }
 
 CK_CC_INLINE static void
 ck_rwlock_read_lock(ck_rwlock_t *rw)
@@ -137,7 +137,8 @@ ck_rwlock_read_lock(ck_rwlock_t *rw)
 		 * Serialize with respect to concurrent write
 		 * lock operation.
 		 */
-		ck_pr_fence_memory();
+		ck_pr_fence_atomic_load();
+
 		if (ck_pr_load_uint(&rw->writer) == 0)
 			break;
 		ck_pr_dec_uint(&rw->n_readers);
@@ -180,7 +181,7 @@ ck_rwlock_recursive_write_lock(ck_rwlock_recursive_t *rw, unsigned int tid)
 	while (ck_pr_cas_uint(&rw->rw.writer, 0, tid) == false)
 		ck_pr_stall();
 
-	ck_pr_fence_memory();
+	ck_pr_fence_atomic_load();
 
 	while (ck_pr_load_uint(&rw->rw.n_readers) != 0)
 		ck_pr_stall();
@@ -202,7 +203,7 @@ ck_rwlock_recursive_write_trylock(ck_rwlock_recursive_t *rw, unsigned int tid)
 	if (ck_pr_cas_uint(&rw->rw.writer, 0, tid) == false)
 		return false;
 
-	ck_pr_fence_memory();
+	ck_pr_fence_atomic_load();
 
 	if (ck_pr_load_uint(&rw->rw.n_readers) != 0) {
 		ck_pr_store_uint(&rw->rw.writer, 0);
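
Every hunk follows the same pattern: an atomic read-modify-write (fas, inc, or cas)
must be ordered before a subsequent load of the opposing field, and for that an
atomic-to-load fence is sufficient; the full ck_pr_fence_memory() it replaces was
stronger than required. Below is a minimal sketch of both sides of that pattern,
assuming <ck_pr.h> is available; the toy_rwlock type and toy_* functions are
illustrative stand-ins, not ck_rwlock_t and its real API.

#include <ck_pr.h>
#include <stdbool.h>

/* Hypothetical stand-in; field names mirror those in the diff. */
struct toy_rwlock {
	unsigned int writer;
	unsigned int n_readers;
};

static bool
toy_write_trylock(struct toy_rwlock *rw)
{

	/* Atomic RMW: claim the writer slot. */
	if (ck_pr_fas_uint(&rw->writer, 1) != 0)
		return false;

	/*
	 * Order the fetch-and-store above before the load of n_readers
	 * below; a full memory fence would also order store-load and is
	 * stronger than this path needs.
	 */
	ck_pr_fence_atomic_load();

	if (ck_pr_load_uint(&rw->n_readers) != 0) {
		/* The real code calls ck_rwlock_write_unlock() here. */
		ck_pr_store_uint(&rw->writer, 0);
		return false;
	}

	return true;
}

static bool
toy_read_trylock(struct toy_rwlock *rw)
{

	/* Atomic RMW: announce this reader. */
	ck_pr_inc_uint(&rw->n_readers);

	/* Order the increment before the re-check of the writer flag. */
	ck_pr_fence_atomic_load();

	if (ck_pr_load_uint(&rw->writer) == 0) {
		/*
		 * Load-load fence: acquire semantics, so loads inside the
		 * read-side critical section cannot be reordered before
		 * the writer check.
		 */
		ck_pr_fence_load();
		return true;
	}

	/* A writer raced in; back out. */
	ck_pr_dec_uint(&rw->n_readers);
	return false;
}

On TSO targets such as x86 and x86-64 this change should matter in practice:
locked RMW instructions already serialize against later loads, so
ck_pr_fence_atomic_load() can reduce to a compiler barrier, whereas
ck_pr_fence_memory() must still emit a real store-load barrier (mfence).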