@@ -1,5 +1,6 @@
 /*
- * Copyright 2011-2014 Samy Al Bahra.
+ * Copyright 2014 Samy Al Bahra.
+ * Copyright 2014 Jaidev Sridhar.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -28,6 +29,7 @@
 #define _CK_SWLOCK_H
 
 #include <ck_elide.h>
+#include <ck_limits.h>
 #include <ck_pr.h>
 #include <stdbool.h>
 #include <stddef.h>
@@ -39,7 +41,7 @@ struct ck_swlock {
 typedef struct ck_swlock ck_swlock_t;
 
 #define CK_SWLOCK_INITIALIZER {0, 0}
-#define CK_SWLOCK_LATCH_BIT (1 << 31)
+#define CK_SWLOCK_LATCH_BIT (1UL << 31)
 #define CK_SWLOCK_READER_BITS (UINT32_MAX ^ CK_SWLOCK_LATCH_BIT)
 
 CK_CC_INLINE static void
@@ -48,7 +50,6 @@ ck_swlock_init(struct ck_swlock *rw)
 
 	rw->writer = 0;
 	rw->n_readers = 0;
-	ck_pr_fence_store();
 	return;
 }
 
@@ -56,6 +57,7 @@ CK_CC_INLINE static void
 ck_swlock_write_unlock(ck_swlock_t *rw)
 {
 
+	ck_pr_fence_release();
 	ck_pr_store_32(&rw->writer, 0);
 	return;
 }
@@ -64,6 +66,7 @@ CK_CC_INLINE static bool
 ck_swlock_locked_writer(ck_swlock_t *rw)
 {
 
+	ck_pr_fence_load();
 	return ck_pr_load_32(&rw->writer);
 }
 
@@ -79,11 +82,8 @@ ck_swlock_write_downgrade(ck_swlock_t *rw)
 CK_CC_INLINE static bool
 ck_swlock_locked(ck_swlock_t *rw)
 {
-	uint32_t r;
 
-	r = ck_pr_load_32(&rw->writer);
-
-	return ck_pr_load_32(&rw->n_readers) | r;
+	return ck_pr_load_32(&rw->n_readers) | ck_pr_load_32(&rw->writer);
 }
 
 CK_CC_INLINE static bool
@@ -127,10 +127,9 @@ ck_swlock_write_latch(ck_swlock_t *rw)
 
 	ck_pr_fence_atomic_load();
 
-	while (ck_pr_cas_32(&rw->n_readers, 0, CK_SWLOCK_LATCH_BIT) == false) {
-		/* Stall until readers have seen the latch and cleared */
-		ck_pr_stall();
-	}
+	/* Stall until readers have seen the latch and cleared. */
+	while (ck_pr_cas_32(&rw->n_readers, 0, CK_SWLOCK_LATCH_BIT) == false)
+		ck_pr_stall();
 
 	return;
 }
@@ -140,9 +139,7 @@ ck_swlock_write_unlatch(ck_swlock_t *rw)
 {
 
 	ck_pr_store_32(&rw->n_readers, 0);
-
 	ck_swlock_write_unlock(rw);
-
 	return;
 }
 
@@ -157,9 +154,10 @@ ck_swlock_read_trylock(ck_swlock_t *rw)
 	if (ck_pr_load_32(&rw->writer) != 0)
 		return false;
 
-	if (ck_pr_faa_32(&rw->n_readers, 1) & CK_SWLOCK_LATCH_BIT) {
+	ck_pr_fence_load_atomic();
+
+	if (ck_pr_faa_32(&rw->n_readers, 1) & CK_SWLOCK_LATCH_BIT)
 		return false;
-	}
 
 	/*
 	 * Serialize with respect to concurrent write
@@ -211,12 +209,15 @@ ck_swlock_read_latchlock(ck_swlock_t *rw)
 {
 
 	for (;;) {
 
 		while (ck_pr_load_32(&rw->writer) != 0)
 			ck_pr_stall();
 
+		/* Writer has latched, stall the reader */
 		if (ck_pr_faa_32(&rw->n_readers, 1) & CK_SWLOCK_LATCH_BIT) {
-			/* Writer has latched, stall the reader */
-
+			do {
+				ck_pr_stall();
+			} while (ck_pr_load_32(&rw->n_readers) & CK_SWLOCK_LATCH_BIT);
 			continue;
 		}
+
@@ -243,14 +244,14 @@ ck_swlock_locked_reader(ck_swlock_t *rw)
 {
 
 	ck_pr_fence_load();
-	return (ck_pr_load_32(&rw->n_readers) & CK_SWLOCK_READER_BITS);
+	return ck_pr_load_32(&rw->n_readers) & CK_SWLOCK_READER_BITS;
 }
 
 CK_CC_INLINE static void
 ck_swlock_read_unlock(ck_swlock_t *rw)
 {
 
-	ck_pr_fence_load_atomic();
+	ck_pr_fence_release();
 	ck_pr_dec_32(&rw->n_readers);
 	return;
 }
@@ -275,8 +276,7 @@ ck_swlock_recursive_write_lock(ck_swlock_recursive_t *rw)
 {
 
 	ck_pr_store_32(&rw->rw.writer, 1);
-
-	ck_pr_fence_atomic_load();
+	ck_pr_fence_store_load();
 
 	while (ck_pr_load_32(&rw->rw.n_readers) != 0)
 		ck_pr_stall();
@@ -288,9 +288,9 @@ ck_swlock_recursive_write_lock(ck_swlock_recursive_t *rw)
 CK_CC_INLINE static void
 ck_swlock_recursive_write_latch(ck_swlock_recursive_t *rw)
 {
-	ck_pr_store_32(&rw->rw.writer, 1);
 
-	ck_pr_fence_atomic_load();
+	ck_pr_store_32(&rw->rw.writer, 1);
+	ck_pr_fence_store_load();
 
 	while (ck_pr_cas_32(&rw->rw.n_readers, 0, CK_SWLOCK_LATCH_BIT) == false)
 		ck_pr_stall();
@@ -304,8 +304,7 @@ ck_swlock_recursive_write_trylock(ck_swlock_recursive_t *rw)
 {
 
 	ck_pr_store_32(&rw->rw.writer, 1);
-
-	ck_pr_fence_atomic_load();
+	ck_pr_fence_store_load();
 
 	if (ck_pr_load_32(&rw->rw.n_readers) != 0) {
 		ck_pr_store_32(&rw->rw.writer, 0);
@@ -320,21 +319,20 @@ CK_CC_INLINE static void
 ck_swlock_recursive_write_unlock(ck_swlock_recursive_t *rw)
 {
 
-	if (--rw->wc == 0) {
-		ck_pr_fence_release();
-		ck_pr_store_32(&rw->rw.writer, 0);
-	}
+	if (--rw->wc != 0)
+		return;
 
+	ck_pr_fence_release();
+	ck_pr_store_32(&rw->rw.writer, 0);
 	return;
 }
 
 CK_CC_INLINE static void
 ck_swlock_recursive_write_unlatch(ck_swlock_recursive_t *rw)
 {
-	ck_pr_store_32(&rw->rw.n_readers, 0);
 
+	ck_pr_store_32(&rw->rw.n_readers, 0);
 	ck_swlock_recursive_write_unlock(rw);
-
 	return;
 }
 
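Usage note (not part of the patch): a minimal sketch of how the single-writer lock touched by the hunks above might be driven, using only the entry points and macros that appear in this diff (CK_SWLOCK_INITIALIZER, ck_swlock_write_latch, ck_swlock_write_unlatch, ck_swlock_read_latchlock, ck_swlock_read_unlock). The include path, the shared counter, and the two helper functions are hypothetical illustration, not code from ck_swlock.h.

#include <ck_swlock.h>

/* One designated writer thread and any number of reader threads share these. */
static ck_swlock_t lock = CK_SWLOCK_INITIALIZER;
static unsigned long shared_counter;

/* Writer: latch out new readers, wait for active ones to drain, then mutate. */
static void
writer_update(void)
{

	ck_swlock_write_latch(&lock);
	shared_counter++;
	ck_swlock_write_unlatch(&lock);
	return;
}

/* Reader: read_latchlock spins while the writer holds the latch. */
static unsigned long
reader_snapshot(void)
{
	unsigned long value;

	ck_swlock_read_latchlock(&lock);
	value = shared_counter;
	ck_swlock_read_unlock(&lock);
	return value;
}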