@@ -98,6 +98,7 @@
     struct type *data) \
 { \
 	unsigned int consumer, producer; \
+	unsigned int mask = ring->mask; \
 	\
 	consumer = ring->c_head; \
 	producer = ck_pr_load_uint(&ring->p_tail); \
@@ -106,7 +107,7 @@
 		return (false); \
 	\
 	ck_pr_fence_load(); \
-	*data = ring->ring[consumer & ring->mask]; \
+	*data = ring->ring[consumer & mask]; \
 	ck_pr_fence_store(); \
 	ck_pr_store_uint(&ring->c_head, consumer + 1); \
 	\
@@ -123,6 +124,7 @@
     struct type *data) \
 { \
 	unsigned int consumer, producer; \
+	unsigned int mask = ring->mask; \
 	\
 	consumer = ck_pr_load_uint(&ring->c_head); \
 	ck_pr_fence_load(); \
@@ -132,7 +134,7 @@
 		return false; \
 	\
 	ck_pr_fence_load(); \
-	*data = ring->ring[consumer & ring->mask]; \
+	*data = ring->ring[consumer & mask]; \
 	ck_pr_fence_memory(); \
 	return ck_pr_cas_uint(&ring->c_head, \
 	    consumer, \
@@ -284,6 +286,7 @@ CK_CC_INLINE static bool
 ck_ring_trydequeue_spmc(struct ck_ring *ring, void *data)
 {
 	unsigned int consumer, producer;
+	unsigned int mask = ring->mask;
 
 	consumer = ck_pr_load_uint(&ring->c_head);
 	ck_pr_fence_load();
@@ -293,7 +296,7 @@ ck_ring_trydequeue_spmc(struct ck_ring *ring, void *data)
 		return false;
 
 	ck_pr_fence_load();
-	ck_pr_store_ptr(data, ring->ring[consumer & ring->mask]);
+	ck_pr_store_ptr(data, ring->ring[consumer & mask]);
 	ck_pr_fence_memory();
 
 	return ck_pr_cas_uint(&ring->c_head, consumer, consumer + 1);
@@ -303,6 +306,7 @@ CK_CC_INLINE static bool
 ck_ring_dequeue_spmc(struct ck_ring *ring, void *data)
 {
 	unsigned int consumer, producer;
+	unsigned int mask = ring->mask;
 	void *r;
 
 	consumer = ck_pr_load_uint(&ring->c_head);
@@ -327,7 +331,7 @@ ck_ring_dequeue_spmc(struct ck_ring *ring, void *data)
 		 * volatile load to force volatile semantics while allowing
 		 * for r itself to remain aliased across the loop.
 		 */
-		r = ck_pr_load_ptr(&ring->ring[consumer & ring->mask]);
+		r = ck_pr_load_ptr(&ring->ring[consumer & mask]);
 
 		/* Serialize load with respect to head update. */
 		ck_pr_fence_memory();
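
Every hunk applies the same micro-optimization: ring->mask is read once into
a local mask at the top of each dequeue routine, and the slot index is then
computed as consumer & mask rather than consumer & ring->mask. The likely
motivation is that the ck_pr_* fences and atomic operations act as compiler
barriers, so the compiler cannot assume ring->mask is unchanged across them
and would otherwise reload it from memory; the local copy can stay in a
register. A minimal sketch of the pattern under assumed names (toy_ring and
toy_dequeue are illustrative, not part of ck_ring; the ck_pr_* operations are
elided to comments):

	#include <stdbool.h>

	/* Hypothetical ring with a power-of-two capacity; mask == size - 1. */
	struct toy_ring {
		unsigned int c_head;	/* consumer head */
		unsigned int p_tail;	/* producer tail */
		unsigned int mask;	/* size - 1 */
		void *ring[];
	};

	static bool
	toy_dequeue(struct toy_ring *ring, void **data)
	{
		unsigned int consumer = ring->c_head;
		unsigned int producer = ring->p_tail;	/* real code: ck_pr_load_uint + fence */
		unsigned int mask = ring->mask;		/* hoisted once, as in the diff */

		if (consumer == producer)
			return false;

		/* Index with the local copy instead of re-reading ring->mask. */
		*data = ring->ring[consumer & mask];
		ring->c_head = consumer + 1;		/* real code: ck_pr_store_uint */
		return true;
	}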