@@ -36,187 +36,6 @@
/*
 * Concurrent ring buffer.
 */
#define CK_RING(type, name) \
	struct ck_ring_##name { \
		unsigned int c_head; \
		char pad[CK_MD_CACHELINE - sizeof(unsigned int)]; \
		unsigned int p_tail; \
		char _pad[CK_MD_CACHELINE - sizeof(unsigned int)]; \
		unsigned int size; \
		unsigned int mask; \
		struct type *ring; \
	}; \
	CK_CC_INLINE static void \
	ck_ring_init_##name(struct ck_ring_##name *ring, \
	    struct type *buffer, \
	    unsigned int size) \
	{ \
	\
		ring->size = size; \
		ring->mask = size - 1; \
		ring->p_tail = 0; \
		ring->c_head = 0; \
		ring->ring = buffer; \
		return; \
	} \
	CK_CC_INLINE static unsigned int \
	ck_ring_size_##name(struct ck_ring_##name *ring) \
	{ \
		unsigned int c, p; \
	\
		c = ck_pr_load_uint(&ring->c_head); \
		p = ck_pr_load_uint(&ring->p_tail); \
		return (p - c) & ring->mask; \
	} \
	CK_CC_INLINE static unsigned int \
	ck_ring_capacity_##name(struct ck_ring_##name *ring) \
	{ \
	\
		return ring->size; \
	} \
	CK_CC_INLINE static bool \
	ck_ring_enqueue_spsc_size_##name(struct ck_ring_##name *ring, \
	    struct type *entry, \
	    unsigned int *size) \
	{ \
		unsigned int consumer, producer, delta; \
		unsigned int mask = ring->mask; \
	\
		consumer = ck_pr_load_uint(&ring->c_head); \
		producer = ring->p_tail; \
		delta = producer + 1; \
		*size = (producer - consumer) & mask; \
	\
		if ((delta & mask) == (consumer & mask)) \
			return false; \
	\
		ring->ring[producer & mask] = *entry; \
		ck_pr_fence_store(); \
		ck_pr_store_uint(&ring->p_tail, delta); \
		return true; \
	} \
	CK_CC_INLINE static bool \
	ck_ring_enqueue_spsc_##name(struct ck_ring_##name *ring, \
	    struct type *entry) \
	{ \
		unsigned int consumer, producer, delta; \
		unsigned int mask = ring->mask; \
	\
		consumer = ck_pr_load_uint(&ring->c_head); \
		producer = ring->p_tail; \
		delta = producer + 1; \
	\
		if ((delta & mask) == (consumer & mask)) \
			return false; \
	\
		ring->ring[producer & mask] = *entry; \
		ck_pr_fence_store(); \
		ck_pr_store_uint(&ring->p_tail, delta); \
		return true; \
	} \
	CK_CC_INLINE static bool \
	ck_ring_dequeue_spsc_##name(struct ck_ring_##name *ring, \
	    struct type *data) \
	{ \
		unsigned int consumer, producer; \
		unsigned int mask = ring->mask; \
	\
		consumer = ring->c_head; \
		producer = ck_pr_load_uint(&ring->p_tail); \
	\
		if (consumer == producer) \
			return false; \
	\
		ck_pr_fence_load(); \
		*data = ring->ring[consumer & mask]; \
		ck_pr_fence_store(); \
		ck_pr_store_uint(&ring->c_head, consumer + 1); \
	\
		return true; \
	} \
	CK_CC_INLINE static bool \
	ck_ring_enqueue_spmc_size_##name(struct ck_ring_##name *ring, \
	    void *entry, unsigned int *size) \
	{ \
	\
		return ck_ring_enqueue_spsc_size_##name(ring, entry, size); \
	} \
	CK_CC_INLINE static bool \
	ck_ring_enqueue_spmc_##name(struct ck_ring_##name *ring, void *entry) \
	{ \
	\
		return ck_ring_enqueue_spsc_##name(ring, entry); \
	} \
	CK_CC_INLINE static bool \
	ck_ring_trydequeue_spmc_##name(struct ck_ring_##name *ring, \
	    struct type *data) \
	{ \
		unsigned int consumer, producer; \
		unsigned int mask = ring->mask; \
	\
		consumer = ck_pr_load_uint(&ring->c_head); \
		ck_pr_fence_load(); \
		producer = ck_pr_load_uint(&ring->p_tail); \
	\
		if (consumer == producer) \
			return false; \
	\
		ck_pr_fence_load(); \
		*data = ring->ring[consumer & mask]; \
		ck_pr_fence_memory(); \
		return ck_pr_cas_uint(&ring->c_head, \
		    consumer, \
		    consumer + 1); \
	} \
	CK_CC_INLINE static bool \
	ck_ring_dequeue_spmc_##name(struct ck_ring_##name *ring, \
	    struct type *data) \
	{ \
		unsigned int consumer, producer; \
		unsigned int mask = ring->mask; \
	\
		consumer = ck_pr_load_uint(&ring->c_head); \
		do { \
			ck_pr_fence_load(); \
			producer = ck_pr_load_uint(&ring->p_tail); \
	\
			if (consumer == producer) \
				return false; \
	\
			ck_pr_fence_load(); \
			*data = ring->ring[consumer & mask]; \
			ck_pr_fence_memory(); \
		} while (ck_pr_cas_uint_value(&ring->c_head, \
		    consumer, \
		    consumer + 1, \
		    &consumer) == false); \
	\
		return true; \
	}
#define CK_RING_INSTANCE(name) \
	struct ck_ring_##name
#define CK_RING_INIT(name, object, buffer, size) \
	ck_ring_init_##name(object, buffer, size)
#define CK_RING_SIZE(name, object) \
	ck_ring_size_##name(object)
#define CK_RING_CAPACITY(name, object) \
	ck_ring_capacity_##name(object)
#define CK_RING_ENQUEUE_SPSC_SIZE(name, object, value, s) \
	ck_ring_enqueue_spsc_size_##name(object, value, s)
#define CK_RING_ENQUEUE_SPSC(name, object, value) \
	ck_ring_enqueue_spsc_##name(object, value)
#define CK_RING_DEQUEUE_SPSC(name, object, value) \
	ck_ring_dequeue_spsc_##name(object, value)
#define CK_RING_DEQUEUE_SPMC(name, object, value) \
	ck_ring_dequeue_spmc_##name(object, value)
#define CK_RING_TRYDEQUEUE_SPMC(name, object, value) \
	ck_ring_trydequeue_spmc_##name(object, value)
#define CK_RING_ENQUEUE_SPMC_SIZE(name, object, value, s) \
	ck_ring_enqueue_spmc_size_##name(object, value, s)
#define CK_RING_ENQUEUE_SPMC(name, object, value) \
	ck_ring_enqueue_spmc_##name(object, value)
struct ck_ring {
	unsigned int c_head;
@@ -225,7 +44,6 @@ struct ck_ring {
	char _pad[CK_MD_CACHELINE - sizeof(unsigned int)];
	unsigned int size;
	unsigned int mask;
	void **ring;
};
typedef struct ck_ring ck_ring_t;
@@ -246,6 +64,12 @@ ck_ring_capacity(struct ck_ring *ring)
	return ring->size;
}

struct ck_ring_buffer {
	void *ring;
};
typedef struct ck_ring_buffer ck_ring_buffer_t;
/*
 * Atomically enqueues the specified entry. Returns true on success, returns
 * false if the ck_ring is full. This operation only supports one active
@@ -258,12 +82,13 @@ ck_ring_capacity(struct ck_ring *ring)
 * writer.
 */
CK_CC_INLINE static bool
ck_ring_enqueue_spsc_size(struct ck_ring *ring,
ck_ring_enqueue_spsc_size(struct ck_ring *ring, ck_ring_buffer_t buf,
    void *entry,
    unsigned int *size)
{
	unsigned int consumer, producer, delta;
	unsigned int mask = ring->mask;
	void **ring_buf = buf.ring;

	consumer = ck_pr_load_uint(&ring->c_head);
	producer = ring->p_tail;
@@ -273,7 +98,7 @@ ck_ring_enqueue_spsc_size(struct ck_ring *ring,
	if ((delta & mask) == (consumer & mask))
		return false;

	ring->ring[producer & mask] = entry;
	ring_buf[producer & mask] = entry;

	/*
	 * Make sure to update slot value before indicating
@@ -291,10 +116,11 @@ ck_ring_enqueue_spsc_size(struct ck_ring *ring,
 * of ck_ring_dequeue_spsc.
 */
CK_CC_INLINE static bool
ck_ring_enqueue_spsc(struct ck_ring *ring, void *entry)
ck_ring_enqueue_spsc(struct ck_ring *ring, ck_ring_buffer_t buf, void *entry)
{
	unsigned int consumer, producer, delta;
	unsigned int mask = ring->mask;
	void **ring_buf = buf.ring;

	consumer = ck_pr_load_uint(&ring->c_head);
	producer = ring->p_tail;
@@ -303,7 +129,7 @@ ck_ring_enqueue_spsc(struct ck_ring *ring, void *entry)
	if ((delta & mask) == (consumer & mask))
		return false;

	ring->ring[producer & mask] = entry;
	ring_buf[producer & mask] = entry;

	/*
	 * Make sure to update slot value before indicating
@@ -318,10 +144,11 @@ ck_ring_enqueue_spsc(struct ck_ring *ring, void *entry)
 * Single consumer and single producer ring buffer dequeue (consumer).
 */
CK_CC_INLINE static bool
ck_ring_dequeue_spsc(struct ck_ring *ring, void *data)
ck_ring_dequeue_spsc(struct ck_ring *ring, ck_ring_buffer_t buf, void *data)
{
	unsigned int consumer, producer;
	unsigned int mask = ring->mask;
	void **ring_buf = buf.ring;

	consumer = ring->c_head;
	producer = ck_pr_load_uint(&ring->p_tail);
@@ -342,7 +169,7 @@ ck_ring_dequeue_spsc(struct ck_ring *ring, void *data)
	 * troublesome on platforms where sizeof(void *)
	 * is not guaranteed to be sizeof(T *).
	 */
	ck_pr_store_ptr(data, ring->ring[consumer & mask]);
	ck_pr_store_ptr(data, ring_buf[consumer & mask]);
	ck_pr_fence_store();
	ck_pr_store_uint(&ring->c_head, consumer + 1);
	return true;
@@ -360,12 +187,12 @@ ck_ring_dequeue_spsc(struct ck_ring *ring, void *data)
 * writer.
 */
CK_CC_INLINE static bool
ck_ring_enqueue_spmc_size(struct ck_ring *ring,
ck_ring_enqueue_spmc_size(struct ck_ring *ring, ck_ring_buffer_t buf,
    void *entry,
    unsigned int *size)
{

	return ck_ring_enqueue_spsc_size(ring, entry, size);
	return ck_ring_enqueue_spsc_size(ring, buf, entry, size);
}
/*
@@ -375,17 +202,18 @@ ck_ring_enqueue_spmc_size(struct ck_ring *ring,
 * invocations of ck_ring_dequeue_spmc.
 */
CK_CC_INLINE static bool
ck_ring_enqueue_spmc(struct ck_ring *ring, void *entry)
ck_ring_enqueue_spmc(struct ck_ring *ring, ck_ring_buffer_t buf, void *entry)
{

	return ck_ring_enqueue_spsc(ring, entry);
	return ck_ring_enqueue_spsc(ring, buf, entry);
}
CK_CC_INLINE static bool
ck_ring_trydequeue_spmc(struct ck_ring *ring, void *data)
ck_ring_trydequeue_spmc(struct ck_ring *ring, ck_ring_buffer_t buf, void *data)
{
	unsigned int consumer, producer;
	unsigned int mask = ring->mask;
	void **ring_buf = buf.ring;

	consumer = ck_pr_load_uint(&ring->c_head);
	ck_pr_fence_load();
@@ -395,18 +223,19 @@ ck_ring_trydequeue_spmc(struct ck_ring *ring, void *data)
		return false;

	ck_pr_fence_load();
	ck_pr_store_ptr(data, ring->ring[consumer & mask]);
	ck_pr_store_ptr(data, ring_buf[consumer & mask]);
	ck_pr_fence_memory();
	return ck_pr_cas_uint(&ring->c_head, consumer, consumer + 1);
}
CK_CC_INLINE static bool
ck_ring_dequeue_spmc(struct ck_ring *ring, void *data)
ck_ring_dequeue_spmc(struct ck_ring *ring, ck_ring_buffer_t buf, void *data)
{
	unsigned int consumer, producer;
	unsigned int mask = ring->mask;
	void *r;
	void **ring_buf = buf.ring;

	consumer = ck_pr_load_uint(&ring->c_head);
@@ -430,7 +259,7 @@ ck_ring_dequeue_spmc(struct ck_ring *ring, void *data)
		 * volatile load to force volatile semantics while allowing
		 * for r itself to remain aliased across the loop.
		 */
		r = ck_pr_load_ptr(&ring->ring[consumer & mask]);
		r = ck_pr_load_ptr(&ring_buf[consumer & mask]);

		/* Serialize load with respect to head update. */
		ck_pr_fence_memory();
@@ -448,15 +277,13 @@ ck_ring_dequeue_spmc(struct ck_ring *ring, void *data)
}

CK_CC_INLINE static void
ck_ring_init(struct ck_ring *ring, void *buffer, unsigned int size)
ck_ring_init(struct ck_ring *ring, unsigned int size)
{

	memset(buffer, 0, sizeof(void *) * size);
	ring->size = size;
	ring->mask = size - 1;
	ring->p_tail = 0;
	ring->c_head = 0;
	ring->ring = buffer;
	return;
}
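
For reference, a minimal single-producer/single-consumer usage sketch of the buffer-detached API shown in this diff. The slot count, array and variable names, and payload below are illustrative assumptions, not part of the patch; what it demonstrates is only what the hunks above establish: the slot storage is now supplied by the caller through a ck_ring_buffer_t on every enqueue/dequeue, ck_ring_init() takes just the ring and a power-of-two size, and dequeue writes the slot pointer through its void *data argument.

/* Sketch only: names and sizes below are assumptions, not part of the patch. */
#include <ck_ring.h>
#include <stdio.h>

#define SLOT_COUNT 1024			/* assumed capacity; must be a power of two */

static struct ck_ring ring;
static void *slots[SLOT_COUNT];		/* slot storage is now owned by the caller */
static ck_ring_buffer_t buf;

int
main(void)
{
	static char msg[] = "example";
	void *out;

	buf.ring = slots;			/* attach caller-provided storage */
	ck_ring_init(&ring, SLOT_COUNT);	/* no buffer argument anymore */

	/* Producer side: the detached buffer is passed with every operation. */
	if (ck_ring_enqueue_spsc(&ring, buf, msg) == false)
		return 1;

	/* Consumer side: same ring metadata, same detached buffer. */
	if (ck_ring_dequeue_spsc(&ring, buf, &out) == true)
		printf("dequeued: %s\n", (char *)out);

	return 0;
}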