ck_ring: add reserve and commit interface to enqueue.

This new interface allows for slot reservation, in order to avoid additional
copy overhead on the consumer side. The primary use case is the
type-specialized variant of ck_ring. The initial patch set does not yet
re-implement enqueue and dequeue in terms of reserve and commit; that
migration will come in a future commit.
awsm
Samy Al Bahra 5 years ago
parent 37007e0699
commit 1321f703c8

@ -84,6 +84,45 @@ ck_ring_init(struct ck_ring *ring, unsigned int size)
/* /*
* The _ck_ring_* namespace is internal only and must not used externally. * The _ck_ring_* namespace is internal only and must not used externally.
*/ */
/*
* This function will return a region of memory to write for the next value
* for a single producer.
*/
CK_CC_FORCE_INLINE static void *
_ck_ring_enqueue_reserve_sp(struct ck_ring *ring,
void *CK_CC_RESTRICT buffer,
unsigned int ts,
unsigned int *size)
{
const unsigned int mask = ring->mask;
unsigned int consumer, producer, delta;
consumer = ck_pr_load_uint(&ring->c_head);
producer = ring->p_tail;
delta = producer + 1;
if (size != NULL)
*size = (producer - consumer) & mask;
if (CK_CC_UNLIKELY((delta & mask) == (consumer & mask)))
return NULL;
return (char *)buffer + ts * (producer & mask);
}
/*
* This is to be called to commit and make visible a region of previously
* reserved with reverse_sp.
*/
CK_CC_FORCE_INLINE static void
_ck_ring_enqueue_commit_sp(struct ck_ring *ring)
{
ck_pr_fence_store();
ck_pr_store_uint(&ring->p_tail, ring->p_tail + 1);
return;
}
CK_CC_FORCE_INLINE static bool CK_CC_FORCE_INLINE static bool
_ck_ring_enqueue_sp(struct ck_ring *ring, _ck_ring_enqueue_sp(struct ck_ring *ring,
void *CK_CC_RESTRICT buffer, void *CK_CC_RESTRICT buffer,
@ -163,6 +202,65 @@ _ck_ring_dequeue_sc(struct ck_ring *ring,
return true; return true;
} }
CK_CC_FORCE_INLINE static void *
_ck_ring_enqueue_reserve_mp(struct ck_ring *ring,
void *buffer,
unsigned int ts,
unsigned int *ticket,
unsigned int *size)
{
const unsigned int mask = ring->mask;
unsigned int producer, consumer, delta;
producer = ck_pr_load_uint(&ring->p_head);
for (;;) {
ck_pr_fence_load();
consumer = ck_pr_load_uint(&ring->c_head);
delta = producer + 1;
if (CK_CC_LIKELY((producer - consumer) < mask)) {
if (ck_pr_cas_uint_value(&ring->p_head,
producer, delta, &producer) == true) {
break;
}
} else {
unsigned int new_producer;
ck_pr_fence_load();
new_producer = ck_pr_load_uint(&ring->p_head);
if (producer == new_producer) {
if (size != NULL)
*size = (producer - consumer) & mask;
return false;
}
producer = new_producer;
}
}
*ticket = producer;
if (size != NULL)
*size = (producer - consumer) & mask;
return (char *)buffer + ts * (producer & mask);
}
CK_CC_FORCE_INLINE static void
_ck_ring_enqueue_commit_mp(struct ck_ring *ring, unsigned int producer)
{
while (ck_pr_load_uint(&ring->p_tail) != producer)
ck_pr_stall();
ck_pr_fence_store();
ck_pr_store_uint(&ring->p_tail, producer + 1);
return;
}
CK_CC_FORCE_INLINE static bool CK_CC_FORCE_INLINE static bool
_ck_ring_enqueue_mp(struct ck_ring *ring, _ck_ring_enqueue_mp(struct ck_ring *ring,
void *buffer, void *buffer,
@ -354,6 +452,33 @@ ck_ring_enqueue_spsc(struct ck_ring *ring,
&entry, sizeof(entry), NULL); &entry, sizeof(entry), NULL);
} }
CK_CC_INLINE static void *
ck_ring_enqueue_reserve_spsc_size(struct ck_ring *ring,
struct ck_ring_buffer *buffer,
unsigned int *size)
{
return _ck_ring_enqueue_reserve_sp(ring, buffer, sizeof(void *),
size);
}
CK_CC_INLINE static void *
ck_ring_enqueue_reserve_spsc(struct ck_ring *ring,
struct ck_ring_buffer *buffer)
{
return _ck_ring_enqueue_reserve_sp(ring, buffer, sizeof(void *),
NULL);
}
CK_CC_INLINE static void
ck_ring_enqueue_commit_spsc(struct ck_ring *ring)
{
_ck_ring_enqueue_commit_sp(ring);
return;
}
CK_CC_INLINE static bool CK_CC_INLINE static bool
ck_ring_dequeue_spsc(struct ck_ring *ring, ck_ring_dequeue_spsc(struct ck_ring *ring,
const struct ck_ring_buffer *buffer, const struct ck_ring_buffer *buffer,
@ -375,8 +500,7 @@ ck_ring_enqueue_mpmc(struct ck_ring *ring,
const void *entry) const void *entry)
{ {
return _ck_ring_enqueue_mp(ring, buffer, &entry, return _ck_ring_enqueue_mp(ring, buffer, &entry, sizeof(entry), NULL);
sizeof(entry), NULL);
} }
CK_CC_INLINE static bool CK_CC_INLINE static bool
@ -386,8 +510,37 @@ ck_ring_enqueue_mpmc_size(struct ck_ring *ring,
unsigned int *size) unsigned int *size)
{ {
return _ck_ring_enqueue_mp_size(ring, buffer, &entry, return _ck_ring_enqueue_mp_size(ring, buffer, &entry, sizeof(entry),
sizeof(entry), size); size);
}
CK_CC_INLINE static void *
ck_ring_enqueue_reserve_mpmc(struct ck_ring *ring,
struct ck_ring_buffer *buffer,
unsigned int *ticket)
{
return _ck_ring_enqueue_reserve_mp(ring, buffer, sizeof(void *),
ticket, NULL);
}
CK_CC_INLINE static void *
ck_ring_enqueue_reserve_mpmc_size(struct ck_ring *ring,
struct ck_ring_buffer *buffer,
unsigned int *ticket,
unsigned int *size)
{
return _ck_ring_enqueue_reserve_mp(ring, buffer, sizeof(void *),
ticket, size);
}
CK_CC_INLINE static void
ck_ring_enqueue_commit_mpmc(struct ck_ring *ring, unsigned int ticket)
{
_ck_ring_enqueue_commit_mp(ring, ticket);
return;
} }
CK_CC_INLINE static bool CK_CC_INLINE static bool
@ -415,6 +568,31 @@ ck_ring_dequeue_mpmc(struct ck_ring *ring,
* ring buffer containing pointers. Correctness is provided for any number of * ring buffer containing pointers. Correctness is provided for any number of
* consumers with up to one concurrent producer. * consumers with up to one concurrent producer.
*/ */
CK_CC_INLINE static void *
ck_ring_enqueue_reserve_spmc_size(struct ck_ring *ring,
struct ck_ring_buffer *buffer,
unsigned int *size)
{
return _ck_ring_enqueue_reserve_sp(ring, buffer, sizeof(void *), size);
}
CK_CC_INLINE static void *
ck_ring_enqueue_reserve_spmc(struct ck_ring *ring,
struct ck_ring_buffer *buffer)
{
return _ck_ring_enqueue_reserve_sp(ring, buffer, sizeof(void *), NULL);
}
CK_CC_INLINE static void
ck_ring_enqueue_commit_spmc(struct ck_ring *ring)
{
_ck_ring_enqueue_commit_sp(ring);
return;
}
CK_CC_INLINE static bool CK_CC_INLINE static bool
ck_ring_enqueue_spmc_size(struct ck_ring *ring, ck_ring_enqueue_spmc_size(struct ck_ring *ring,
struct ck_ring_buffer *buffer, struct ck_ring_buffer *buffer,
@ -459,6 +637,35 @@ ck_ring_dequeue_spmc(struct ck_ring *ring,
* ring buffer containing pointers. Correctness is provided for any number of * ring buffer containing pointers. Correctness is provided for any number of
* producers with up to one concurrent consumers. * producers with up to one concurrent consumers.
*/ */
CK_CC_INLINE static void *
ck_ring_enqueue_reserve_mpsc(struct ck_ring *ring,
struct ck_ring_buffer *buffer,
unsigned int *ticket)
{
return _ck_ring_enqueue_reserve_mp(ring, buffer, sizeof(void *),
ticket, NULL);
}
CK_CC_INLINE static void *
ck_ring_enqueue_reserve_mpsc_size(struct ck_ring *ring,
struct ck_ring_buffer *buffer,
unsigned int *ticket,
unsigned int *size)
{
return _ck_ring_enqueue_reserve_mp(ring, buffer, sizeof(void *),
ticket, size);
}
CK_CC_INLINE static void
ck_ring_enqueue_commit_mpsc(struct ck_ring *ring, unsigned int ticket)
{
_ck_ring_enqueue_commit_mp(ring, ticket);
return;
}
CK_CC_INLINE static bool CK_CC_INLINE static bool
ck_ring_enqueue_mpsc(struct ck_ring *ring, ck_ring_enqueue_mpsc(struct ck_ring *ring,
struct ck_ring_buffer *buffer, struct ck_ring_buffer *buffer,
@ -495,6 +702,25 @@ ck_ring_dequeue_mpsc(struct ck_ring *ring,
* values of a particular type in the ring the buffer. * values of a particular type in the ring the buffer.
*/ */
#define CK_RING_PROTOTYPE(name, type) \ #define CK_RING_PROTOTYPE(name, type) \
CK_CC_INLINE static struct type * \
ck_ring_enqueue_reserve_spsc_##name(struct ck_ring *a, \
struct type *b) \
{ \
\
return _ck_ring_enqueue_reserve_sp(a, b, \
sizeof(struct type), NULL); \
} \
\
CK_CC_INLINE static struct type * \
ck_ring_enqueue_reserve_spsc_size_##name(struct ck_ring *a, \
struct type *b, \
unsigned int *c) \
{ \
\
return _ck_ring_enqueue_reserve_sp(a, b, \
sizeof(struct type), c); \
} \
\
CK_CC_INLINE static bool \ CK_CC_INLINE static bool \
ck_ring_enqueue_spsc_size_##name(struct ck_ring *a, \ ck_ring_enqueue_spsc_size_##name(struct ck_ring *a, \
struct type *b, \ struct type *b, \
@ -526,6 +752,25 @@ ck_ring_dequeue_spsc_##name(struct ck_ring *a, \
sizeof(struct type)); \ sizeof(struct type)); \
} \ } \
\ \
CK_CC_INLINE static struct type * \
ck_ring_enqueue_reserve_spmc_##name(struct ck_ring *a, \
struct type *b) \
{ \
\
return _ck_ring_enqueue_reserve_sp(a, b, \
sizeof(struct type), NULL); \
} \
\
CK_CC_INLINE static struct type * \
ck_ring_enqueue_reserve_spmc_size_##name(struct ck_ring *a, \
struct type *b, \
unsigned int *c) \
{ \
\
return _ck_ring_enqueue_reserve_sp(a, b, \
sizeof(struct type), c); \
} \
\
CK_CC_INLINE static bool \ CK_CC_INLINE static bool \
ck_ring_enqueue_spmc_size_##name(struct ck_ring *a, \ ck_ring_enqueue_spmc_size_##name(struct ck_ring *a, \
struct type *b, \ struct type *b, \
@ -567,6 +812,27 @@ ck_ring_dequeue_spmc_##name(struct ck_ring *a, \
sizeof(struct type)); \ sizeof(struct type)); \
} \ } \
\ \
CK_CC_INLINE static struct type * \
ck_ring_enqueue_reserve_mpsc_##name(struct ck_ring *a, \
struct type *b, \
unsigned int *c) \
{ \
\
return _ck_ring_enqueue_reserve_mp(a, b, \
sizeof(struct type), c, NULL); \
} \
\
CK_CC_INLINE static struct type * \
ck_ring_enqueue_reserve_mpsc_size_##name(struct ck_ring *a, \
struct type *b, \
unsigned int *c, \
unsigned int *d) \
{ \
\
return _ck_ring_enqueue_reserve_mp(a, b, \
sizeof(struct type), c, d); \
} \
\
CK_CC_INLINE static bool \ CK_CC_INLINE static bool \
ck_ring_enqueue_mpsc_##name(struct ck_ring *a, \ ck_ring_enqueue_mpsc_##name(struct ck_ring *a, \
struct type *b, \ struct type *b, \
@ -598,6 +864,27 @@ ck_ring_dequeue_mpsc_##name(struct ck_ring *a, \
sizeof(struct type)); \ sizeof(struct type)); \
} \ } \
\ \
CK_CC_INLINE static struct type * \
ck_ring_enqueue_reserve_mpmc_##name(struct ck_ring *a, \
struct type *b, \
unsigned int *c) \
{ \
\
return _ck_ring_enqueue_reserve_mp(a, b, \
sizeof(struct type), c, NULL); \
} \
\
CK_CC_INLINE static struct type * \
ck_ring_enqueue_reserve_mpmc_size_##name(struct ck_ring *a, \
struct type *b, \
unsigned int *c, \
unsigned int *d) \
{ \
\
return _ck_ring_enqueue_reserve_mp(a, b, \
sizeof(struct type), c, d); \
} \
\
CK_CC_INLINE static bool \ CK_CC_INLINE static bool \
ck_ring_enqueue_mpmc_size_##name(struct ck_ring *a, \ ck_ring_enqueue_mpmc_size_##name(struct ck_ring *a, \
struct type *b, \ struct type *b, \
@ -646,6 +933,10 @@ ck_ring_dequeue_mpmc_##name(struct ck_ring *a, \
ck_ring_enqueue_spsc_##name(a, b, c) ck_ring_enqueue_spsc_##name(a, b, c)
#define CK_RING_ENQUEUE_SPSC_SIZE(name, a, b, c, d) \ #define CK_RING_ENQUEUE_SPSC_SIZE(name, a, b, c, d) \
ck_ring_enqueue_spsc_size_##name(a, b, c, d) ck_ring_enqueue_spsc_size_##name(a, b, c, d)
#define CK_RING_ENQUEUE_RESERVE_SPSC(name, a, b, c) \
ck_ring_enqueue_reserve_spsc_##name(a, b, c)
#define CK_RING_ENQUEUE_RESERVE_SPSC_SIZE(name, a, b, c, d) \
ck_ring_enqueue_reserve_spsc_size_##name(a, b, c, d)
#define CK_RING_DEQUEUE_SPSC(name, a, b, c) \ #define CK_RING_DEQUEUE_SPSC(name, a, b, c) \
ck_ring_dequeue_spsc_##name(a, b, c) ck_ring_dequeue_spsc_##name(a, b, c)
@ -656,6 +947,10 @@ ck_ring_dequeue_mpmc_##name(struct ck_ring *a, \
ck_ring_enqueue_spmc_##name(a, b, c) ck_ring_enqueue_spmc_##name(a, b, c)
#define CK_RING_ENQUEUE_SPMC_SIZE(name, a, b, c, d) \ #define CK_RING_ENQUEUE_SPMC_SIZE(name, a, b, c, d) \
ck_ring_enqueue_spmc_size_##name(a, b, c, d) ck_ring_enqueue_spmc_size_##name(a, b, c, d)
#define CK_RING_ENQUEUE_RESERVE_SPMC(name, a, b, c) \
ck_ring_enqueue_reserve_spmc_##name(a, b, c)
#define CK_RING_ENQUEUE_RESERVE_SPMC_SIZE(name, a, b, c, d) \
ck_ring_enqueue_reserve_spmc_size_##name(a, b, c, d)
#define CK_RING_TRYDEQUEUE_SPMC(name, a, b, c) \ #define CK_RING_TRYDEQUEUE_SPMC(name, a, b, c) \
ck_ring_trydequeue_spmc_##name(a, b, c) ck_ring_trydequeue_spmc_##name(a, b, c)
#define CK_RING_DEQUEUE_SPMC(name, a, b, c) \ #define CK_RING_DEQUEUE_SPMC(name, a, b, c) \
@ -669,6 +964,10 @@ ck_ring_dequeue_mpmc_##name(struct ck_ring *a, \
ck_ring_enqueue_mpsc_##name(a, b, c) ck_ring_enqueue_mpsc_##name(a, b, c)
#define CK_RING_ENQUEUE_MPSC_SIZE(name, a, b, c, d) \ #define CK_RING_ENQUEUE_MPSC_SIZE(name, a, b, c, d) \
ck_ring_enqueue_mpsc_size_##name(a, b, c, d) ck_ring_enqueue_mpsc_size_##name(a, b, c, d)
#define CK_RING_ENQUEUE_RESERVE_MPSC(name, a, b, c) \
ck_ring_enqueue_reserve_mpsc_##name(a, b, c)
#define CK_RING_ENQUEUE_RESERVE_MPSC_SIZE(name, a, b, c, d) \
ck_ring_enqueue_reserve_mpsc_size_##name(a, b, c, d)
#define CK_RING_DEQUEUE_MPSC(name, a, b, c) \ #define CK_RING_DEQUEUE_MPSC(name, a, b, c) \
ck_ring_dequeue_mpsc_##name(a, b, c) ck_ring_dequeue_mpsc_##name(a, b, c)
@ -679,6 +978,10 @@ ck_ring_dequeue_mpmc_##name(struct ck_ring *a, \
ck_ring_enqueue_mpmc_##name(a, b, c) ck_ring_enqueue_mpmc_##name(a, b, c)
#define CK_RING_ENQUEUE_MPMC_SIZE(name, a, b, c, d) \ #define CK_RING_ENQUEUE_MPMC_SIZE(name, a, b, c, d) \
ck_ring_enqueue_mpmc_size_##name(a, b, c, d) ck_ring_enqueue_mpmc_size_##name(a, b, c, d)
#define CK_RING_ENQUEUE_RESERVE_MPMC(name, a, b, c) \
ck_ring_enqueue_reserve_mpmc_##name(a, b, c)
#define CK_RING_ENQUEUE_RESERVE_MPMC_SIZE(name, a, b, c, d) \
ck_ring_enqueue_reserve_mpmc_size_##name(a, b, c, d)
#define CK_RING_TRYDEQUEUE_MPMC(name, a, b, c) \ #define CK_RING_TRYDEQUEUE_MPMC(name, a, b, c) \
ck_ring_trydequeue_mpmc_##name(a, b, c) ck_ring_trydequeue_mpmc_##name(a, b, c)
#define CK_RING_DEQUEUE_MPMC(name, a, b, c) \ #define CK_RING_DEQUEUE_MPMC(name, a, b, c) \

Loading…
Cancel
Save