From a16642f95c048c65d47107205a2cfc70d099dbd6 Mon Sep 17 00:00:00 2001 From: Paul Khuong Date: Mon, 3 Dec 2018 09:01:59 -0500 Subject: [PATCH] ck_ec: event count with optimistic OS-level blocking (#133) ck_ec implements 32- and (on 64 bit platforms) 64- bit event counts. Event counts let us easily integrate OS-level blocking (e.g., futexes) in lock-free protocols. Waking up waiters only locks in the OS kernel, and does not happen at all when no waiter is blocked. Waiters only block conditionally, if the event count's value is still equal to some prior value. ck_ec supports multiple producers (wakers) and consumers (waiters), and, on x86-TSO, has a more efficient specialisation for single producer mode. In the latter mode, the overhead compared to a version counter is on the order of 2-3 cycles and 1-2 instructions, in the fast path. The slow path, when there are threads blocked on the event count, consists of one additional atomic instruction and a futex syscall. Similarly, the fast path for consumers, when an update comes quickly, has no overhead compared to spinning on a read-only counter. After a few thousand cycles, consumers (waiters) enter the slow path with one atomic instruction and a few blocking syscalls. The single-producer specialisation requires the x86-TSO memory model, x86's non-atomic read-modify-write instructions, and, ideally a futex-like OS abstraction. On !x86/x86_64 platforms, single producer increments fall back to the multiple producer code path. Fixes https://github.com/concurrencykit/ck/issues/79 --- .gitignore | 9 + include/ck_ec.h | 945 ++++++++++++++++++ regressions/Makefile | 5 + regressions/ck_ec/benchmark/Makefile | 18 + regressions/ck_ec/benchmark/ck_ec.c | 484 +++++++++ regressions/ck_ec/validate/Makefile | 55 + regressions/ck_ec/validate/ck_ec_smoke_test.c | 450 +++++++++ regressions/ck_ec/validate/fuzz_harness.h | 95 ++ .../ck_ec/validate/prop_test_slow_wakeup.c | 110 ++ .../ck_ec/validate/prop_test_timeutil_add.c | 101 ++ .../validate/prop_test_timeutil_add_ns.c | 88 ++ .../ck_ec/validate/prop_test_timeutil_cmp.c | 99 ++ .../ck_ec/validate/prop_test_timeutil_scale.c | 41 + regressions/ck_ec/validate/prop_test_value.c | 150 +++ regressions/ck_ec/validate/prop_test_wakeup.c | 193 ++++ src/Makefile.in | 4 + src/ck_ec.c | 414 ++++++++ src/ck_ec_timeutil.h | 150 +++ 18 files changed, 3411 insertions(+) create mode 100644 include/ck_ec.h create mode 100644 regressions/ck_ec/benchmark/Makefile create mode 100644 regressions/ck_ec/benchmark/ck_ec.c create mode 100644 regressions/ck_ec/validate/Makefile create mode 100644 regressions/ck_ec/validate/ck_ec_smoke_test.c create mode 100644 regressions/ck_ec/validate/fuzz_harness.h create mode 100644 regressions/ck_ec/validate/prop_test_slow_wakeup.c create mode 100644 regressions/ck_ec/validate/prop_test_timeutil_add.c create mode 100644 regressions/ck_ec/validate/prop_test_timeutil_add_ns.c create mode 100644 regressions/ck_ec/validate/prop_test_timeutil_cmp.c create mode 100644 regressions/ck_ec/validate/prop_test_timeutil_scale.c create mode 100644 regressions/ck_ec/validate/prop_test_value.c create mode 100644 regressions/ck_ec/validate/prop_test_wakeup.c create mode 100644 src/ck_ec.c create mode 100644 src/ck_ec_timeutil.h diff --git a/.gitignore b/.gitignore index 4fa574f..f4f3835 100644 --- a/.gitignore +++ b/.gitignore @@ -40,6 +40,15 @@ regressions/ck_cc/validate/ck_cc_nobuiltin regressions/ck_cohort/benchmark/ck_cohort.LATENCY regressions/ck_cohort/benchmark/ck_cohort.THROUGHPUT 
regressions/ck_cohort/validate/validate +regressions/ck_ec/benchmark/ck_ec +regressions/ck_ec/validate/ck_ec_smoke_test +regressions/ck_ec/validate/prop_test_slow_wakeup +regressions/ck_ec/validate/prop_test_timeutil_add +regressions/ck_ec/validate/prop_test_timeutil_add_ns +regressions/ck_ec/validate/prop_test_timeutil_cmp +regressions/ck_ec/validate/prop_test_timeutil_scale +regressions/ck_ec/validate/prop_test_value +regressions/ck_ec/validate/prop_test_wakeup regressions/ck_epoch/validate/ck_epoch_call regressions/ck_epoch/validate/ck_epoch_poll regressions/ck_epoch/validate/ck_epoch_section diff --git a/include/ck_ec.h b/include/ck_ec.h new file mode 100644 index 0000000..cd2a368 --- /dev/null +++ b/include/ck_ec.h @@ -0,0 +1,945 @@ +/* + * Copyright 2018 Paul Khuong, Google LLC. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Overview + * ======== + * + * ck_ec implements 32- and 64- bit event counts. Event counts let us + * easily integrate OS-level blocking (e.g., futexes) in lock-free + * protocols. Waiters block conditionally, if the event count's value + * is still equal to some old value. + * + * Event counts come in four variants: 32 and 64 bit (with one bit + * stolen for internal signaling, so 31 and 63 bit counters), and + * single or multiple producers (wakers). Waiters are always multiple + * consumers. The 32 bit variants are smaller, and more efficient, + * especially in single producer mode. The 64 bit variants are larger, + * but practically invulnerable to ABA. + * + * The 32 bit variant is always available. The 64 bit variant is only + * available if CK supports 64-bit atomic operations. Currently, + * specialization for single producer is only implemented for x86 and + * x86-64, on compilers that support GCC extended inline assembly; + * other platforms fall back to the multiple producer code path. + * + * A typical usage pattern is: + * + * 1. On the producer side: + * + * - Make changes to some shared data structure, without involving + * the event count at all. + * - After each change, call ck_ec_inc on the event count. The call + * acts as a write-write barrier, and wakes up any consumer blocked + * on the event count (waiting for new changes). + * + * 2. 
On the consumer side: + * + * - Snapshot ck_ec_value of the event count. The call acts as a + * read barrier. + * - Read and process the shared data structure. + * - Wait for new changes by calling ck_ec_wait with the snapshot value. + * + * Some data structures may opt for tighter integration with their + * event count. For example, an SPMC ring buffer or disruptor might + * use the event count's value as the write pointer. If the buffer is + * regularly full, it might also make sense to store the read pointer + * in an MP event count. + * + * This event count implementation supports tighter integration in two + * ways. + * + * Producers may opt to increment by an arbitrary value (less than + * INT32_MAX / INT64_MAX), in order to encode, e.g., byte + * offsets. Larger increment values make wraparound more likely, so + * the increments should still be relatively small. + * + * Consumers may pass a predicate to ck_ec_wait_pred. This predicate + * can make `ck_ec_wait_pred` return early, before the event count's + * value changes, and can override the deadline passed to futex_wait. + * This lets consumer block on one eventcount, while optimistically + * looking at other waking conditions. + * + * API Reference + * ============= + * + * When compiled as C11 or later, this header defines type-generic + * macros for ck_ec32 and ck_ec64; the reference describes this + * type-generic API. + * + * ck_ec needs additional OS primitives to determine the current time, + * to wait on an address, and to wake all threads waiting on a given + * address. These are defined with fields in a struct ck_ec_ops. Each + * ck_ec_ops may additionally define the number of spin loop + * iterations in the slow path, as well as the initial wait time in + * the internal exponential backoff, the exponential scale factor, and + * the right shift count (< 32). + * + * The ops, in addition to the single/multiple producer flag, are + * encapsulated in a struct ck_ec_mode, passed to most ck_ec + * operations. + * + * ec is a struct ck_ec32 *, or a struct ck_ec64 *. + * + * value is an uint32_t for ck_ec32, and an uint64_t for ck_ec64. It + * never exceeds INT32_MAX and INT64_MAX respectively. + * + * mode is a struct ck_ec_mode *. + * + * deadline is either NULL, or a `const struct timespec *` that will + * be treated as an absolute deadline. + * + * `void ck_ec_init(ec, value)`: initializes the event count to value. + * + * `value ck_ec_value(ec)`: returns the current value of the event + * counter. This read acts as a read (acquire) barrier. + * + * `bool ck_ec_has_waiters(ec)`: returns whether some thread has + * marked the event count as requiring an OS wakeup. + * + * `void ck_ec_inc(ec, mode)`: increments the value of the event + * counter by one. This writes acts as a write barrier. Wakes up + * any waiting thread. + * + * `value ck_ec_add(ec, mode, value)`: increments the event counter by + * `value`, and returns the event counter's previous value. This + * write acts as a write barrier. Wakes up any waiting thread. + * + * `int ck_ec_deadline(struct timespec *new_deadline, + * mode, + * const struct timespec *timeout)`: + * computes a deadline `timeout` away from the current time. If + * timeout is NULL, computes a deadline in the infinite future. The + * resulting deadline is written to `new_deadline`. Returns 0 on + * success, and -1 if ops->gettime failed (without touching errno). 
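+ *
+ * Putting the typical pattern above into code, a minimal sketch
+ * (assuming a platform ops table named `system_ec_ops`, as in the
+ * ck_ec_mode example below; the shared data structure and its
+ * update/read helpers are placeholders for illustration only):
+ *
+ *   extern const struct ck_ec_ops system_ec_ops;
+ *
+ *   static const struct ck_ec_mode mode = {
+ *       .ops = &system_ec_ops,
+ *       .single_producer = true
+ *   };
+ *
+ *   static struct ck_ec32 ec = CK_EC_INITIALIZER;
+ *
+ *   static void producer_update(void)
+ *   {
+ *       update_shared_data();    // placeholder: mutate the shared structure
+ *       ck_ec_inc(&ec, &mode);   // write barrier, wakes any blocked waiter
+ *   }
+ *
+ *   static void consumer_loop(void)
+ *   {
+ *       for (;;) {
+ *           uint32_t snapshot = ck_ec_value(&ec);  // read (acquire) barrier
+ *
+ *           read_shared_data();  // placeholder: process the shared structure
+ *           ck_ec_wait(&ec, &mode, snapshot, NULL);
+ *       }
+ *   }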
+ * + * `int ck_ec_wait(ec, mode, value, deadline)`: waits until the event + * counter's value differs from `value`, or, if `deadline` is + * provided and non-NULL, until the current time is after that + * deadline. Use a deadline with tv_sec = 0 for a non-blocking + * execution. Returns 0 if the event counter has changed, and -1 on + * timeout. This function acts as a read (acquire) barrier. + * + * `int ck_ec_wait_pred(ec, mode, value, pred, data, deadline)`: waits + * until the event counter's value differs from `value`, or until + * `pred` returns non-zero, or, if `deadline` is provided and + * non-NULL, until the current time is after that deadline. Use a + * deadline with tv_sec = 0 for a non-blocking execution. Returns 0 if + * the event counter has changed, `pred`'s return value if non-zero, + * and -1 on timeout. This function acts as a read (acquire) barrier. + * + * `pred` is always called as `pred(data, iteration_deadline, now)`, + * where `iteration_deadline` is a timespec of the deadline for this + * exponential backoff iteration, and `now` is the current time. If + * `pred` returns a non-zero value, that value is immediately returned + * to the waiter. Otherwise, `pred` is free to modify + * `iteration_deadline` (moving it further in the future is a bad + * idea). + * + * Implementation notes + * ==================== + * + * The multiple producer implementation is a regular locked event + * count, with a single flag bit to denote the need to wake up waiting + * threads. + * + * The single producer specialization is heavily tied to + * [x86-TSO](https://www.cl.cam.ac.uk/~pes20/weakmemory/cacm.pdf), and + * to non-atomic read-modify-write instructions (e.g., `inc mem`); + * these non-atomic RMW let us write to the same memory locations with + * atomic and non-atomic instructions, without suffering from process + * scheduling stalls. + * + * The reason we can mix atomic and non-atomic writes to the `counter` + * word is that every non-atomic write obviates the need for the + * atomically flipped flag bit: we only use non-atomic writes to + * update the event count, and the atomic flag only informs the + * producer that we would like a futex_wake, because of the update. + * We only require the non-atomic RMW counter update to prevent + * preemption from introducing arbitrarily long worst case delays. + * + * Correctness does not rely on the usual ordering argument: in the + * absence of fences, there is no strict ordering between atomic and + * non-atomic writes. The key is instead x86-TSO's guarantee that a + * read is satisfied from the most recent buffered write in the local + * store queue if there is one, or from memory if there is no write to + * that address in the store queue. + * + * x86-TSO's constraint on reads suffices to guarantee that the + * producer will never forget about a counter update. If the last + * update is still queued, the new update will be based on the queued + * value. Otherwise, the new update will be based on the value in + * memory, which may or may not have had its flag flipped. In either + * case, the value of the counter (modulo flag) is correct. + * + * When the producer forwards the counter's value from its store + * queue, the new update might not preserve a flag flip. Any waiter + * thus has to check from time to time to determine if it wasn't + * woken up because the flag bit was silently cleared. + * + * In reality, the store queue in x86-TSO stands for in-flight + * instructions in the chip's out-of-order backend. 
In the vast + * majority of cases, instructions will only remain in flight for a + * few hundred or thousand of cycles. That's why ck_ec_wait spins on + * the `counter` word for ~100 iterations after flipping its flag bit: + * if the counter hasn't changed after that many iterations, it is + * very likely that the producer's next counter update will observe + * the flag flip. + * + * That's still not a hard guarantee of correctness. Conservatively, + * we can expect that no instruction will remain in flight for more + * than 1 second... if only because some interrupt will have forced + * the chip to store its architectural state in memory, at which point + * an instruction is either fully retired or rolled back. Interrupts, + * particularly the pre-emption timer, are why single-producer updates + * must happen in a single non-atomic read-modify-write instruction. + * Having a single instruction as the critical section means we only + * have to consider the worst-case execution time for that + * instruction. That's easier than doing the same for a pair of + * instructions, which an unlucky pre-emption could delay for + * arbitrarily long. + * + * Thus, after a short spin loop, ck_ec_wait enters an exponential + * backoff loop, where each "sleep" is instead a futex_wait. The + * backoff is only necessary to handle rare cases where the flag flip + * was overwritten after the spin loop. Eventually, more than one + * second will have elapsed since the flag flip, and the sleep timeout + * becomes infinite: since the flag bit has been set for much longer + * than the time for which an instruction may remain in flight, the + * flag will definitely be observed at the next counter update. + * + * The 64 bit ck_ec_wait pulls another trick: futexes only handle 32 + * bit ints, so we must treat the 64 bit counter's low 32 bits as an + * int in futex_wait. That's a bit dodgy, but fine in practice, given + * that the OS's futex code will always read whatever value is + * currently in memory: even if the producer thread were to wait on + * its own event count, the syscall and ring transition would empty + * the store queue (the out-of-order execution backend). + * + * Finally, what happens when the producer is migrated to another core + * or otherwise pre-empted? Migration must already incur a barrier, so + * that thread always sees its own writes, so that's safe. As for + * pre-emption, that requires storing the architectural state, which + * means every instruction must either be executed fully or not at + * all when pre-emption happens. + */ + +#ifndef CK_EC_H +#define CK_EC_H +#include +#include +#include +#include +#include +#include + +/* + * If we have ck_pr_faa_64 (and, presumably, ck_pr_load_64), we + * support 63 bit counters. + */ +#ifdef CK_F_PR_FAA_64 +#define CK_F_EC64 +#endif /* CK_F_PR_FAA_64 */ + +/* + * GCC inline assembly lets us exploit non-atomic read-modify-write + * instructions on x86/x86_64 for a fast single-producer mode. + * + * If we CK_F_EC_SP is not defined, CK_EC always uses the slower + * multiple producer code. + */ +#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) +#define CK_F_EC_SP +#endif /* GNUC && (__i386__ || __x86_64__) */ + +struct ck_ec_ops; + +struct ck_ec_wait_state { + struct timespec start; /* Time when we entered ck_ec_wait. */ + struct timespec now; /* Time now. */ + const struct ck_ec_ops *ops; + void *data; /* Opaque pointer for the predicate's internal state. 
*/ + +}; + +/* + * ck_ec_ops define system-specific functions to get the current time, + * atomically wait on an address if it still has some expected value, + * and to wake all threads waiting on an address. + * + * Each platform is expected to have few (one) opaque pointer to a + * const ops struct, and reuse it for all ck_ec_mode structs. + */ +struct ck_ec_ops { + /* Populates out with the current time. Returns non-zero on failure. */ + int (*gettime)(const struct ck_ec_ops *, struct timespec *out); + + /* + * Waits on address if its value is still `expected`. If + * deadline is non-NULL, stops waiting once that deadline is + * reached. May return early for any reason. + */ + void (*wait32)(const struct ck_ec_wait_state *, const uint32_t *, + uint32_t expected, const struct timespec *deadline); + + /* + * Same as wait32, but for a 64 bit counter. Only used if + * CK_F_EC64 is defined. + * + * If underlying blocking primitive only supports 32 bit + * control words, it should be safe to block on the least + * significant half of the 64 bit address. + */ + void (*wait64)(const struct ck_ec_wait_state *, const uint64_t *, + uint64_t expected, const struct timespec *deadline); + + /* Wakes up all threads waiting on address. */ + void (*wake32)(const struct ck_ec_ops *, const uint32_t *address); + + /* + * Same as wake32, but for a 64 bit counter. Only used if + * CK_F_EC64 is defined. + * + * When wait64 truncates the control word at address to `only` + * consider its least significant half, wake64 should perform + * any necessary fixup (e.g., on big endian platforms). + */ + void (*wake64)(const struct ck_ec_ops *, const uint64_t *address); + + /* + * Number of iterations for the initial busy wait. 0 defaults + * to 100 (not ABI stable). + */ + uint32_t busy_loop_iter; + + /* + * Delay in nanoseconds for the first iteration of the + * exponential backoff. 0 defaults to 2 ms (not ABI stable). + */ + uint32_t initial_wait_ns; + + /* + * Scale factor for the exponential backoff. 0 defaults to 8x + * (not ABI stable). + */ + uint32_t wait_scale_factor; + + /* + * Right shift count for the exponential backoff. The update + * after each iteration is + * wait_ns = (wait_ns * wait_scale_factor) >> wait_shift_count, + * until one second has elapsed. After that, the deadline goes + * to infinity. + */ + uint32_t wait_shift_count; +}; + +/* + * ck_ec_mode wraps the ops table, and informs the fast path whether + * it should attempt to specialize for single producer mode. + * + * mode structs are expected to be exposed by value, e.g., + * + * extern const struct ck_ec_ops system_ec_ops; + * + * static const struct ck_ec_mode ec_sp = { + * .ops = &system_ec_ops, + * .single_producer = true + * }; + * + * static const struct ck_ec_mode ec_mp = { + * .ops = &system_ec_ops, + * .single_producer = false + * }; + * + * ck_ec_mode structs are only passed to inline functions defined in + * this header, and never escape to their slow paths, so they should + * not result in any object file size increase. + */ +struct ck_ec_mode { + const struct ck_ec_ops *ops; + /* + * If single_producer is true, the event count has a unique + * incrementer. The implementation will specialize ck_ec_inc + * and ck_ec_add if possible (if CK_F_EC_SP is defined). + */ + bool single_producer; +}; + +struct ck_ec32 { + /* Flag is "sign" bit, value in bits 0:30. */ + uint32_t counter; +}; + +typedef struct ck_ec32 ck_ec32_t; + +#ifdef CK_F_EC64 +struct ck_ec64 { + /* + * Flag is bottom bit, value in bits 1:63. 
Eventcount only + * works on x86-64 (i.e., little endian), so the futex int + * lies in the first 4 (bottom) bytes. + */ + uint64_t counter; +}; + +typedef struct ck_ec64 ck_ec64_t; +#endif /* CK_F_EC64 */ + +#define CK_EC_INITIALIZER { .counter = 0 } + +/* + * Initializes the event count to `value`. The value must not + * exceed INT32_MAX. + */ +static void ck_ec32_init(struct ck_ec32 *ec, uint32_t value); + +#ifndef CK_F_EC64 +#define ck_ec_init ck_ec32_init +#else +/* + * Initializes the event count to `value`. The value must not + * exceed INT64_MAX. + */ +static void ck_ec64_init(struct ck_ec64 *ec, uint64_t value); + +#if __STDC_VERSION__ >= 201112L +#define ck_ec_init(EC, VALUE) \ + (_Generic(*(EC), \ + struct ck_ec32 : ck_ec32_init, \ + struct ck_ec64 : ck_ec64_init)((EC), (VALUE))) +#endif /* __STDC_VERSION__ */ +#endif /* CK_F_EC64 */ + +/* + * Returns the counter value in the event count. The value is at most + * INT32_MAX. + */ +static uint32_t ck_ec32_value(const struct ck_ec32* ec); + +#ifndef CK_F_EC64 +#define ck_ec_value ck_ec32_value +#else +/* + * Returns the counter value in the event count. The value is at most + * INT64_MAX. + */ +static uint64_t ck_ec64_value(const struct ck_ec64* ec); + +#if __STDC_VERSION__ >= 201112L +#define ck_ec_value(EC) \ + (_Generic(*(EC), \ + struct ck_ec32 : ck_ec32_value, \ + struct ck_ec64 : ck_ec64_value)((EC))) +#endif /* __STDC_VERSION__ */ +#endif /* CK_F_EC64 */ + +/* + * Returns whether there may be slow pathed waiters that need an + * explicit OS wakeup for this event count. + */ +static bool ck_ec32_has_waiters(const struct ck_ec32 *ec); + +#ifndef CK_F_EC64 +#define ck_ec_has_waiters ck_ec32_has_waiters +#else +static bool ck_ec64_has_waiters(const struct ck_ec64 *ec); + +#if __STDC_VERSION__ >= 201112L +#define ck_ec_has_waiters(EC) \ + (_Generic(*(EC), \ + struct ck_ec32 : ck_ec32_has_waiters, \ + struct ck_ec64 : ck_ec64_has_waiters)((EC))) +#endif /* __STDC_VERSION__ */ +#endif /* CK_F_EC64 */ + +/* + * Increments the counter value in the event count by one, and wakes + * up any waiter. + */ +static void ck_ec32_inc(struct ck_ec32 *ec, const struct ck_ec_mode *mode); + +#ifndef CK_F_EC64 +#define ck_ec_inc ck_ec32_inc +#else +static void ck_ec64_inc(struct ck_ec64 *ec, const struct ck_ec_mode *mode); + +#if __STDC_VERSION__ >= 201112L +#define ck_ec_inc(EC, MODE) \ + (_Generic(*(EC), \ + struct ck_ec32 : ck_ec32_inc, \ + struct ck_ec64 : ck_ec64_inc)((EC), (MODE))) +#endif /* __STDC_VERSION__ */ +#endif /* CK_F_EC64 */ + +/* + * Increments the counter value in the event count by delta, wakes + * up any waiter, and returns the previous counter value. + */ +static uint32_t ck_ec32_add(struct ck_ec32 *ec, + const struct ck_ec_mode *mode, + uint32_t delta); + +#ifndef CK_F_EC64 +#define ck_ec_add ck_ec32_add +#else +static uint64_t ck_ec64_add(struct ck_ec64 *ec, + const struct ck_ec_mode *mode, + uint64_t delta); + +#if __STDC_VERSION__ >= 201112L +#define ck_ec_add(EC, MODE, DELTA) \ + (_Generic(*(EC), \ + struct ck_ec32 : ck_ec32_add, \ + struct ck_ec64 : ck_ec64_add)((EC), (MODE), (DELTA))) +#endif /* __STDC_VERSION__ */ +#endif /* CK_F_EC64 */ + +/* + * Populates `new_deadline` with a deadline `timeout` in the future. + * Returns 0 on success, and -1 if clock_gettime failed, in which + * case errno is left as is. 
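+ *
+ * For example, a bounded wait could be sketched as follows (`ec`,
+ * `mode` and `snapshot` as in the usage sketch above; the 500us
+ * timeout is an arbitrary illustration):
+ *
+ *   struct timespec deadline;
+ *   const struct timespec timeout = { .tv_nsec = 500 * 1000 };
+ *
+ *   if (ck_ec_deadline(&deadline, &mode, &timeout) == 0 &&
+ *       ck_ec_wait(&ec, &mode, snapshot, &deadline) == 0) {
+ *       // the counter moved away from snapshot before the deadline
+ *   }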
+ */ +static int ck_ec_deadline(struct timespec *new_deadline, + const struct ck_ec_mode *mode, + const struct timespec *timeout); + +/* + * Waits until the counter value in the event count differs from + * old_value, or, if deadline is non-NULL, until CLOCK_MONOTONIC is + * past the deadline. + * + * Returns 0 on success, and -1 on timeout. + */ +static int ck_ec32_wait(struct ck_ec32 *ec, + const struct ck_ec_mode *mode, + uint32_t old_value, + const struct timespec *deadline); + +#ifndef CK_F_EC64 +#define ck_ec_wait ck_ec32_wait +#else +static int ck_ec64_wait(struct ck_ec64 *ec, + const struct ck_ec_mode *mode, + uint64_t old_value, + const struct timespec *deadline); + +#if __STDC_VERSION__ >= 201112L +#define ck_ec_wait(EC, MODE, OLD_VALUE, DEADLINE) \ + (_Generic(*(EC), \ + struct ck_ec32 : ck_ec32_wait, \ + struct ck_ec64 : ck_ec64_wait)((EC), (MODE), \ + (OLD_VALUE), (DEADLINE))) + +#endif /* __STDC_VERSION__ */ +#endif /* CK_F_EC64 */ + +/* + * Waits until the counter value in the event count differs from + * old_value, pred returns non-zero, or, if deadline is non-NULL, + * until CLOCK_MONOTONIC is past the deadline. + * + * Returns 0 on success, -1 on timeout, and the return value of pred + * if it returns non-zero. + * + * A NULL pred represents a function that always returns 0. + */ +static int ck_ec32_wait_pred(struct ck_ec32 *ec, + const struct ck_ec_mode *mode, + uint32_t old_value, + int (*pred)(const struct ck_ec_wait_state *, + struct timespec *deadline), + void *data, + const struct timespec *deadline); + +#ifndef CK_F_EC64 +#define ck_ec_wait_pred ck_ec32_wait_pred +#else +static int ck_ec64_wait_pred(struct ck_ec64 *ec, + const struct ck_ec_mode *mode, + uint64_t old_value, + int (*pred)(const struct ck_ec_wait_state *, + struct timespec *deadline), + void *data, + const struct timespec *deadline); + +#if __STDC_VERSION__ >= 201112L +#define ck_ec_wait_pred(EC, MODE, OLD_VALUE, PRED, DATA, DEADLINE) \ + (_Generic(*(EC), \ + struct ck_ec32 : ck_ec32_wait_pred, \ + struct ck_ec64 : ck_ec64_wait_pred) \ + ((EC), (MODE), (OLD_VALUE), (PRED), (DATA), (DEADLINE))) +#endif /* __STDC_VERSION__ */ +#endif /* CK_F_EC64 */ + +/* + * Inline implementation details. 32 bit first, then 64 bit + * conditionally. + */ +CK_CC_FORCE_INLINE void ck_ec32_init(struct ck_ec32 *ec, uint32_t value) +{ + ec->counter = value & ~(1UL << 31); + return; +} + +CK_CC_FORCE_INLINE uint32_t ck_ec32_value(const struct ck_ec32 *ec) +{ + uint32_t ret = ck_pr_load_32(&ec->counter) & ~(1UL << 31); + + ck_pr_fence_acquire(); + return ret; +} + +CK_CC_FORCE_INLINE bool ck_ec32_has_waiters(const struct ck_ec32 *ec) +{ + return ck_pr_load_32(&ec->counter) & (1UL << 31); +} + +/* Slow path for ck_ec{32,64}_{inc,add} */ +void ck_ec32_wake(struct ck_ec32 *ec, const struct ck_ec_ops *ops); + +CK_CC_FORCE_INLINE void ck_ec32_inc(struct ck_ec32 *ec, + const struct ck_ec_mode *mode) +{ +#if !defined(CK_F_EC_SP) + /* Nothing to specialize if we don't have EC_SP. */ + ck_ec32_add(ec, mode, 1); + return; +#else + char flagged; + +#if __GNUC__ >= 6 + /* + * We don't want to wake if the sign bit is 0. We do want to + * wake if the sign bit just flipped from 1 to 0. We don't + * care what happens when our increment caused the sign bit to + * flip from 0 to 1 (that's once per 2^31 increment). + * + * This leaves us with four cases: + * + * old sign bit | new sign bit | SF | OF | ZF + * ------------------------------------------- + * 0 | 0 | 0 | 0 | ? + * 0 | 1 | 1 | 0 | ? + * 1 | 1 | 1 | 0 | ? 
+ * 1 | 0 | 0 | 0 | 1 + * + * In the first case, we don't want to hit ck_ec32_wake. In + * the last two cases, we do want to call ck_ec32_wake. In the + * second case, we don't care, so we arbitrarily choose to + * call ck_ec32_wake. + * + * The "le" condition checks if SF != OF, or ZF == 1, which + * meets our requirements. + */ +#define CK_EC32_INC_ASM(PREFIX) \ + __asm__ volatile(PREFIX " incl %0" \ + : "+m"(ec->counter), "=@ccle"(flagged) \ + :: "cc", "memory") +#else +#define CK_EC32_INC_ASM(PREFIX) \ + __asm__ volatile(PREFIX " incl %0; setle %1" \ + : "+m"(ec->counter), "=r"(flagged) \ + :: "cc", "memory") +#endif /* __GNUC__ */ + + if (mode->single_producer == true) { + ck_pr_fence_store(); + CK_EC32_INC_ASM(""); + } else { + ck_pr_fence_store_atomic(); + CK_EC32_INC_ASM("lock"); + } +#undef CK_EC32_INC_ASM + + if (CK_CC_UNLIKELY(flagged)) { + ck_ec32_wake(ec, mode->ops); + } + + return; +#endif /* CK_F_EC_SP */ +} + +CK_CC_FORCE_INLINE uint32_t ck_ec32_add_epilogue(struct ck_ec32 *ec, + const struct ck_ec_mode *mode, + uint32_t old) +{ + const uint32_t flag_mask = 1U << 31; + uint32_t ret; + + ret = old & ~flag_mask; + /* These two only differ if the flag bit is set. */ + if (CK_CC_UNLIKELY(old != ret)) { + ck_ec32_wake(ec, mode->ops); + } + + return ret; +} + +static CK_CC_INLINE uint32_t ck_ec32_add_mp(struct ck_ec32 *ec, + const struct ck_ec_mode *mode, + uint32_t delta) +{ + uint32_t old; + + ck_pr_fence_store_atomic(); + old = ck_pr_faa_32(&ec->counter, delta); + return ck_ec32_add_epilogue(ec, mode, old); +} + +#ifdef CK_F_EC_SP +static CK_CC_INLINE uint32_t ck_ec32_add_sp(struct ck_ec32 *ec, + const struct ck_ec_mode *mode, + uint32_t delta) +{ + uint32_t old; + + /* + * Correctness of this racy write depends on actually + * having an update to write. Exit here if the update + * is a no-op. 
+ */ + if (CK_CC_UNLIKELY(delta == 0)) { + return ck_ec32_value(ec); + } + + ck_pr_fence_store(); + old = delta; + __asm__ volatile("xaddl %1, %0" + : "+m"(ec->counter), "+r"(old) + :: "cc", "memory"); + return ck_ec32_add_epilogue(ec, mode, old); +} +#endif /* CK_F_EC_SP */ + +CK_CC_FORCE_INLINE uint32_t ck_ec32_add(struct ck_ec32 *ec, + const struct ck_ec_mode *mode, + uint32_t delta) +{ +#ifdef CK_F_EC_SP + if (mode->single_producer == true) { + return ck_ec32_add_sp(ec, mode, delta); + } +#endif + + return ck_ec32_add_mp(ec, mode, delta); +} + +int ck_ec_deadline_impl(struct timespec *new_deadline, + const struct ck_ec_ops *ops, + const struct timespec *timeout); + +CK_CC_FORCE_INLINE int ck_ec_deadline(struct timespec *new_deadline, + const struct ck_ec_mode *mode, + const struct timespec *timeout) +{ + return ck_ec_deadline_impl(new_deadline, mode->ops, timeout); +} + + +int ck_ec32_wait_slow(struct ck_ec32 *ec, + const struct ck_ec_ops *ops, + uint32_t old_value, + const struct timespec *deadline); + +CK_CC_FORCE_INLINE int ck_ec32_wait(struct ck_ec32 *ec, + const struct ck_ec_mode *mode, + uint32_t old_value, + const struct timespec *deadline) +{ + if (ck_ec32_value(ec) != old_value) { + return 0; + } + + return ck_ec32_wait_slow(ec, mode->ops, old_value, deadline); +} + +int ck_ec32_wait_pred_slow(struct ck_ec32 *ec, + const struct ck_ec_ops *ops, + uint32_t old_value, + int (*pred)(const struct ck_ec_wait_state *state, + struct timespec *deadline), + void *data, + const struct timespec *deadline); + +CK_CC_FORCE_INLINE int +ck_ec32_wait_pred(struct ck_ec32 *ec, + const struct ck_ec_mode *mode, + uint32_t old_value, + int (*pred)(const struct ck_ec_wait_state *state, + struct timespec *deadline), + void *data, + const struct timespec *deadline) +{ + if (ck_ec32_value(ec) != old_value) { + return 0; + } + + return ck_ec32_wait_pred_slow(ec, mode->ops, old_value, + pred, data, deadline); +} + +#ifdef CK_F_EC64 +CK_CC_FORCE_INLINE void ck_ec64_init(struct ck_ec64 *ec, uint64_t value) +{ + ec->counter = value << 1; + return; +} + +CK_CC_FORCE_INLINE uint64_t ck_ec64_value(const struct ck_ec64 *ec) +{ + uint64_t ret = ck_pr_load_64(&ec->counter) >> 1; + + ck_pr_fence_acquire(); + return ret; +} + +CK_CC_FORCE_INLINE bool ck_ec64_has_waiters(const struct ck_ec64 *ec) +{ + return ck_pr_load_64(&ec->counter) & 1; +} + +void ck_ec64_wake(struct ck_ec64 *ec, const struct ck_ec_ops *ops); + +CK_CC_FORCE_INLINE void ck_ec64_inc(struct ck_ec64 *ec, + const struct ck_ec_mode *mode) +{ + /* We always xadd, so there's no special optimization here. */ + (void)ck_ec64_add(ec, mode, 1); + return; +} + +CK_CC_FORCE_INLINE uint64_t ck_ec_add64_epilogue(struct ck_ec64 *ec, + const struct ck_ec_mode *mode, + uint64_t old) +{ + uint64_t ret = old >> 1; + + if (CK_CC_UNLIKELY(old & 1)) { + ck_ec64_wake(ec, mode->ops); + } + + return ret; +} + +static CK_CC_INLINE uint64_t ck_ec64_add_mp(struct ck_ec64 *ec, + const struct ck_ec_mode *mode, + uint64_t delta) +{ + uint64_t inc = 2 * delta; /* The low bit is the flag bit. */ + + ck_pr_fence_store_atomic(); + return ck_ec_add64_epilogue(ec, mode, ck_pr_faa_64(&ec->counter, inc)); +} + +#ifdef CK_F_EC_SP +/* Single-producer specialisation. */ +static CK_CC_INLINE uint64_t ck_ec64_add_sp(struct ck_ec64 *ec, + const struct ck_ec_mode *mode, + uint64_t delta) +{ + uint64_t old; + + /* + * Correctness of this racy write depends on actually + * having an update to write. Exit here if the update + * is a no-op. 
+ */ + if (CK_CC_UNLIKELY(delta == 0)) { + return ck_ec64_value(ec); + } + + ck_pr_fence_store(); + old = 2 * delta; /* The low bit is the flag bit. */ + __asm__ volatile("xaddq %1, %0" + : "+m"(ec->counter), "+r"(old) + :: "cc", "memory"); + return ck_ec_add64_epilogue(ec, mode, old); +} +#endif /* CK_F_EC_SP */ + +/* + * Dispatch on mode->single_producer in this FORCE_INLINE function: + * the end result is always small, but not all compilers have enough + * foresight to inline and get the reduction. + */ +CK_CC_FORCE_INLINE uint64_t ck_ec64_add(struct ck_ec64 *ec, + const struct ck_ec_mode *mode, + uint64_t delta) +{ +#ifdef CK_F_EC_SP + if (mode->single_producer == true) { + return ck_ec64_add_sp(ec, mode, delta); + } +#endif + + return ck_ec64_add_mp(ec, mode, delta); +} + +int ck_ec64_wait_slow(struct ck_ec64 *ec, + const struct ck_ec_ops *ops, + uint64_t old_value, + const struct timespec *deadline); + +CK_CC_FORCE_INLINE int ck_ec64_wait(struct ck_ec64 *ec, + const struct ck_ec_mode *mode, + uint64_t old_value, + const struct timespec *deadline) +{ + if (ck_ec64_value(ec) != old_value) { + return 0; + } + + return ck_ec64_wait_slow(ec, mode->ops, old_value, deadline); +} + +int ck_ec64_wait_pred_slow(struct ck_ec64 *ec, + const struct ck_ec_ops *ops, + uint64_t old_value, + int (*pred)(const struct ck_ec_wait_state *state, + struct timespec *deadline), + void *data, + const struct timespec *deadline); + + +CK_CC_FORCE_INLINE int +ck_ec64_wait_pred(struct ck_ec64 *ec, + const struct ck_ec_mode *mode, + uint64_t old_value, + int (*pred)(const struct ck_ec_wait_state *state, + struct timespec *deadline), + void *data, + const struct timespec *deadline) +{ + if (ck_ec64_value(ec) != old_value) { + return 0; + } + + return ck_ec64_wait_pred_slow(ec, mode->ops, old_value, + pred, data, deadline); +} +#endif /* CK_F_EC64 */ +#endif /* !CK_EC_H */ diff --git a/regressions/Makefile b/regressions/Makefile index 6b369a8..c74b4fa 100644 --- a/regressions/Makefile +++ b/regressions/Makefile @@ -6,6 +6,7 @@ DIR=array \ bytelock \ cc \ cohort \ + ec \ epoch \ fifo \ hp \ @@ -71,6 +72,8 @@ all: $(MAKE) -C ./ck_pflock/benchmark all $(MAKE) -C ./ck_hp/validate all $(MAKE) -C ./ck_hp/benchmark all + $(MAKE) -C ./ck_ec/validate all + $(MAKE) -C ./ck_ec/benchmark all clean: $(MAKE) -C ./ck_array/validate clean @@ -119,6 +122,8 @@ clean: $(MAKE) -C ./ck_pflock/benchmark clean $(MAKE) -C ./ck_hp/validate clean $(MAKE) -C ./ck_hp/benchmark clean + $(MAKE) -C ./ck_ec/validate clean + $(MAKE) -C ./ck_ec/benchmark clean check: all rc=0; \ diff --git a/regressions/ck_ec/benchmark/Makefile b/regressions/ck_ec/benchmark/Makefile new file mode 100644 index 0000000..c266023 --- /dev/null +++ b/regressions/ck_ec/benchmark/Makefile @@ -0,0 +1,18 @@ +.PHONY: check clean distribution + +OBJECTS=ck_ec + +all: $(OBJECTS) + +ck_ec: ck_ec.c ../../../include/ck_ec.h + $(CC) $(CFLAGS) ../../../src/ck_ec.c -o ck_ec ck_ec.c + +check: all + ./ck_ec $(CORES) 1 + +clean: + rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe + +include ../../../build/regressions.build +CFLAGS+=-D_GNU_SOURCE + diff --git a/regressions/ck_ec/benchmark/ck_ec.c b/regressions/ck_ec/benchmark/ck_ec.c new file mode 100644 index 0000000..2c6b5d3 --- /dev/null +++ b/regressions/ck_ec/benchmark/ck_ec.c @@ -0,0 +1,484 @@ +/* + * Copyright 2018 Paul Khuong. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "../../common.h" + +#ifndef STEPS +#define STEPS (65536 * 64) +#endif + +static int gettime(const struct ck_ec_ops *, struct timespec *out); +static void wake32(const struct ck_ec_ops *, const uint32_t *); +static void wait32(const struct ck_ec_wait_state *, + const uint32_t *, uint32_t, const struct timespec *); +static void wake64(const struct ck_ec_ops *, const uint64_t *); +static void wait64(const struct ck_ec_wait_state *, + const uint64_t *, uint64_t, const struct timespec *); + +static const struct ck_ec_ops test_ops = { + .gettime = gettime, + .wait32 = wait32, + .wait64 = wait64, + .wake32 = wake32, + .wake64 = wake64 +}; + +#ifndef __linux__ +static int gettime(const struct ck_ec_ops *ops, struct timespec *out) +{ + (void)out; + + assert(ops == &test_ops); + return -1; +} + +static void wait32(const struct ck_ec_wait_state *state, + const uint32_t *address, uint32_t expected, + const struct timespec *deadline) +{ + (void)address; + (void)expected; + (void)deadline; + + assert(state->ops == &test_ops); + return; +} + +static void wait64(const struct ck_ec_wait_state *state, + const uint64_t *address, uint64_t expected, + const struct timespec *deadline) +{ + (void)address; + (void)expected; + (void)deadline; + + assert(state->ops == &test_ops); + return; +} + +static void wake32(const struct ck_ec_ops *ops, const uint32_t *address) +{ + (void)address; + + assert(ops == &test_ops); + return; +} + +static void wake64(const struct ck_ec_ops *ops, const uint64_t *address) +{ + (void)address; + + assert(ops == &test_ops); + return; +} +#else +#include +#include +#include +#include + +static int gettime(const struct ck_ec_ops *ops, struct timespec *out) +{ + assert(ops == &test_ops); + return clock_gettime(CLOCK_MONOTONIC, out); +} + +static void wait32(const struct ck_ec_wait_state *state, + const uint32_t *address, uint32_t expected, + const struct timespec *deadline) +{ + assert(state->ops == &test_ops); + syscall(SYS_futex, address, + FUTEX_WAIT_BITSET, expected, deadline, + NULL, deadline, 0); + return; +} + +static void wait64(const struct ck_ec_wait_state *state, + const uint64_t *address, uint64_t expected, + const struct timespec *deadline) +{ + const void *low_half; + + assert(state->ops == &test_ops); + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + low_half = address; +#elif 
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + low_half = (uintptr_t)address + sizeof(uint32_t); +#else +# error "__BYTE_ORDER__ must be defined." +#endif + + syscall(SYS_futex, low_half, + FUTEX_WAIT_BITSET, (uint32_t)expected, deadline, + NULL, deadline, 0); + return; +} + +static void wake32(const struct ck_ec_ops *ops, const uint32_t *address) +{ + assert(ops == &test_ops); + syscall(SYS_futex, address, + FUTEX_WAKE, INT_MAX, + /* ignored arguments */NULL, NULL, 0); + return; +} + +static void wake64(const struct ck_ec_ops *ops, const uint64_t *address) +{ + const void *low_half; + + assert(ops == &test_ops); + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + low_half = address; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + low_half = (uintptr_t)address + sizeof(uint32_t); +#else +# error "__BYTE_ORDER__ must be defined." +#endif + + syscall(SYS_futex, low_half, + FUTEX_WAKE, INT_MAX, + /* ignored arguments */NULL, NULL, 0); + return; +} +#endif /* __linux__ */ + +static const struct ck_ec_mode sp = { + .ops = &test_ops, + .single_producer = true +}; + +static const struct ck_ec_mode mp = { + .ops = &test_ops, + .single_producer = false +}; + +static CK_CC_FORCE_INLINE void bench32(const struct ck_ec_mode mode) +{ + ck_ec32_t ec CK_CC_CACHELINE = CK_EC_INITIALIZER; + uint64_t a; + uint64_t baseline = 1000 * 1000; + uint32_t value; + + for (size_t i = 0; i < STEPS; i++) { + uint64_t s = rdtsc(); + uint64_t elapsed = rdtsc() - s; + + if (elapsed < baseline) { + baseline = elapsed; + } + } + + /* Read value. */ + a = 0; + value = 0; + for (size_t i = 0; i < STEPS / 4; i++) { + uint64_t s = rdtsc(); + + value ^= ck_ec32_value(&ec); + value ^= ck_ec32_value(&ec); + value ^= ck_ec32_value(&ec); + value ^= ck_ec32_value(&ec); + + __asm__ volatile("" :: "r"(value)); + a += rdtsc() - s - baseline; + } + + printf("%s ec32_value: %" PRIu64 "\n", + (mode.single_producer ? "SP" : "MP"), a / STEPS); + + /* Wait (fast path). */ + a = 0; + for (size_t i = 0; i < STEPS / 4; i++) { + uint64_t s = rdtsc(); + + ck_ec32_wait(&ec, &mode, 1, NULL); + ck_ec32_wait(&ec, &mode, 1, NULL); + ck_ec32_wait(&ec, &mode, 1, NULL); + ck_ec32_wait(&ec, &mode, 1, NULL); + + a += rdtsc() - s - baseline; + } + + printf("%s ec32_wait fast: %" PRIu64 "\n", + (mode.single_producer ? "SP" : "MP"), a / STEPS); + + /* trywait. */ + a = 0; + for (size_t i = 0; i < STEPS / 4; i++) { + struct timespec past = { .tv_sec = 0 }; + uint64_t s = rdtsc(); + + ck_ec32_wait(&ec, &mode, 0, &past); + ck_ec32_wait(&ec, &mode, 0, &past); + ck_ec32_wait(&ec, &mode, 0, &past); + ck_ec32_wait(&ec, &mode, 0, &past); + + a += rdtsc() - s - baseline; + } + + printf("%s ec32_wait timeout: %" PRIu64 "\n", + (mode.single_producer ? "SP" : "MP"), a / STEPS); + + /* Inc (no waiter). */ + assert(!ck_ec32_has_waiters(&ec)); + a = 0; + for (size_t i = 0; i < STEPS / 4; i++) { + uint64_t s = rdtsc(); + + ck_ec32_inc(&ec, &mode); + ck_ec32_inc(&ec, &mode); + ck_ec32_inc(&ec, &mode); + ck_ec32_inc(&ec, &mode); + + a += rdtsc() - s - baseline; + } + + printf("%s ec32_inc: %" PRIu64 "\n", + (mode.single_producer ? "SP" : "MP"), a / STEPS); + + /* Inc (with waiter). */ + assert(!ck_ec32_has_waiters(&ec)); + a = 0; + for (size_t i = 0; i < STEPS; i++) { + struct timespec past = { .tv_sec = 1 }; + uint64_t s; + + ck_ec32_wait(&ec, &mode, ck_ec32_value(&ec), &past); + assert(ck_ec32_has_waiters(&ec)); + + s = rdtsc(); + ck_ec32_inc(&ec, &mode); + a += rdtsc() - s - baseline; + } + + printf("%s ec32_inc slow: %" PRIu64 "\n", + (mode.single_producer ? 
"SP" : "MP"), a / STEPS); + + /* Add (no waiter). */ + assert(!ck_ec32_has_waiters(&ec)); + a = 0; + for (size_t i = 0; i < STEPS / 4; i++) { + uint64_t s = rdtsc(); + + ck_ec32_add(&ec, &mode, i + 1); + ck_ec32_add(&ec, &mode, i + 2); + ck_ec32_add(&ec, &mode, i + 3); + ck_ec32_add(&ec, &mode, i + 4); + + a += rdtsc() - s - baseline; + } + + printf("%s ec32_add: %" PRIu64 "\n", + (mode.single_producer ? "SP" : "MP"), a / STEPS); + + assert(!ck_ec32_has_waiters(&ec)); + a = 0; + for (size_t i = 0; i < STEPS; i++) { + struct timespec past = { .tv_sec = 1 }; + uint64_t s; + + ck_ec32_wait(&ec, &mode, ck_ec32_value(&ec), &past); + assert(ck_ec32_has_waiters(&ec)); + + s = rdtsc(); + ck_ec32_add(&ec, &mode, i + 1); + a += rdtsc() - s - baseline; + } + + printf("%s ec32_add slow: %" PRIu64 "\n", + (mode.single_producer ? "SP" : "MP"), a / STEPS); + return; +} + +#ifdef CK_F_EC64 +static CK_CC_FORCE_INLINE void bench64(const struct ck_ec_mode mode) +{ + ck_ec64_t ec CK_CC_CACHELINE = CK_EC_INITIALIZER; + uint64_t a; + uint64_t baseline = 1000 * 1000; + uint64_t value; + + for (size_t i = 0; i < STEPS; i++) { + uint64_t s = rdtsc(); + uint64_t elapsed = rdtsc() - s; + + if (elapsed < baseline) { + baseline = elapsed; + } + } + + /* Read value. */ + a = 0; + value = 0; + for (size_t i = 0; i < STEPS / 4; i++) { + uint64_t s = rdtsc(); + + value ^= ck_ec64_value(&ec); + value ^= ck_ec64_value(&ec); + value ^= ck_ec64_value(&ec); + value ^= ck_ec64_value(&ec); + + __asm__ volatile("" :: "r"(value)); + a += rdtsc() - s - baseline; + } + + printf("%s ec64_value: %" PRIu64 "\n", + (mode.single_producer ? "SP" : "MP"), a / STEPS); + + /* Wait (fast path). */ + a = 0; + for (size_t i = 0; i < STEPS / 4; i++) { + uint64_t s = rdtsc(); + + ck_ec64_wait(&ec, &mode, 1, NULL); + ck_ec64_wait(&ec, &mode, 1, NULL); + ck_ec64_wait(&ec, &mode, 1, NULL); + ck_ec64_wait(&ec, &mode, 1, NULL); + + a += rdtsc() - s - baseline; + } + + printf("%s ec64_wait fast: %" PRIu64 "\n", + (mode.single_producer ? "SP" : "MP"), a / STEPS); + + /* trywait. */ + a = 0; + for (size_t i = 0; i < STEPS / 4; i++) { + struct timespec past = { .tv_sec = 0 }; + uint64_t s = rdtsc(); + + ck_ec64_wait(&ec, &mode, 0, &past); + ck_ec64_wait(&ec, &mode, 0, &past); + ck_ec64_wait(&ec, &mode, 0, &past); + ck_ec64_wait(&ec, &mode, 0, &past); + + a += rdtsc() - s - baseline; + } + + printf("%s ec64_wait timeout: %" PRIu64 "\n", + (mode.single_producer ? "SP" : "MP"), a / STEPS); + + /* Inc (no waiter). */ + assert(!ck_ec64_has_waiters(&ec)); + a = 0; + for (size_t i = 0; i < STEPS / 4; i++) { + uint64_t s = rdtsc(); + + ck_ec64_inc(&ec, &mode); + ck_ec64_inc(&ec, &mode); + ck_ec64_inc(&ec, &mode); + ck_ec64_inc(&ec, &mode); + + a += rdtsc() - s - baseline; + } + + printf("%s ec64_inc: %" PRIu64 "\n", + (mode.single_producer ? "SP" : "MP"), a / STEPS); + + /* Inc (with waiter). */ + assert(!ck_ec64_has_waiters(&ec)); + a = 0; + for (size_t i = 0; i < STEPS; i++) { + struct timespec past = { .tv_sec = 1 }; + uint64_t s; + + ck_ec64_wait(&ec, &mode, ck_ec64_value(&ec), &past); + assert(ck_ec64_has_waiters(&ec)); + + s = rdtsc(); + ck_ec64_inc(&ec, &mode); + a += rdtsc() - s - baseline; + } + + printf("%s ec64_inc slow: %" PRIu64 "\n", + (mode.single_producer ? "SP" : "MP"), a / STEPS); + + /* Add (no waiter). 
*/ + assert(!ck_ec64_has_waiters(&ec)); + a = 0; + for (size_t i = 0; i < STEPS / 4; i++) { + uint64_t s = rdtsc(); + + ck_ec64_add(&ec, &mode, i + 1); + ck_ec64_add(&ec, &mode, i + 2); + ck_ec64_add(&ec, &mode, i + 3); + ck_ec64_add(&ec, &mode, i + 4); + + a += rdtsc() - s - baseline; + } + + printf("%s ec64_add: %" PRIu64 "\n", + (mode.single_producer ? "SP" : "MP"), a / STEPS); + + assert(!ck_ec64_has_waiters(&ec)); + a = 0; + for (size_t i = 0; i < STEPS; i++) { + struct timespec past = { .tv_sec = 1 }; + uint64_t s; + + ck_ec64_wait(&ec, &mode, ck_ec64_value(&ec), &past); + assert(ck_ec64_has_waiters(&ec)); + + s = rdtsc(); + ck_ec64_add(&ec, &mode, i + 1); + a += rdtsc() - s - baseline; + } + + printf("%s ec64_add slow: %" PRIu64 "\n", + (mode.single_producer ? "SP" : "MP"), a / STEPS); + return; +} +#endif /* CK_F_EC64 */ + +int +main(void) +{ + printf("SP ec32\n"); + bench32(sp); + printf("\nMP ec32\n"); + bench32(mp); + +#ifdef CK_F_EC64 + printf("\nSP ec64\n"); + bench64(sp); + printf("\nMP ec64\n"); + bench64(mp); +#endif /* CK_F_EC64 */ + + return 0; +} diff --git a/regressions/ck_ec/validate/Makefile b/regressions/ck_ec/validate/Makefile new file mode 100644 index 0000000..2c66d82 --- /dev/null +++ b/regressions/ck_ec/validate/Makefile @@ -0,0 +1,55 @@ +.PHONY: check clean distribution + +FUZZER ?= none + +FUZZ_CFLAGS ?= + +# See http://gallium.inria.fr/blog/portable-conditionals-in-makefiles/ for +# the portable conditional technique below. +none_fuzz_cflags = +libfuzzer_fuzz_cflags = -DUSE_LIBFUZZER -fsanitize=fuzzer,memory,undefined + +FUZZ_CFLAGS += ${${FUZZER}_fuzz_cflags} + +OBJECTS = ck_ec_smoke_test \ + prop_test_slow_wakeup \ + prop_test_timeutil_add \ + prop_test_timeutil_add_ns \ + prop_test_timeutil_cmp \ + prop_test_timeutil_scale \ + prop_test_value \ + prop_test_wakeup + +all: $(OBJECTS) + +check: all + ./ck_ec_smoke_test + # the command line arguments are only consumed by libfuzzer. 
+ ./prop_test_slow_wakeup -max_total_time=60 + ./prop_test_timeutil_add -max_total_time=60 + ./prop_test_timeutil_add_ns -max_total_time=60 + ./prop_test_timeutil_cmp -max_total_time=60 + ./prop_test_timeutil_scale -max_total_time=60 + ./prop_test_value -max_total_time=60 + ./prop_test_wakeup -max_total_time=60 + +quickfuzz: all + ./prop_test_slow_wakeup -max_total_time=5 + ./prop_test_timeutil_add -max_total_time=5 + ./prop_test_timeutil_add_ns -max_total_time=5 + ./prop_test_timeutil_cmp -max_total_time=5 + ./prop_test_timeutil_scale -max_total_time=5 + ./prop_test_value -max_total_time=5 + ./prop_test_wakeup -max_total_time=5 + +ck_ec_smoke_test: ../../../src/ck_ec.c ck_ec_smoke_test.c ../../../src/ck_ec_timeutil.h ../../../include/ck_ec.h + $(CC) $(CFLAGS) -std=gnu11 ../../../src/ck_ec.c -o ck_ec_smoke_test ck_ec_smoke_test.c + +prop_test_%: ../../../src/ck_ec.c prop_test_%.c ../../../src/ck_ec_timeutil.h ../../../include/ck_ec.h fuzz_harness.h + $(CC) $(CFLAGS) $(FUZZ_CFLAGS) ../../../src/ck_ec.c -o $@ $@.c + +clean: + rm -rf *~ *.o *.dSYM *.exe $(OBJECTS) + +include ../../../build/regressions.build +CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE diff --git a/regressions/ck_ec/validate/ck_ec_smoke_test.c b/regressions/ck_ec/validate/ck_ec_smoke_test.c new file mode 100644 index 0000000..fadfd85 --- /dev/null +++ b/regressions/ck_ec/validate/ck_ec_smoke_test.c @@ -0,0 +1,450 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#define TIME_MAX ((time_t)((1ULL << ((sizeof(time_t) * CHAR_BIT) - 1)) - 1)) + +#ifndef __linux__ +/* Zero-initialize to mark the ops as unavailable. */ +static const struct ck_ec_ops test_ops; +#else +#include +#include +#include + +static int gettime(const struct ck_ec_ops *, struct timespec *out); +static void wake32(const struct ck_ec_ops *, const uint32_t *); +static void wait32(const struct ck_ec_wait_state *, const uint32_t *, + uint32_t, const struct timespec *); +static void wake64(const struct ck_ec_ops *, const uint64_t *); +static void wait64(const struct ck_ec_wait_state *, const uint64_t *, + uint64_t, const struct timespec *); + +static const struct ck_ec_ops test_ops = { + .gettime = gettime, + .wait32 = wait32, + .wait64 = wait64, + .wake32 = wake32, + .wake64 = wake64 +}; + +static int gettime(const struct ck_ec_ops *ops, struct timespec *out) +{ + assert(ops == &test_ops); + return clock_gettime(CLOCK_MONOTONIC, out); +} + +static void wait32(const struct ck_ec_wait_state *state, + const uint32_t *address, uint32_t expected, + const struct timespec *deadline) +{ + assert(state->ops == &test_ops); + syscall(SYS_futex, address, + FUTEX_WAIT_BITSET, expected, deadline, + NULL, deadline, 0); + return; +} + +static void wait64(const struct ck_ec_wait_state *state, + const uint64_t *address, uint64_t expected, + const struct timespec *deadline) +{ + const void *low_half; + + assert(state->ops == &test_ops); + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + low_half = address; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + low_half = (uintptr_t)address + sizeof(uint32_t); +#else +# error "__BYTE_ORDER__ must be defined." 
+#endif + + syscall(SYS_futex, low_half, + FUTEX_WAIT_BITSET, (uint32_t)expected, deadline, + NULL, deadline, 0); + return; +} + +static void wake32(const struct ck_ec_ops *ops, const uint32_t *address) +{ + assert(ops == &test_ops); + syscall(SYS_futex, address, + FUTEX_WAKE, INT_MAX, + /* ignored arguments */NULL, NULL, 0); + return; +} + +static void wake64(const struct ck_ec_ops *ops, const uint64_t *address) +{ + const void *low_half; + + assert(ops == &test_ops); + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + low_half = address; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + low_half = (uintptr_t)address + sizeof(uint32_t); +#else +# error "__BYTE_ORDER__ must be defined." +#endif + + syscall(SYS_futex, low_half, + FUTEX_WAKE, INT_MAX, + /* ignored arguments */NULL, NULL, 0); + return; +} +#endif /* __linux__ */ + +static const struct ck_ec_mode sp = { + .ops = &test_ops, + .single_producer = true +}; + +static const struct ck_ec_mode mp = { + .ops = &test_ops, + .single_producer = false +}; + +static void test_update_counter_32(const struct ck_ec_mode *mode) +{ + struct ck_ec32 ec = CK_EC_INITIALIZER; + + assert(ck_ec_value(&ec) == 0); + + ck_ec_inc(&ec, mode); + assert(ck_ec_value(&ec) == 1); + + uint32_t old = ck_ec_add(&ec, mode, 42); + assert(old == 1); + assert(ck_ec_value(&ec) == 43); + return; +} + +#ifdef CK_F_EC64 +static void test_update_counter_64(const struct ck_ec_mode *mode) +{ + struct ck_ec64 ec = CK_EC_INITIALIZER; + + assert(ck_ec_value(&ec) == 0); + + ck_ec_inc(&ec, mode); + assert(ck_ec_value(&ec) == 1); + + uint64_t old = ck_ec_add(&ec, mode, 42); + assert(old == 1); + assert(ck_ec_value(&ec) == 43); + return; +} +#endif + +static void test_deadline(void) +{ + struct timespec deadline; + + assert(ck_ec_deadline(&deadline, &sp, NULL) == 0); + assert(deadline.tv_sec == TIME_MAX); + + { + const struct timespec timeout = { + .tv_sec = 1, + .tv_nsec = 1000 + }; + const struct timespec no_timeout = { + .tv_sec = 0 + }; + struct timespec now; + + assert(ck_ec_deadline(&deadline, &sp, &timeout) == 0); + assert(ck_ec_deadline(&now, &sp, &no_timeout) == 0); + + double now_sec = now.tv_sec + 1e-9 * now.tv_nsec; + double deadline_sec = deadline.tv_sec + 1e-9 * deadline.tv_nsec; + assert(now_sec < deadline_sec); + assert(deadline_sec <= now_sec + 1 + 1000e-9); + } + + { + const struct timespec timeout = { + .tv_sec = TIME_MAX - 1, + .tv_nsec = 1000 + }; + + assert(ck_ec_deadline(&deadline, &sp, &timeout) == 0); + assert(deadline.tv_sec == TIME_MAX); + } + + return; +} + +static void test_wait_32(void) +{ + struct timespec deadline = { .tv_sec = 0 }; + struct ck_ec32 ec; + + ck_ec_init(&ec, 1); + assert(ck_ec_value(&ec) == 1); + assert(ck_ec_wait(&ec, &sp, 2, NULL) == 0); + assert(ck_ec_wait(&ec, &sp, 1, &deadline) == -1); + + { + const struct timespec timeout = { .tv_nsec = 1 }; + + assert(ck_ec_deadline(&deadline, &sp, &timeout) == 0); + assert(ck_ec_wait(&ec, &sp, 1, &deadline) == -1); + assert(ck_ec_has_waiters(&ec)); + } + + return; +} + +#ifdef CK_F_EC64 +static void test_wait_64(void) +{ + struct timespec deadline = { .tv_sec = 0 }; + struct ck_ec64 ec; + + ck_ec_init(&ec, 0); + assert(ck_ec_value(&ec) == 0); + assert(ck_ec_wait(&ec, &sp, 1, NULL) == 0); + assert(ck_ec_wait(&ec, &sp, 0, &deadline) == -1); + + { + const struct timespec timeout = { .tv_nsec = 1 }; + + assert(ck_ec_deadline(&deadline, &sp, &timeout) == 0); + assert(ck_ec_wait(&ec, &sp, 0, &deadline) == -1); + assert(ck_ec_has_waiters(&ec)); + } + + return; +} +#endif + +static int pred(const struct 
ck_ec_wait_state *state, + struct timespec *deadline) +{ + double initial_ts = state->start.tv_sec + + 1e-9 * state->start.tv_nsec; + int *count = state->data; + + printf("pred wait: %f\n", + deadline->tv_sec + 1e-9 * deadline->tv_nsec - initial_ts); + + if ((*count)++ < 3) { + return 0; + } + + return (*count)++; +} + +/* + * Check that pred's return value is correctly bubbled up, + * and that the event count is marked as having waiters. + */ +static void test_wait_pred_32(void) +{ + struct ck_ec32 ec = CK_EC_INITIALIZER; + int count = 0; + + assert(!ck_ec_has_waiters(&ec)); + assert(ck_ec_wait_pred(&ec, &sp, 0, pred, &count, NULL) == 4); + assert(ck_ec_has_waiters(&ec)); + assert(count == 5); + return; +} + +#ifdef CK_F_EC64 +static int pred2(const struct ck_ec_wait_state *state, + struct timespec *deadline) +{ + double initial_ts = state->start.tv_sec + + 1e-9 * state->start.tv_nsec; + int *count = state->data; + + printf("pred2 wait: %f\n", + deadline->tv_sec + 1e-9 * deadline->tv_nsec - initial_ts); + + *deadline = state->now; + deadline->tv_sec++; + + (*count)++; + return 0; +} + +/* + * wait_pred_64 is nearly identical to _32. Now check that deadline + * overriding works. + */ +static void test_wait_pred_64(void) +{ + const struct timespec timeout = { .tv_sec = 5 }; + struct timespec deadline; + struct ck_ec64 ec = CK_EC_INITIALIZER; + int count = 0; + + assert(!ck_ec_has_waiters(&ec)); + assert(ck_ec_deadline(&deadline, &sp, &timeout) == 0); + assert(ck_ec_wait_pred(&ec, &sp, 0, pred2, &count, &deadline) == -1); + assert(ck_ec_has_waiters(&ec)); + assert(count == 5); + return; +} +#endif + +static int woken = 0; + +static void *test_threaded_32_waiter(void *data) +{ + struct ck_ec32 *ec = data; + + ck_ec_wait(ec, &sp, 0, NULL); + ck_pr_store_int(&woken, 1); + return NULL; +} + +static void test_threaded_inc_32(const struct ck_ec_mode *mode) +{ + struct ck_ec32 ec = CK_EC_INITIALIZER; + pthread_t waiter; + + ck_pr_store_int(&woken, 0); + + pthread_create(&waiter, NULL, test_threaded_32_waiter, &ec); + usleep(10000); + + assert(ck_pr_load_int(&woken) == 0); + ck_ec_inc(&ec, mode); + + pthread_join(waiter, NULL); + assert(ck_pr_load_int(&woken) == 1); + return; +} + +static void test_threaded_add_32(const struct ck_ec_mode *mode) +{ + struct ck_ec32 ec = CK_EC_INITIALIZER; + pthread_t waiter; + + ck_pr_store_int(&woken, 0); + + pthread_create(&waiter, NULL, test_threaded_32_waiter, &ec); + usleep(10000); + + assert(ck_pr_load_int(&woken) == 0); + ck_ec_add(&ec, mode, 4); + + pthread_join(waiter, NULL); + assert(ck_pr_load_int(&woken) == 1); + return; +} + +#ifdef CK_F_EC64 +static void *test_threaded_64_waiter(void *data) +{ + struct ck_ec64 *ec = data; + + ck_ec_wait(ec, &sp, 0, NULL); + ck_pr_store_int(&woken, 1); + return NULL; +} + +static void test_threaded_inc_64(const struct ck_ec_mode *mode) +{ + struct ck_ec64 ec = CK_EC_INITIALIZER; + pthread_t waiter; + + ck_pr_store_int(&woken, 0); + + pthread_create(&waiter, NULL, test_threaded_64_waiter, &ec); + usleep(10000); + + assert(ck_pr_load_int(&woken) == 0); + ck_ec_inc(&ec, mode); + + pthread_join(waiter, NULL); + assert(ck_pr_load_int(&woken) == 1); + return; +} + +static void test_threaded_add_64(const struct ck_ec_mode *mode) +{ + struct ck_ec64 ec = CK_EC_INITIALIZER; + pthread_t waiter; + + ck_pr_store_int(&woken, 0); + + pthread_create(&waiter, NULL, test_threaded_64_waiter, &ec); + usleep(10000); + + assert(ck_pr_load_int(&woken) == 0); + ck_ec_add(&ec, mode, 4); + + pthread_join(waiter, NULL); + 
assert(ck_pr_load_int(&woken) == 1); + return; +} +#endif + +int main(int argc, char **argv) +{ + (void)argc; + (void)argv; + + if (test_ops.gettime == NULL || + test_ops.wake32 == NULL || + test_ops.wait32 == NULL) { + printf("No ck_ec ops for this platform. Trivial success.\n"); + return 0; + } + + test_update_counter_32(&sp); +#ifdef CK_F_EC64 + test_update_counter_64(&sp); +#endif + printf("test_update_counter SP passed.\n"); + + test_update_counter_32(&mp); +#ifdef CK_F_EC64 + test_update_counter_64(&mp); +#endif + printf("test_update_counter MP passed.\n"); + + test_deadline(); + printf("test_deadline passed.\n"); + + test_wait_32(); +#ifdef CK_F_EC64 + test_wait_64(); +#endif + printf("test_wait passed.\n"); + + test_wait_pred_32(); +#ifdef CK_F_EC64 + test_wait_pred_64(); +#endif + printf("test_wait_pred passed.\n"); + + test_threaded_inc_32(&sp); + test_threaded_add_32(&sp); +#ifdef CK_F_EC64 + test_threaded_inc_64(&sp); + test_threaded_add_64(&sp); +#endif + printf("test_threaded SP passed.\n"); + + test_threaded_inc_32(&mp); + test_threaded_add_32(&mp); +#ifdef CK_F_EC64 + test_threaded_inc_64(&mp); + test_threaded_add_64(&mp); +#endif + printf("test_threaded MP passed.\n"); + return 0; +} diff --git a/regressions/ck_ec/validate/fuzz_harness.h b/regressions/ck_ec/validate/fuzz_harness.h new file mode 100644 index 0000000..8ba6ebe --- /dev/null +++ b/regressions/ck_ec/validate/fuzz_harness.h @@ -0,0 +1,95 @@ +#ifndef FUZZ_HARNESS_H +#define FUZZ_HARNESS_H +#include +#include +#include +#include +#include + +#if defined(USE_LIBFUZZER) +#define TEST(function, examples) \ + void LLVMFuzzerInitialize(int *argcp, char ***argvp); \ + int LLVMFuzzerTestOneInput(const void *data, size_t n); \ + \ + void LLVMFuzzerInitialize(int *argcp, char ***argvp) \ + { \ + static char size[128]; \ + static char *argv[1024]; \ + int argc = *argcp; \ + \ + assert(argc < 1023); \ + \ + int r = snprintf(size, sizeof(size), \ + "-max_len=%zu", sizeof(examples[0])); \ + assert((size_t)r < sizeof(size)); \ + \ + memcpy(argv, *argvp, argc * sizeof(argv[0])); \ + argv[argc++] = size; \ + \ + *argcp = argc; \ + *argvp = argv; \ + \ + for (size_t i = 0; \ + i < sizeof(examples) / sizeof(examples[0]); \ + i++) { \ + assert(function(&examples[i]) == 0); \ + } \ + \ + return; \ + } \ + \ + int LLVMFuzzerTestOneInput(const void *data, size_t n) \ + { \ + char buf[sizeof(examples[0])]; \ + \ + memset(buf, 0, sizeof(buf)); \ + if (n < sizeof(buf)) { \ + memcpy(buf, data, n); \ + } else { \ + memcpy(buf, data, sizeof(buf)); \ + } \ + \ + assert(function((const void *)buf) == 0); \ + return 0; \ + } +#elif defined(USE_AFL) +#define TEST(function, examples) \ + int main(int argc, char **argv) \ + { \ + char buf[sizeof(examples[0])]; \ + \ + (void)argc; \ + (void)argv; \ + for (size_t i = 0; \ + i < sizeof(examples) / sizeof(examples[0]); \ + i++) { \ + assert(function(&examples[i]) == 0); \ + } \ + \ + \ + while (__AFL_LOOP(10000)) { \ + memset(buf, 0, sizeof(buf)); \ + read(0, buf, sizeof(buf)); \ + \ + assert(function((const void *)buf) == 0); \ + } \ + \ + return 0; \ + } +#else +#define TEST(function, examples) \ + int main(int argc, char **argv) \ + { \ + (void)argc; \ + (void)argv; \ + \ + for (size_t i = 0; \ + i < sizeof(examples) / sizeof(examples[0]); \ + i++) { \ + assert(function(&examples[i]) == 0); \ + } \ + \ + return 0; \ + } +#endif +#endif /* !FUZZ_HARNESS_H */ diff --git a/regressions/ck_ec/validate/prop_test_slow_wakeup.c b/regressions/ck_ec/validate/prop_test_slow_wakeup.c new file mode 100644 index 
0000000..d172676 --- /dev/null +++ b/regressions/ck_ec/validate/prop_test_slow_wakeup.c @@ -0,0 +1,110 @@ +#include +#include + +#include "fuzz_harness.h" + +static int gettime(const struct ck_ec_ops *, struct timespec *out); +static void wake32(const struct ck_ec_ops *, const uint32_t *); +static void wait32(const struct ck_ec_wait_state *, const uint32_t *, + uint32_t, const struct timespec *); +static void wake64(const struct ck_ec_ops *, const uint64_t *); +static void wait64(const struct ck_ec_wait_state *, const uint64_t *, + uint64_t, const struct timespec *); + +static const struct ck_ec_ops test_ops = { + .gettime = gettime, + .wait32 = wait32, + .wait64 = wait64, + .wake32 = wake32, + .wake64 = wake64 +}; + +static int gettime(const struct ck_ec_ops *ops, struct timespec *out) +{ + (void)out; + + assert(ops == &test_ops); + return -1; +} + +static void wait32(const struct ck_ec_wait_state *wait_state, + const uint32_t *addr, uint32_t expected, + const struct timespec *deadline) +{ + (void)addr; + (void)expected; + (void)deadline; + + assert(wait_state->ops == &test_ops); + return; +} + +static void wait64(const struct ck_ec_wait_state *wait_state, + const uint64_t *addr, uint64_t expected, + const struct timespec *deadline) +{ + (void)addr; + (void)expected; + (void)deadline; + + assert(wait_state->ops == &test_ops); + return; +} + +static void wake32(const struct ck_ec_ops *ops, const uint32_t *addr) +{ + (void)addr; + + assert(ops == &test_ops); + return; +} + +static void wake64(const struct ck_ec_ops *ops, const uint64_t *addr) +{ + (void)addr; + + assert(ops == &test_ops); + return; +} + +/* + * Check that calling ck_ec{32,64}_wake always clears the waiting bit. + */ + +struct example { + uint64_t value; +}; + +const struct example examples[] = { + { 0 }, + { 1 }, + { 1UL << 30 }, + { 1UL << 31 }, + { INT32_MAX }, + { INT64_MAX }, + { 1ULL << 62 }, + { 1ULL << 63 }, +}; + +static inline int test_slow_wakeup(const struct example *example) +{ + { + struct ck_ec32 ec = { .counter = example->value }; + + ck_ec32_wake(&ec, &test_ops); + assert(!ck_ec32_has_waiters(&ec)); + } + +#ifdef CK_F_EC64 + { + struct ck_ec64 ec = { .counter = example->value }; + + ck_ec64_wake(&ec, &test_ops); + assert(!ck_ec64_has_waiters(&ec)); + } +#endif /* CK_F_EC64 */ + + return 0; +} + +TEST(test_slow_wakeup, examples) diff --git a/regressions/ck_ec/validate/prop_test_timeutil_add.c b/regressions/ck_ec/validate/prop_test_timeutil_add.c new file mode 100644 index 0000000..bd44607 --- /dev/null +++ b/regressions/ck_ec/validate/prop_test_timeutil_add.c @@ -0,0 +1,101 @@ +#include +#include +#include + +#include "../../../src/ck_ec_timeutil.h" +#include "fuzz_harness.h" + +#if ULONG_MAX > 4294967295 +typedef unsigned __int128 dword_t; +#else +typedef uint64_t dword_t; +#endif + +struct example { + struct timespec ts; + struct timespec inc; +}; + +static const struct example examples[] = { + { + { + 42, + 100 + }, + { + 1, + 2 + } + }, + { + { + 42, + 100 + }, + { + 1, + NSEC_MAX + } + }, + { + { + 42, + NSEC_MAX + }, + { + 0, + NSEC_MAX + } + }, + { + { + TIME_MAX - 1, + 1000 + }, + { + 2, + NSEC_MAX + } + } +}; + +static struct timespec normalize_ts(const struct timespec ts) +{ + struct timespec ret = ts; + + if (ret.tv_sec < 0) { + ret.tv_sec = ~ret.tv_sec; + } + + if (ret.tv_nsec < 0) { + ret.tv_nsec = ~ret.tv_nsec; + } + + ret.tv_nsec %= NSEC_MAX + 1; + return ret; +} + +static dword_t ts_to_nanos(const struct timespec ts) +{ + return (dword_t)ts.tv_sec * (NSEC_MAX + 1) + ts.tv_nsec; +} + +static 
inline int test_timespec_add(const struct example *example) +{ + const struct timespec ts = normalize_ts(example->ts); + const struct timespec inc = normalize_ts(example->inc); + const struct timespec actual = timespec_add(ts, inc); + const dword_t nanos = ts_to_nanos(ts) + ts_to_nanos(inc); + + if (nanos / (NSEC_MAX + 1) > TIME_MAX) { + assert(actual.tv_sec == TIME_MAX); + assert(actual.tv_nsec == NSEC_MAX); + } else { + assert(actual.tv_sec == (time_t)(nanos / (NSEC_MAX + 1))); + assert(actual.tv_nsec == (long)(nanos % (NSEC_MAX + 1))); + } + + return 0; +} + +TEST(test_timespec_add, examples) diff --git a/regressions/ck_ec/validate/prop_test_timeutil_add_ns.c b/regressions/ck_ec/validate/prop_test_timeutil_add_ns.c new file mode 100644 index 0000000..b62e1c7 --- /dev/null +++ b/regressions/ck_ec/validate/prop_test_timeutil_add_ns.c @@ -0,0 +1,88 @@ +#include + +#include "../../../src/ck_ec_timeutil.h" +#include "fuzz_harness.h" + +#if ULONG_MAX > 4294967295 +typedef unsigned __int128 dword_t; +#else +typedef uint64_t dword_t; +#endif + +struct example { + struct timespec ts; + uint32_t ns; +}; + +static const struct example examples[] = { + { + { + 42, + 100 + }, + 1 + }, + { + { + 42, + 100 + }, + 2 * NSEC_MAX + }, + { + { + 42, + NSEC_MAX + }, + NSEC_MAX + }, + { + { + TIME_MAX - 1, + 1000 + }, + 2 * NSEC_MAX + } +}; + +static inline int test_timespec_add_ns(const struct example *example) +{ + struct timespec ts = { + .tv_sec = example->ts.tv_sec, + .tv_nsec = example->ts.tv_nsec + }; + const uint32_t ns = example->ns; + + if (ts.tv_sec < 0) { + ts.tv_sec = ~ts.tv_sec; + } + + if (ts.tv_nsec < 0) { + ts.tv_nsec = ~ts.tv_nsec; + } + + ts.tv_nsec %= NSEC_MAX + 1; + + const struct timespec actual = timespec_add_ns(ts, ns); + + dword_t nanos = + (dword_t)ts.tv_sec * (NSEC_MAX + 1) + ts.tv_nsec; + + if (ns > NSEC_MAX) { + nanos += NSEC_MAX + 1; + } else { + nanos += ns; + } + + if (nanos / (NSEC_MAX + 1) > TIME_MAX) { + assert(actual.tv_sec == TIME_MAX); + assert(actual.tv_nsec == NSEC_MAX); + } else { + assert(actual.tv_sec == (time_t)(nanos / (NSEC_MAX + 1))); + assert(actual.tv_nsec == (long)(nanos % (NSEC_MAX + 1))); + } + + return 0; +} + +TEST(test_timespec_add_ns, examples) diff --git a/regressions/ck_ec/validate/prop_test_timeutil_cmp.c b/regressions/ck_ec/validate/prop_test_timeutil_cmp.c new file mode 100644 index 0000000..00e7b2e --- /dev/null +++ b/regressions/ck_ec/validate/prop_test_timeutil_cmp.c @@ -0,0 +1,99 @@ +#include + +#include "../../../src/ck_ec_timeutil.h" +#include "fuzz_harness.h" + +#if ULONG_MAX > 4294967295 +typedef __int128 dsword_t; +#else +typedef int64_t dsword_t; +#endif + +struct example { + struct timespec x; + struct timespec y; +}; + +static const struct example examples[] = { + { + { + 42, + 100 + }, + { + 1, + 2 + } + }, + { + { + 42, + 100 + }, + { + 1, + NSEC_MAX + } + }, + { + { + 42, + NSEC_MAX + }, + { + 0, + NSEC_MAX + } + }, + { + { + TIME_MAX - 1, + 1000 + }, + { + 2, + NSEC_MAX + } + } +}; + +static struct timespec normalize_ts(const struct timespec ts) +{ + struct timespec ret = ts; + + if (ret.tv_nsec < 0) { + ret.tv_nsec = ~ret.tv_nsec; + } + + ret.tv_nsec %= NSEC_MAX + 1; + return ret; +} + +static dsword_t ts_to_nanos(const struct timespec ts) +{ + return (dsword_t)ts.tv_sec * (NSEC_MAX + 1) + ts.tv_nsec; +} + +static inline int test_timespec_cmp(const struct example *example) +{ + const struct timespec x = normalize_ts(example->y); + const struct timespec y = normalize_ts(example->x); + const dsword_t x_nanos = ts_to_nanos(x); + const 
dsword_t y_nanos = ts_to_nanos(y); + + assert(timespec_cmp(x, x) == 0); + assert(timespec_cmp(y, y) == 0); + assert(timespec_cmp(x, y) == -timespec_cmp(y, x)); + + if (x_nanos == y_nanos) { + assert(timespec_cmp(x, y) == 0); + } else if (x_nanos < y_nanos) { + assert(timespec_cmp(x, y) == -1); + } else { + assert(timespec_cmp(x, y) == 1); + } + + return 0; +} + +TEST(test_timespec_cmp, examples) diff --git a/regressions/ck_ec/validate/prop_test_timeutil_scale.c b/regressions/ck_ec/validate/prop_test_timeutil_scale.c new file mode 100644 index 0000000..eb3040f --- /dev/null +++ b/regressions/ck_ec/validate/prop_test_timeutil_scale.c @@ -0,0 +1,41 @@ +#include + +#include "../../../src/ck_ec_timeutil.h" +#include "fuzz_harness.h" + +struct example { + uint32_t nsec; + uint32_t multiplier; + unsigned int shift; +}; + +static const struct example examples[] = { + { + UINT32_MAX, + UINT32_MAX, + 1 + }, + { + 10, + 20, + 0 + } +}; + +static inline int test_wait_time_scale(const struct example *example) +{ + const uint32_t nsec = example->nsec; + const uint32_t multiplier = example->multiplier; + const unsigned int shift = example->shift % 32; + uint32_t actual = wait_time_scale(nsec, multiplier, shift); + uint64_t expected = ((uint64_t)nsec * multiplier) >> shift; + + if (expected > UINT32_MAX) { + expected = UINT32_MAX; + } + + assert(actual == expected); + return 0; +} + +TEST(test_wait_time_scale, examples) diff --git a/regressions/ck_ec/validate/prop_test_value.c b/regressions/ck_ec/validate/prop_test_value.c new file mode 100644 index 0000000..8f9eab8 --- /dev/null +++ b/regressions/ck_ec/validate/prop_test_value.c @@ -0,0 +1,150 @@ +#include +#include + +#include "fuzz_harness.h" + +static int gettime(const struct ck_ec_ops *, struct timespec *out); +static void wake32(const struct ck_ec_ops *, const uint32_t *); +static void wait32(const struct ck_ec_wait_state *, const uint32_t *, + uint32_t, const struct timespec *); +static void wake64(const struct ck_ec_ops *, const uint64_t *); +static void wait64(const struct ck_ec_wait_state *, const uint64_t *, + uint64_t, const struct timespec *); + +static const struct ck_ec_ops test_ops = { + .gettime = gettime, + .wait32 = wait32, + .wait64 = wait64, + .wake32 = wake32, + .wake64 = wake64 +}; + +static const struct ck_ec_mode modes[] = { + { + .single_producer = true, + .ops = &test_ops + }, + { + .single_producer = false, + .ops = &test_ops + }, +}; + +static int gettime(const struct ck_ec_ops *ops, struct timespec *out) +{ + (void)out; + + assert(ops == &test_ops); + return -1; +} + +static void wait32(const struct ck_ec_wait_state *wait_state, + const uint32_t *addr, uint32_t expected, + const struct timespec *deadline) +{ + (void)addr; + (void)expected; + (void)deadline; + + assert(wait_state->ops == &test_ops); + return; +} + +static void wait64(const struct ck_ec_wait_state *wait_state, + const uint64_t *addr, uint64_t expected, + const struct timespec *deadline) +{ + (void)addr; + (void)expected; + (void)deadline; + + assert(wait_state->ops == &test_ops); + return; +} + +static void wake32(const struct ck_ec_ops *ops, const uint32_t *addr) +{ + (void)addr; + + assert(ops == &test_ops); + return; +} + +static void wake64(const struct ck_ec_ops *ops, const uint64_t *addr) +{ + (void)addr; + + assert(ops == &test_ops); + return; +} + +/* + * Check that adding a value correctly updates the counter, and that + * incrementing after that also works. 
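+ * + * For example, with value[] = { 1, 2 }, the 32-bit counter should + * read 1 after the first add, 3 after the second, and 4 after the + * final increment; inputs and results are truncated to 31 (or 63) + * significant bits throughout.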
+ */ +struct example { + uint64_t value[2]; +}; + +static const struct example examples[] = { + { { 0, 0 } }, + { { 1, 2 } }, + { { 0, INT32_MAX - 2 } }, + { { 0, INT32_MAX - 1 } }, + { { 0, INT32_MAX } }, + { { 0, INT64_MAX - 2 } }, + { { 0, INT64_MAX - 1 } }, + { { 0, INT64_MAX } }, +}; + +static inline int test_value(const struct example *example) +{ + for (size_t i = 0; i < 2; i++) { + const struct ck_ec_mode *mode = &modes[i]; + const uint32_t value0 = example->value[0] & INT32_MAX; + const uint32_t value1 = example->value[1] & INT32_MAX; + struct ck_ec32 ec; + + ck_ec32_init(&ec, 0); + assert(ck_ec32_value(&ec) == 0); + + ck_ec32_add(&ec, mode, value0); + assert(ck_ec32_value(&ec) == value0); + + ck_ec32_add(&ec, mode, value1); + assert(ck_ec32_value(&ec) == + ((value0 + value1) & INT32_MAX)); + + + ck_ec32_inc(&ec, mode); + assert(ck_ec32_value(&ec) == + ((value0 + value1 + 1) & INT32_MAX)); + } + +#ifdef CK_F_EC64 + for (size_t i = 0; i < 2; i++) { + const struct ck_ec_mode *mode = &modes[i]; + const uint64_t value0 = example->value[0] & INT64_MAX; + const uint64_t value1 = example->value[1] & INT64_MAX; + struct ck_ec64 ec; + + ck_ec64_init(&ec, 0); + assert(ck_ec64_value(&ec) == 0); + + ck_ec64_add(&ec, mode, value0); + assert(ck_ec64_value(&ec) == value0); + + ck_ec64_add(&ec, mode, value1); + assert(ck_ec64_value(&ec) == + ((value0 + value1) & INT64_MAX)); + + ck_ec64_inc(&ec, mode); + assert(ck_ec64_value(&ec) == + ((value0 + value1 + 1) & INT64_MAX)); + } +#endif /* CK_F_EC64 */ + + return 0; +} + +TEST(test_value, examples) diff --git a/regressions/ck_ec/validate/prop_test_wakeup.c b/regressions/ck_ec/validate/prop_test_wakeup.c new file mode 100644 index 0000000..a858e2b --- /dev/null +++ b/regressions/ck_ec/validate/prop_test_wakeup.c @@ -0,0 +1,193 @@ +#include +#include +#include + +#include "fuzz_harness.h" + +static int gettime(const struct ck_ec_ops *, struct timespec *out); +static void wake32(const struct ck_ec_ops *, const uint32_t *); +static void wait32(const struct ck_ec_wait_state *, const uint32_t *, + uint32_t, const struct timespec *); +static void wake64(const struct ck_ec_ops *, const uint64_t *); +static void wait64(const struct ck_ec_wait_state *, const uint64_t *, + uint64_t, const struct timespec *); + +static const struct ck_ec_ops test_ops = { + .gettime = gettime, + .wait32 = wait32, + .wait64 = wait64, + .wake32 = wake32, + .wake64 = wake64 +}; + +static const struct ck_ec_mode modes[] = { + { + .single_producer = true, + .ops = &test_ops + }, + { + .single_producer = false, + .ops = &test_ops + }, +}; + +static bool woken = false; + +static int gettime(const struct ck_ec_ops *ops, struct timespec *out) +{ + (void)out; + + assert(ops == &test_ops); + return -1; +} + +static void wait32(const struct ck_ec_wait_state *state, const uint32_t *addr, + uint32_t expected, const struct timespec *deadline) +{ + (void)addr; + (void)expected; + (void)deadline; + + assert(state->ops == &test_ops); + return; +} + +static void wait64(const struct ck_ec_wait_state *state, const uint64_t *addr, + uint64_t expected, const struct timespec *deadline) +{ + (void)addr; + (void)expected; + (void)deadline; + + assert(state->ops == &test_ops); + return; +} + +static void wake32(const struct ck_ec_ops *ops, const uint32_t *addr) +{ + (void)addr; + + assert(ops == &test_ops); + woken = true; + return; +} + +static void wake64(const struct ck_ec_ops *ops, const uint64_t *addr) +{ + (void)addr; + + assert(ops == &test_ops); + woken = true; + return; +} + +/* + * Check that 
adding a value calls the wake function when the sign bit + * is set, and does not call it when the sign bit is unset (modulo + * wrap-around). + */ +struct example { + uint64_t initial; + uint64_t increment; +}; + +const struct example examples[] = { + { INT32_MAX, 0 }, + { INT32_MAX, 1 }, + { 0 + (0U << 31), 0 }, + { 1 + (0U << 31), 0 }, + { 0 + (1U << 31), 0 }, + { 1 + (1U << 31), 0 }, + + { 0 + (0U << 31), 1 }, + { 1 + (0U << 31), 1 }, + { 0 + (1U << 31), 1 }, + { 1 + (1U << 31), 1 }, + + { 0 + (0U << 31), INT32_MAX }, + { 1 + (0U << 31), INT32_MAX }, + { 0 + (1U << 31), INT32_MAX }, + { 1 + (1U << 31), INT32_MAX }, + + { INT64_MAX, 0 }, + { INT64_MAX, 1 }, + { 0 + (0ULL << 63), 0 }, + { 1 + (0ULL << 63), 0 }, + { 0 + (1ULL << 63), 0 }, + { 1 + (1ULL << 63), 0 }, + + { 0 + (0ULL << 63), 1 }, + { 1 + (0ULL << 63), 1 }, + { 0 + (1ULL << 63), 1 }, + { 1 + (1ULL << 63), 1 }, + + { 0 + (0ULL << 63), INT64_MAX }, + { 1 + (0ULL << 63), INT64_MAX }, + { 0 + (1ULL << 63), INT64_MAX }, + { 1 + (1ULL << 63), INT64_MAX }, +}; + +static inline int test_wakeup(const struct example *example) +{ + for (size_t i = 0; i < 2; i++) { + const struct ck_ec_mode *mode = &modes[i]; + const uint32_t increment = example->increment & INT32_MAX; + struct ck_ec32 ec; + bool should_wake; + bool may_wake; + + ec.counter = example->initial; + should_wake = increment != 0 && (ec.counter & (1U << 31)); + may_wake = should_wake || (ec.counter & (1U << 31)); + + woken = false; + ck_ec32_add(&ec, mode, increment); + assert(!should_wake || woken); + assert(may_wake || !woken); + assert(!woken || ck_ec32_has_waiters(&ec) == false); + + /* Test inc now. */ + ec.counter = example->initial + increment; + should_wake = ec.counter & (1U << 31); + may_wake = should_wake || ((ec.counter + 1) & (1U << 31)); + + woken = false; + ck_ec32_inc(&ec, mode); + assert(!should_wake || woken); + assert(may_wake || !woken); + assert(!woken || ck_ec32_has_waiters(&ec) == false); + } + +#ifdef CK_F_EC64 + for (size_t i = 0; i < 2; i++) { + const struct ck_ec_mode *mode = &modes[i]; + const uint64_t increment = example->increment & INT64_MAX; + struct ck_ec64 ec; + bool should_wake; + bool may_wake; + + ec.counter = example->initial; + should_wake = increment != 0 && (ec.counter & 1); + may_wake = should_wake || (ec.counter & 1); + + woken = false; + ck_ec64_add(&ec, mode, increment); + assert(!should_wake || woken); + assert(may_wake || !woken); + assert(!woken || ck_ec64_has_waiters(&ec) == false); + + /* Test inc now. 
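For the 64-bit count the flag is the low bit of the counter word, so ck_ec64_inc must call the wake op exactly when that bit was already set beforehand.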
*/ + ec.counter = example->initial + increment; + should_wake = ec.counter & 1; + + woken = false; + ck_ec64_inc(&ec, mode); + assert(should_wake == woken); + assert(!woken || ck_ec64_has_waiters(&ec) == false); + } +#endif /* CK_F_EC64 */ + + return 0; +} + +TEST(test_wakeup, examples) diff --git a/src/Makefile.in b/src/Makefile.in index 0d84e76..0b7ae7b 100644 --- a/src/Makefile.in +++ b/src/Makefile.in @@ -11,6 +11,7 @@ OBJECTS=ck_barrier_centralized.o \ ck_barrier_dissemination.o \ ck_barrier_tournament.o \ ck_barrier_mcs.o \ + ck_ec.o \ ck_epoch.o \ ck_ht.o \ ck_hp.o \ @@ -29,6 +30,9 @@ libck.a: $(OBJECTS) ck_array.o: $(INCLUDE_DIR)/ck_array.h $(SDIR)/ck_array.c $(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_array.o $(SDIR)/ck_array.c +ck_ec.o: $(INCLUDE_DIR)/ck_ec.h $(SDIR)/ck_ec.c $(SDIR)/ck_ec_timeutil.h + $(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_ec.o $(SDIR)/ck_ec.c + ck_epoch.o: $(INCLUDE_DIR)/ck_epoch.h $(SDIR)/ck_epoch.c $(INCLUDE_DIR)/ck_stack.h $(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_epoch.o $(SDIR)/ck_epoch.c diff --git a/src/ck_ec.c b/src/ck_ec.c new file mode 100644 index 0000000..937c3b3 --- /dev/null +++ b/src/ck_ec.c @@ -0,0 +1,414 @@ +#include +#include + +#include "ck_ec_timeutil.h" + +#define DEFAULT_BUSY_LOOP_ITER 100U + +/* + * The 2ms, 8x/iter default parameter hit 1.024 seconds after 3 + * iterations. + */ +#define DEFAULT_INITIAL_WAIT_NS 2000000L /* Start at 2 ms */ +/* Grow the wait time 8x/iteration. */ +#define DEFAULT_WAIT_SCALE_FACTOR 8 +#define DEFAULT_WAIT_SHIFT_COUNT 0 + +struct ck_ec32_slow_path_state { + struct ck_ec32 *ec; + uint32_t flagged_word; +}; + +#ifdef CK_F_EC64 +struct ck_ec64_slow_path_state { + struct ck_ec64 *ec; + uint64_t flagged_word; +}; +#endif + +/* Once we've waited for >= 1 sec, go for the full deadline. */ +static const struct timespec final_wait_time = { + .tv_sec = 1 +}; + +void ck_ec32_wake(struct ck_ec32 *ec, const struct ck_ec_ops *ops) +{ + /* Spurious wake-ups are OK. Clear the flag before futexing. */ + ck_pr_and_32(&ec->counter, (1U << 31) - 1); + ops->wake32(ops, &ec->counter); + return; +} + +int ck_ec32_wait_slow(struct ck_ec32 *ec, + const struct ck_ec_ops *ops, + uint32_t old_value, + const struct timespec *deadline) +{ + return ck_ec32_wait_pred_slow(ec, ops, old_value, + NULL, NULL, deadline); +} + +#ifdef CK_F_EC64 +void ck_ec64_wake(struct ck_ec64 *ec, const struct ck_ec_ops *ops) +{ + ck_pr_and_64(&ec->counter, ~1); + ops->wake64(ops, &ec->counter); + return; +} + +int ck_ec64_wait_slow(struct ck_ec64 *ec, + const struct ck_ec_ops *ops, + uint64_t old_value, + const struct timespec *deadline) +{ + return ck_ec64_wait_pred_slow(ec, ops, old_value, + NULL, NULL, deadline); +} +#endif + +int ck_ec_deadline_impl(struct timespec *new_deadline, + const struct ck_ec_ops *ops, + const struct timespec *timeout) +{ + struct timespec now; + int r; + + if (timeout == NULL) { + new_deadline->tv_sec = TIME_MAX; + new_deadline->tv_nsec = NSEC_MAX; + return 0; + } + + r = ops->gettime(ops, &now); + if (r != 0) { + return -1; + } + + *new_deadline = timespec_add(now, *timeout); + return 0; +} + +/* The rest of the file implements wait_pred_slow. */ + +/* + * Returns a timespec value for deadline_ptr. If deadline_ptr is NULL, + * returns a timespec far in the future. + */ +static struct timespec canonical_deadline(const struct timespec *deadline_ptr) +{ + if (deadline_ptr == NULL) { + return (struct timespec) { .tv_sec = TIME_MAX }; + } + + return *deadline_ptr; +} + +/* + * Really slow (sleeping) path for ck_ec_wait. 
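Only entered once busy-waiting on the counter has failed to observe a change; with the default parameters, successive partial deadlines land roughly 2 ms, 16 ms, 128 ms and 1.024 s after the start, after which each sleep runs to the caller's deadline.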
Drives the exponential + * backoff scheme to sleep for longer and longer periods of time, + * until either the sleep function returns true (the eventcount's + * value has changed), or the predicate returns non-0 (something else + * has changed). + * + * If deadline is ever reached, returns -1 (timeout). + * + * TODO: add some form of randomisation to the intermediate timeout + * values. + */ +static int exponential_backoff( + struct ck_ec_wait_state *wait_state, + bool (*sleep)(const void *sleep_state, + const struct ck_ec_wait_state *wait_state, + const struct timespec *partial_deadline), + const void *sleep_state, + int (*pred)(const struct ck_ec_wait_state *state, + struct timespec *deadline), + const struct timespec *deadline) +{ + struct timespec begin; + struct timespec stop_backoff; + const struct ck_ec_ops *ops = wait_state->ops; + const uint32_t scale_factor = (ops->wait_scale_factor != 0) + ? ops->wait_scale_factor + : DEFAULT_WAIT_SCALE_FACTOR; + const uint32_t shift_count = (ops->wait_shift_count != 0) + ? ops->wait_shift_count + : DEFAULT_WAIT_SHIFT_COUNT; + uint32_t wait_ns = (ops->initial_wait_ns != 0) + ? ops->initial_wait_ns + : DEFAULT_INITIAL_WAIT_NS; + bool first = true; + + for (;;) { + struct timespec now; + struct timespec partial_deadline; + + if (check_deadline(&now, ops, *deadline) == true) { + /* Timeout. Bail out. */ + return -1; + } + + if (first) { + begin = now; + wait_state->start = begin; + stop_backoff = timespec_add(begin, final_wait_time); + first = false; + } + + wait_state->now = now; + if (timespec_cmp(now, stop_backoff) >= 0) { + partial_deadline = *deadline; + } else { + do { + partial_deadline = + timespec_add_ns(begin, wait_ns); + wait_ns = + wait_time_scale(wait_ns, + scale_factor, + shift_count); + } while (timespec_cmp(partial_deadline, now) <= 0); + } + + if (pred != NULL) { + int r = pred(wait_state, &partial_deadline); + if (r != 0) { + return r; + } + } + + /* Canonicalize deadlines in the far future to NULL. */ + if (sleep(sleep_state, wait_state, + ((partial_deadline.tv_sec == TIME_MAX) + ? NULL : &partial_deadline)) == true) { + return 0; + } + } +} + +/* + * Loops up to BUSY_LOOP_ITER times, or until ec's counter value + * (including the flag) differs from old_value. + * + * Returns the new value in ec. + */ +#define DEF_WAIT_EASY(W) \ + static uint##W##_t ck_ec##W##_wait_easy(struct ck_ec##W* ec, \ + const struct ck_ec_ops *ops, \ + uint##W##_t expected) \ + { \ + uint##W##_t current = ck_pr_load_##W(&ec->counter); \ + size_t n = (ops->busy_loop_iter != 0) \ + ? ops->busy_loop_iter \ + : DEFAULT_BUSY_LOOP_ITER; \ + \ + for (size_t i = 0; \ + i < n && current == expected; \ + i++) { \ + ck_pr_stall(); \ + current = ck_pr_load_##W(&ec->counter); \ + } \ + \ + return current; \ + } + +DEF_WAIT_EASY(32) +#ifdef CK_F_EC64 +DEF_WAIT_EASY(64) +#endif +#undef DEF_WAIT_EASY +/* + * Attempts to upgrade ec->counter from unflagged to flagged. + * + * Returns true if the event count has changed. Otherwise, ec's + * counter word is equal to flagged on return, or has been at some + * time before the return. + */ +#define DEF_UPGRADE(W) \ + static bool ck_ec##W##_upgrade(struct ck_ec##W* ec, \ + uint##W##_t current, \ + uint##W##_t unflagged, \ + uint##W##_t flagged) \ + { \ + uint##W##_t old_word; \ + \ + if (current == flagged) { \ + /* Nothing to do, no change. */ \ + return false; \ + } \ + \ + if (current != unflagged) { \ + /* We have a different counter value! */ \ + return true; \ + } \ + \ + /* \ + * Flag the counter value. 
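For 32-bit counts the flag is the sign bit; for 64-bit counts it is the low bit.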
The CAS only fails if the \ + * counter is already flagged, or has a new value. \ + */ \ + return (ck_pr_cas_##W##_value(&ec->counter, \ + unflagged, flagged, \ + &old_word) == false && \ + old_word != flagged); \ + } + +DEF_UPGRADE(32) +#ifdef CK_F_EC64 +DEF_UPGRADE(64) +#endif +#undef DEF_UPGRADE + +/* + * Blocks until partial_deadline on the ck_ec. Returns true if the + * eventcount's value has changed. If partial_deadline is NULL, wait + * forever. + */ +static bool ck_ec32_wait_slow_once(const void *vstate, + const struct ck_ec_wait_state *wait_state, + const struct timespec *partial_deadline) +{ + const struct ck_ec32_slow_path_state *state = vstate; + const struct ck_ec32 *ec = state->ec; + const uint32_t flagged_word = state->flagged_word; + + wait_state->ops->wait32(wait_state, &ec->counter, + flagged_word, partial_deadline); + return ck_pr_load_32(&ec->counter) != flagged_word; +} + +#ifdef CK_F_EC64 +static bool ck_ec64_wait_slow_once(const void *vstate, + const struct ck_ec_wait_state *wait_state, + const struct timespec *partial_deadline) +{ + const struct ck_ec64_slow_path_state *state = vstate; + const struct ck_ec64 *ec = state->ec; + const uint64_t flagged_word = state->flagged_word; + + /* futex_wait will only compare the low 32 bits. Perform a + * full comparison here to maximise the changes of catching an + * ABA in the low 32 bits. + */ + if (ck_pr_load_64(&ec->counter) != flagged_word) { + return true; + } + + wait_state->ops->wait64(wait_state, &ec->counter, + flagged_word, partial_deadline); + return ck_pr_load_64(&ec->counter) != flagged_word; +} +#endif + +/* + * The full wait logic is a lot of code (> 1KB). Encourage the + * compiler to lay this all out linearly with LIKELY annotations on + * every early exit. + */ +#define WAIT_SLOW_BODY(W, ec, ops, pred, data, deadline_ptr, \ + old_value, unflagged, flagged) \ + do { \ + struct ck_ec_wait_state wait_state = { \ + .ops = ops, \ + .data = data \ + }; \ + const struct ck_ec##W##_slow_path_state state = { \ + .ec = ec, \ + .flagged_word = flagged \ + }; \ + const struct timespec deadline = \ + canonical_deadline(deadline_ptr); \ + \ + /* Detect infinite past deadlines. */ \ + if (CK_CC_LIKELY(deadline.tv_sec <= 0)) { \ + return -1; \ + } \ + \ + for (;;) { \ + uint##W##_t current; \ + int r; \ + \ + current = ck_ec##W##_wait_easy(ec, ops, unflagged); \ + \ + /* \ + * We're about to wait harder (i.e., \ + * potentially with futex). Make sure the \ + * counter word is flagged. \ + */ \ + if (CK_CC_LIKELY( \ + ck_ec##W##_upgrade(ec, current, \ + unflagged, flagged) == true)) { \ + ck_pr_fence_acquire(); \ + return 0; \ + } \ + \ + /* \ + * By now, ec->counter == flagged_word (at \ + * some point in the past). Spin some more to \ + * heuristically let any in-flight SP inc/add \ + * to retire. This does not affect \ + * correctness, but practically eliminates \ + * lost wake-ups. \ + */ \ + current = ck_ec##W##_wait_easy(ec, ops, flagged); \ + if (CK_CC_LIKELY(current != flagged_word)) { \ + ck_pr_fence_acquire(); \ + return 0; \ + } \ + \ + r = exponential_backoff(&wait_state, \ + ck_ec##W##_wait_slow_once, \ + &state, \ + pred, &deadline); \ + if (r != 0) { \ + return r; \ + } \ + \ + if (ck_ec##W##_value(ec) != old_value) { \ + ck_pr_fence_acquire(); \ + return 0; \ + } \ + \ + /* Spurious wake-up. Redo the slow path. 
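This happens when the flag bit was cleared (e.g., by an explicit wake) without the observed value changing.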
*/ \ + } \ + } while (0) + +int ck_ec32_wait_pred_slow(struct ck_ec32 *ec, + const struct ck_ec_ops *ops, + uint32_t old_value, + int (*pred)(const struct ck_ec_wait_state *state, + struct timespec *deadline), + void *data, + const struct timespec *deadline_ptr) +{ + const uint32_t unflagged_word = old_value; + const uint32_t flagged_word = old_value | (1UL << 31); + + if (CK_CC_UNLIKELY(ck_ec32_value(ec) != old_value)) { + return 0; + } + + WAIT_SLOW_BODY(32, ec, ops, pred, data, deadline_ptr, + old_value, unflagged_word, flagged_word); +} + +#ifdef CK_F_EC64 +int ck_ec64_wait_pred_slow(struct ck_ec64 *ec, + const struct ck_ec_ops *ops, + uint64_t old_value, + int (*pred)(const struct ck_ec_wait_state *state, + struct timespec *deadline), + void *data, + const struct timespec *deadline_ptr) +{ + const uint64_t unflagged_word = old_value << 1; + const uint64_t flagged_word = unflagged_word | 1; + + if (CK_CC_UNLIKELY(ck_ec64_value(ec) != old_value)) { + return 0; + } + + WAIT_SLOW_BODY(64, ec, ops, pred, data, deadline_ptr, + old_value, unflagged_word, flagged_word); +} +#endif + +#undef WAIT_SLOW_BODY diff --git a/src/ck_ec_timeutil.h b/src/ck_ec_timeutil.h new file mode 100644 index 0000000..50cfb67 --- /dev/null +++ b/src/ck_ec_timeutil.h @@ -0,0 +1,150 @@ +#ifndef CK_EC_TIMEUTIL_H +#define CK_EC_TIMEUTIL_H +#include +#include +#include +#include +#include + +#define TIME_MAX ((time_t)((1ULL << ((sizeof(time_t) * CHAR_BIT) - 1)) - 1)) +#define NSEC_MAX ((1000L * 1000 * 1000) - 1) + +/* + * Approximates (nsec * multiplier) >> shift. Clamps to UINT32_MAX on + * overflow. + */ +CK_CC_UNUSED static uint32_t +wait_time_scale(uint32_t nsec, + uint32_t multiplier, + unsigned int shift) +{ + uint64_t temp = (uint64_t)nsec * multiplier; + uint64_t max = (uint64_t)UINT32_MAX << shift; + + if (temp >= max) { + return UINT32_MAX; + } + + return temp >> shift; +} + + +/* + * Returns ts + ns. ns is clamped to at most 1 second. Clamps the + * return value to TIME_MAX, NSEC_MAX on overflow. + * + */ +CK_CC_UNUSED static struct timespec timespec_add_ns(const struct timespec ts, + uint32_t ns) +{ + struct timespec ret = { + .tv_sec = TIME_MAX, + .tv_nsec = NSEC_MAX + }; + time_t sec; + uint32_t sum_ns; + + if (ns > (uint32_t)NSEC_MAX) { + if (ts.tv_sec >= TIME_MAX) { + return ret; + } + + ret.tv_sec = ts.tv_sec + 1; + ret.tv_nsec = ts.tv_nsec; + return ret; + } + + sec = ts.tv_sec; + sum_ns = ns + ts.tv_nsec; + if (sum_ns > NSEC_MAX) { + if (sec >= TIME_MAX) { + return ret; + } + + sec++; + sum_ns -= (NSEC_MAX + 1); + } + + ret.tv_sec = sec; + ret.tv_nsec = sum_ns; + return ret; +} + + +/* + * Returns ts + inc. If inc is negative, it is normalized to 0. + * Clamps the return value to TIME_MAX, NSEC_MAX on overflow. + */ +CK_CC_UNUSED static struct timespec timespec_add(const struct timespec ts, + const struct timespec inc) +{ + /* Initial return value is clamped to infinite future. */ + struct timespec ret = { + .tv_sec = TIME_MAX, + .tv_nsec = NSEC_MAX + }; + time_t sec; + unsigned long nsec; + + /* Non-positive delta is a no-op. Invalid nsec is another no-op. */ + if (inc.tv_sec < 0 || inc.tv_nsec < 0 || inc.tv_nsec > NSEC_MAX) { + return ts; + } + + /* Detect overflow early. 
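If the seconds alone would exceed TIME_MAX, return the clamped far-future value without looking at the nanoseconds.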
*/ + if (inc.tv_sec > TIME_MAX - ts.tv_sec) { + return ret; + } + + sec = ts.tv_sec + inc.tv_sec; + /* This sum can't overflow if the inputs are valid.*/ + nsec = (unsigned long)ts.tv_nsec + inc.tv_nsec; + + if (nsec > NSEC_MAX) { + if (sec >= TIME_MAX) { + return ret; + } + + sec++; + nsec -= (NSEC_MAX + 1); + } + + ret.tv_sec = sec; + ret.tv_nsec = nsec; + return ret; +} + +/* Compares two timespecs. Returns -1 if x < y, 0 if x == y, and 1 if x > y. */ +CK_CC_UNUSED static int timespec_cmp(const struct timespec x, + const struct timespec y) +{ + if (x.tv_sec != y.tv_sec) { + return (x.tv_sec < y.tv_sec) ? -1 : 1; + } + + if (x.tv_nsec != y.tv_nsec) { + return (x.tv_nsec < y.tv_nsec) ? -1 : 1; + } + + return 0; +} + +/* + * Overwrites now with the current CLOCK_MONOTONIC time, and returns + * true if the current time is greater than or equal to the deadline, + * or the clock is somehow broken. + */ +CK_CC_UNUSED static bool check_deadline(struct timespec *now, + const struct ck_ec_ops *ops, + const struct timespec deadline) +{ + int r; + + r = ops->gettime(ops, now); + if (r != 0) { + return true; + } + + return timespec_cmp(*now, deadline) >= 0; +} +#endif /* !CK_EC_TIMEUTIL_H */