whitespace: Strictly conform to C namespacing rules.

Samy Al Bahra 10 years ago
parent 3c2c91daa4
commit 554e2f0874
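Background for the rename: in C, identifiers that begin with an underscore followed by an uppercase letter (and any identifier containing a double underscore) are reserved for the implementation (C99/C11 §7.1.3), so guards and macros such as _CK_ARRAY_H or _CK_ELIDE_LOCK_BUSY technically sit in the compiler's namespace. A minimal, illustrative sketch of the include-guard pattern before and after this commit (not the full header):

/* Before: '_' + uppercase places the guard in the reserved namespace. */
#ifndef _CK_ARRAY_H
#define _CK_ARRAY_H
/* ... declarations ... */
#endif /* _CK_ARRAY_H */

/* After: a plain CK_ prefix stays within the application namespace. */
#ifndef CK_ARRAY_H
#define CK_ARRAY_H
/* ... declarations ... */
#endif /* CK_ARRAY_H */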

@@ -25,8 +25,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_ARRAY_H
-#define _CK_ARRAY_H
+#ifndef CK_ARRAY_H
+#define CK_ARRAY_H
 
 #include <ck_cc.h>
 #include <ck_malloc.h>
@@ -97,4 +97,4 @@ ck_array_initialized(struct ck_array *array)
 	    ((*b) = (a)->active->values[_ck_i], 1); \
 	    _ck_i++)
 
-#endif /* _CK_ARRAY_H */
+#endif /* CK_ARRAY_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_BACKOFF_H
-#define _CK_BACKOFF_H
+#ifndef CK_BACKOFF_H
+#define CK_BACKOFF_H
 
 #include <ck_cc.h>
 #include <ck_pr.h>
@@ -54,4 +54,4 @@ ck_backoff_eb(unsigned int *c)
 	return;
 }
 
-#endif /* _CK_BACKOFF_H */
+#endif /* CK_BACKOFF_H */

@@ -25,8 +25,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_BARRIER_H
-#define _CK_BARRIER_H
+#ifndef CK_BARRIER_H
+#define CK_BARRIER_H
 
 #include <ck_spinlock.h>
 
@@ -161,4 +161,4 @@ void ck_barrier_mcs_init(ck_barrier_mcs_t *, unsigned int);
 void ck_barrier_mcs_subscribe(ck_barrier_mcs_t *, ck_barrier_mcs_state_t *);
 void ck_barrier_mcs(ck_barrier_mcs_t *, ck_barrier_mcs_state_t *);
 
-#endif /* _CK_BARRIER_H */
+#endif /* CK_BARRIER_H */

@@ -26,8 +26,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_BITMAP_H
-#define _CK_BITMAP_H
+#ifndef CK_BITMAP_H
+#define CK_BITMAP_H
 
 #include <ck_cc.h>
 #include <ck_limits.h>
@@ -510,4 +510,4 @@ non_zero:
 	return true;
 }
 
-#endif /* _CK_BITMAP_H */
+#endif /* CK_BITMAP_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_BRLOCK_H
-#define _CK_BRLOCK_H
+#ifndef CK_BRLOCK_H
+#define CK_BRLOCK_H
 
 /*
  * Big reader spinlocks provide cache-local contention-free read
@@ -276,4 +276,4 @@ ck_brlock_read_unlock(struct ck_brlock_reader *reader)
 	return;
 }
 
-#endif /* _CK_BRLOCK_H */
+#endif /* CK_BRLOCK_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_BYTELOCK_H
-#define _CK_BYTELOCK_H
+#ifndef CK_BYTELOCK_H
+#define CK_BYTELOCK_H
 
 /*
  * The implementations here are derived from the work described in:
@@ -183,4 +183,4 @@ ck_bytelock_read_unlock(struct ck_bytelock *bytelock, unsigned int slot)
 	return;
 }
 
-#endif /* _CK_BYTELOCK_H */
+#endif /* CK_BYTELOCK_H */

@@ -25,8 +25,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_CC_H
-#define _CK_CC_H
+#ifndef CK_CC_H
+#define CK_CC_H
 
 #if defined(__GNUC__) || defined(__SUNPRO_C)
 #include "gcc/ck_cc.h"
@@ -152,4 +152,4 @@ ck_cc_popcount(unsigned int x)
 }
 #endif
 
-#endif /* _CK_CC_H */
+#endif /* CK_CC_H */

@@ -25,8 +25,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_COHORT_H
-#define _CK_COHORT_H
+#ifndef CK_COHORT_H
+#define CK_COHORT_H
 
 /*
  * This is an implementation of lock cohorts as described in:
@@ -158,4 +158,4 @@ enum ck_cohort_state {
 	    .local_pass_limit = CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT \
 	}
 
-#endif /* _CK_COHORT_H */
+#endif /* CK_COHORT_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_ELIDE_H
-#define _CK_ELIDE_H
+#ifndef CK_ELIDE_H
+#define CK_ELIDE_H
 
 /*
  * As RTM is currently only supported on TSO x86 architectures,
@@ -88,7 +88,7 @@ enum _ck_elide_hint {
 	CK_ELIDE_HINT_STOP
 };
 
-#define _CK_ELIDE_LOCK_BUSY 0xFF
+#define CK_ELIDE_LOCK_BUSY 0xFF
 
 static enum _ck_elide_hint
 _ck_elide_fallback(int *retry,
@@ -105,7 +105,7 @@ _ck_elide_fallback(int *retry,
 		return CK_ELIDE_HINT_STOP;
 
 	if (status & CK_PR_RTM_EXPLICIT) {
-		if (CK_PR_RTM_CODE(status) == _CK_ELIDE_LOCK_BUSY) {
+		if (CK_PR_RTM_CODE(status) == CK_ELIDE_LOCK_BUSY) {
 			st->skip = c->skip_busy;
 			*retry = c->retry_busy;
 			return CK_ELIDE_HINT_SPIN;
@@ -159,7 +159,7 @@ _ck_elide_fallback(int *retry,
 		unsigned int status = ck_pr_rtm_begin(); \
 		if (status == CK_PR_RTM_STARTED) { \
 			if (L_P(lock) == true) \
-				ck_pr_rtm_abort(_CK_ELIDE_LOCK_BUSY); \
+				ck_pr_rtm_abort(CK_ELIDE_LOCK_BUSY); \
 			\
 			return; \
 		} \
@@ -211,7 +211,7 @@ _ck_elide_fallback(int *retry,
 		} \
 		\
 		if (L_P(lock) == true) \
-			ck_pr_rtm_abort(_CK_ELIDE_LOCK_BUSY); \
+			ck_pr_rtm_abort(CK_ELIDE_LOCK_BUSY); \
 		\
 		return; \
 	} \
@@ -237,7 +237,7 @@ _ck_elide_fallback(int *retry,
 			return false; \
 		\
 		if (TL_P(lock) == true) \
-			ck_pr_rtm_abort(_CK_ELIDE_LOCK_BUSY); \
+			ck_pr_rtm_abort(CK_ELIDE_LOCK_BUSY); \
 		\
 		return true; \
 	}
@@ -318,4 +318,4 @@ _ck_elide_fallback(int *retry,
 #define CK_ELIDE_UNLOCK_ADAPTIVE(NAME, STAT, LOCK) \
 	ck_elide_##NAME##_unlock_adaptive(STAT, LOCK)
 
-#endif /* _CK_ELIDE_H */
+#endif /* CK_ELIDE_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_EPOCH_H
-#define _CK_EPOCH_H
+#ifndef CK_EPOCH_H
+#define CK_EPOCH_H
 
 /*
  * The implementation here is inspired from the work described in:
@@ -159,4 +159,4 @@ void ck_epoch_synchronize(ck_epoch_t *, ck_epoch_record_t *);
 void ck_epoch_barrier(ck_epoch_t *, ck_epoch_record_t *);
 void ck_epoch_reclaim(ck_epoch_record_t *);
 
-#endif /* _CK_EPOCH_H */
+#endif /* CK_EPOCH_H */

@@ -25,8 +25,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_FIFO_H
-#define _CK_FIFO_H
+#ifndef CK_FIFO_H
+#define CK_FIFO_H
 
 #include <ck_cc.h>
 #include <ck_md.h>
@@ -475,4 +475,4 @@ ck_fifo_mpmc_trydequeue(struct ck_fifo_mpmc *fifo,
 #endif /* CK_F_FIFO_MPMC */
 #endif /* CK_F_PR_CAS_PTR_2 */
 
-#endif /* _CK_FIFO_H */
+#endif /* CK_FIFO_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_HP_H
-#define _CK_HP_H
+#ifndef CK_HP_H
+#define CK_HP_H
 
 #include <ck_cc.h>
 #include <ck_pr.h>
@@ -103,4 +103,4 @@ void ck_hp_free(ck_hp_record_t *, ck_hp_hazard_t *, void *, void *);
 void ck_hp_retire(ck_hp_record_t *, ck_hp_hazard_t *, void *, void *);
 void ck_hp_purge(ck_hp_record_t *);
 
-#endif /* _CK_HP_H */
+#endif /* CK_HP_H */

@@ -25,8 +25,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_HP_FIFO_H
-#define _CK_HP_FIFO_H
+#ifndef CK_HP_FIFO_H
+#define CK_HP_FIFO_H
 
 #include <ck_cc.h>
 #include <ck_hp.h>
@@ -218,4 +218,4 @@ ck_hp_fifo_trydequeue_mpmc(ck_hp_record_t *record,
 	    (entry) != NULL && ((T) = (entry)->next, 1); \
 	    (entry) = (T))
 
-#endif /* _CK_HP_FIFO_H */
+#endif /* CK_HP_FIFO_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_HP_STACK_H
-#define _CK_HP_STACK_H
+#ifndef CK_HP_STACK_H
+#define CK_HP_STACK_H
 
 #include <ck_cc.h>
 #include <ck_hp.h>
@@ -110,4 +110,4 @@ leave:
 	return false;
 }
 
-#endif /* _CK_HP_STACK_H */
+#endif /* CK_HP_STACK_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_HS_H
-#define _CK_HS_H
+#ifndef CK_HS_H
+#define CK_HS_H
 
 #include <ck_cc.h>
 #include <ck_malloc.h>
@@ -131,4 +131,4 @@ bool ck_hs_reset(ck_hs_t *);
 bool ck_hs_reset_size(ck_hs_t *, unsigned long);
 void ck_hs_stat(ck_hs_t *, struct ck_hs_stat *);
 
-#endif /* _CK_HS_H */
+#endif /* CK_HS_H */

@@ -24,12 +24,12 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_HT_H
-#define _CK_HT_H
+#ifndef CK_HT_H
+#define CK_HT_H
 
-#ifndef _CK_HT_IM
+#ifndef CK_HT_IM
 #warning ck_ht is deprecated, see ck_hm, ck_hs or ck_rhs.
-#endif /* !_CK_HT_IM */
+#endif /* !CK_HT_IM */
 
 #include <ck_pr.h>
 
@@ -262,4 +262,4 @@ bool ck_ht_reset_size_spmc(ck_ht_t *, uint64_t);
 uint64_t ck_ht_count(ck_ht_t *);
 
 #endif /* CK_F_PR_LOAD_64 && CK_F_PR_STORE_64 */
-#endif /* _CK_HT_H */
+#endif /* CK_HT_H */
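Note that the ck_ht hunks above also rename the internal suppression macro from _CK_HT_IM to CK_HT_IM; a later hunk in this commit defines it before including the header. An illustrative consumer that wants the deprecated table without the #warning would presumably now spell it as follows:

/* Define CK_HT_IM before the include to skip the deprecation #warning
 * (the guard shown in the hunk above is #ifndef CK_HT_IM). */
#define CK_HT_IM
#include <ck_ht.h>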

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_MALLOC_H
-#define _CK_MALLOC_H
+#ifndef CK_MALLOC_H
+#define CK_MALLOC_H
 
 #include <stdbool.h>
 #include <sys/types.h>
@@ -36,4 +36,4 @@ struct ck_malloc {
 	void (*free)(void *, size_t, bool);
 };
 
-#endif /* _CK_MALLOC_H */
+#endif /* CK_MALLOC_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_MD_H
-#define _CK_MD_H
+#ifndef CK_MD_H
+#define CK_MD_H
 
 #ifndef CK_MD_CACHELINE
 #define CK_MD_CACHELINE (64)
@@ -51,4 +51,4 @@
 #define @MM@
 #endif /* @MM@ */
 
-#endif /* _CK_MD_H */
+#endif /* CK_MD_H */

@@ -25,8 +25,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_PFLOCK_H
-#define _CK_PFLOCK_H
+#ifndef CK_PFLOCK_H
+#define CK_PFLOCK_H
 
 /*
  * This is an implementation of phase-fair locks derived from the work
@@ -139,4 +139,4 @@ leave:
 	return;
 }
 
-#endif /* _CK_PFLOCK_H */
+#endif /* CK_PFLOCK_H */

@@ -25,8 +25,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_PR_H
-#define _CK_PR_H
+#ifndef CK_PR_H
+#define CK_PR_H
 
 #include <ck_cc.h>
 #include <ck_limits.h>
@@ -1159,4 +1159,4 @@ CK_PR_FAS_S(8, uint8_t)
 #undef CK_PR_FAA
 #undef CK_PR_FAS
 
-#endif /* _CK_PR_H */
+#endif /* CK_PR_H */

@@ -56,8 +56,8 @@
  * $FreeBSD: release/9.0.0/sys/sys/queue.h 221843 2011-05-13 15:49:23Z mdf $
  */
 
-#ifndef _CK_QUEUE_H_
-#define _CK_QUEUE_H_
+#ifndef CK_QUEUE_H
+#define CK_QUEUE_H
 
 #include <ck_pr.h>
 
@@ -414,4 +414,4 @@ struct { \
 	swap_tmp->field.le_prev = &(head2)->lh_first; \
 } while (0)
 
-#endif /* _CK_QUEUE_H */
+#endif /* CK_QUEUE_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_RHS_H
-#define _CK_RHS_H
+#ifndef CK_RHS_H
+#define CK_RHS_H
 
 #include <ck_cc.h>
 #include <ck_malloc.h>
@@ -129,4 +129,4 @@ bool ck_rhs_reset(ck_rhs_t *);
 bool ck_rhs_reset_size(ck_rhs_t *, unsigned long);
 void ck_rhs_stat(ck_rhs_t *, struct ck_rhs_stat *);
 
-#endif /* _CK_RHS_H */
+#endif /* CK_RHS_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_RING_H
-#define _CK_RING_H
+#ifndef CK_RING_H
+#define CK_RING_H
 
 #include <ck_cc.h>
 #include <ck_md.h>
@@ -431,4 +431,4 @@ ck_ring_dequeue_spmc_##name(struct ck_ring *a, \
 #define CK_RING_DEQUEUE_SPMC(name, a, b, c) \
 	ck_ring_dequeue_spmc_##name(a, b, c)
 
-#endif /* _CK_RING_H */
+#endif /* CK_RING_H */

@@ -25,8 +25,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_RWCOHORT_H
-#define _CK_RWCOHORT_H
+#ifndef CK_RWCOHORT_H
+#define CK_RWCOHORT_H
 
 /*
  * This is an implementation of NUMA-aware reader-writer locks as described in:
@@ -314,4 +314,4 @@
 	    .read_counter = 0, \
 	}
 
-#endif /* _CK_RWCOHORT_H */
+#endif /* CK_RWCOHORT_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_RWLOCK_H
-#define _CK_RWLOCK_H
+#ifndef CK_RWLOCK_H
+#define CK_RWLOCK_H
 
 #include <ck_elide.h>
 #include <ck_pr.h>
@@ -294,4 +294,4 @@ ck_rwlock_recursive_read_unlock(ck_rwlock_recursive_t *rw)
 	return;
 }
 
-#endif /* _CK_RWLOCK_H */
+#endif /* CK_RWLOCK_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_SEQUENCE_H
-#define _CK_SEQUENCE_H
+#ifndef CK_SEQUENCE_H
+#define CK_SEQUENCE_H
 
 #include <ck_cc.h>
 #include <ck_pr.h>
@@ -122,4 +122,4 @@ ck_sequence_write_end(struct ck_sequence *sq)
 	return;
 }
 
-#endif /* _CK_SEQUENCE_H */
+#endif /* CK_SEQUENCE_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_SPINLOCK_H
-#define _CK_SPINLOCK_H
+#ifndef CK_SPINLOCK_H
+#define CK_SPINLOCK_H
 
 #include "spinlock/anderson.h"
 #include "spinlock/cas.h"
@@ -58,4 +58,4 @@ CK_ELIDE_PROTOTYPE(ck_spinlock, ck_spinlock_t,
 CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock, ck_spinlock_t,
     ck_spinlock_locked, ck_spinlock_trylock)
 
-#endif /* _CK_SPINLOCK_H */
+#endif /* CK_SPINLOCK_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_STACK_H
-#define _CK_STACK_H
+#ifndef CK_STACK_H
+#define CK_STACK_H
 
 #include <ck_cc.h>
 #include <ck_pr.h>
@@ -354,4 +354,4 @@ ck_stack_init(struct ck_stack *stack)
 	    (entry) != NULL && ((T) = (entry)->next, 1); \
 	    (entry) = (T))
 
-#endif /* _CK_STACK_H */
+#endif /* CK_STACK_H */

@@ -25,8 +25,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_SWLOCK_H
-#define _CK_SWLOCK_H
+#ifndef CK_SWLOCK_H
+#define CK_SWLOCK_H
 
 #include <ck_elide.h>
 #include <ck_limits.h>
@@ -214,4 +214,4 @@ CK_ELIDE_PROTOTYPE(ck_swlock_read, ck_swlock_t,
     ck_swlock_locked_writer, ck_swlock_read_lock,
     ck_swlock_locked_reader, ck_swlock_read_unlock)
 
-#endif /* _CK_SWLOCK_H */
+#endif /* CK_SWLOCK_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_TFLOCK_TICKET_H
-#define _CK_TFLOCK_TICKET_H
+#ifndef CK_TFLOCK_TICKET_H
+#define CK_TFLOCK_TICKET_H
 
 /*
  * This is an implementation of task-fair locks derived from the work
@@ -130,4 +130,4 @@ ck_tflock_ticket_read_unlock(struct ck_tflock_ticket *lock)
 	return;
 }
 
-#endif /* _CK_TFLOCK_TICKET_H */
+#endif /* CK_TFLOCK_TICKET_H */

@@ -25,8 +25,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_GCC_CC_H
-#define _CK_GCC_CC_H
+#ifndef CK_GCC_CC_H
+#define CK_GCC_CC_H
 
 #include <ck_md.h>
 
@@ -134,4 +134,4 @@ ck_cc_popcount(unsigned int x)
 	return __builtin_popcount(x);
 }
 
-#endif /* _CK_GCC_CC_H */
+#endif /* CK_GCC_CC_H */

@@ -24,10 +24,10 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_PR_GCC_H
-#define _CK_PR_GCC_H
+#ifndef CK_PR_GCC_H
+#define CK_PR_GCC_H
 
-#ifndef _CK_PR_H
+#ifndef CK_PR_H
 #error Do not include this file directly, use ck_pr.h
 #endif
 
@@ -277,5 +277,5 @@ CK_PR_UNARY_S(8, uint8_t)
 #undef CK_PR_UNARY_S
 #undef CK_PR_UNARY
 
 #endif /* !CK_F_PR */
-#endif /* _CK_PR_GCC_H */
+#endif /* CK_PR_GCC_H */

@@ -25,10 +25,10 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_PR_PPC_H
-#define _CK_PR_PPC_H
+#ifndef CK_PR_PPC_H
+#define CK_PR_PPC_H
 
-#ifndef _CK_PR_H
+#ifndef CK_PR_H
 #error Do not include this file directly, use ck_pr.h
 #endif
 
@@ -318,5 +318,5 @@ CK_PR_FAA(int, int, "w")
 #undef CK_PR_FAA
 
-#endif /* _CK_PR_PPC_H */
+#endif /* CK_PR_PPC_H */

@@ -24,10 +24,10 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_PR_PPC64_H
-#define _CK_PR_PPC64_H
+#ifndef CK_PR_PPC64_H
+#define CK_PR_PPC64_H
 
-#ifndef _CK_PR_H
+#ifndef CK_PR_H
 #error Do not include this file directly, use ck_pr.h
 #endif
 
@@ -415,5 +415,5 @@ CK_PR_FAA(int, int, "w")
 #undef CK_PR_FAA
 
-#endif /* _CK_PR_PPC64_H */
+#endif /* CK_PR_PPC64_H */

@@ -24,10 +24,10 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_PR_SPARCV9_H
-#define _CK_PR_SPARCV9_H
+#ifndef CK_PR_SPARCV9_H
+#define CK_PR_SPARCV9_H
 
-#ifndef _CK_PR_H
+#ifndef CK_PR_H
 #error Do not include this file directly, use ck_pr.h
 #endif
 
@@ -221,5 +221,5 @@ CK_PR_FAS(32, uint32_t)
 #undef CK_PR_FAS
 
-#endif /* _CK_PR_SPARCV9_H */
+#endif /* CK_PR_SPARCV9_H */

@@ -25,10 +25,10 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_PR_X86_H
-#define _CK_PR_X86_H
+#ifndef CK_PR_X86_H
+#define CK_PR_X86_H
 
-#ifndef _CK_PR_H
+#ifndef CK_PR_H
 #error Do not include this file directly, use ck_pr.h
 #endif
 
@@ -384,5 +384,5 @@ CK_PR_GENERATE(btr)
 #undef CK_PR_GENERATE
 #undef CK_PR_BT
 
-#endif /* _CK_PR_X86_H */
+#endif /* CK_PR_X86_H */

@@ -24,10 +24,10 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_PR_X86_64_H
-#define _CK_PR_X86_64_H
+#ifndef CK_PR_X86_64_H
+#define CK_PR_X86_64_H
 
-#ifndef _CK_PR_H
+#ifndef CK_PR_H
 #error Do not include this file directly, use ck_pr.h
 #endif
 
@@ -561,5 +561,5 @@ CK_PR_GENERATE(btr)
 #undef CK_PR_GENERATE
 #undef CK_PR_BT
 
-#endif /* _CK_PR_X86_64_H */
+#endif /* CK_PR_X86_64_H */

@@ -40,10 +40,10 @@
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  */
 
-#ifndef _CK_PR_X86_64_RTM_H
-#define _CK_PR_X86_64_RTM_H
+#ifndef CK_PR_X86_64_RTM_H
+#define CK_PR_X86_64_RTM_H
 
-#ifndef _CK_PR_X86_64_H
+#ifndef CK_PR_X86_64_H
 #error Do not include this file directly, use ck_pr.h
 #endif
 
@@ -105,5 +105,5 @@ ck_pr_rtm_test(void)
 	return r;
 }
 
-#endif /* _CK_PR_X86_64_RTM_H */
+#endif /* CK_PR_X86_64_RTM_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_SPINLOCK_ANDERSON_H
-#define _CK_SPINLOCK_ANDERSON_H
+#ifndef CK_SPINLOCK_ANDERSON_H
+#define CK_SPINLOCK_ANDERSON_H
 
 #include <ck_cc.h>
 #include <ck_limits.h>
@@ -161,4 +161,4 @@ ck_spinlock_anderson_unlock(struct ck_spinlock_anderson *lock,
 	return;
 }
 #endif /* CK_F_SPINLOCK_ANDERSON */
-#endif /* _CK_SPINLOCK_ANDERSON_H */
+#endif /* CK_SPINLOCK_ANDERSON_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_SPINLOCK_CAS_H
-#define _CK_SPINLOCK_CAS_H
+#ifndef CK_SPINLOCK_CAS_H
+#define CK_SPINLOCK_CAS_H
 
 #include <ck_backoff.h>
 #include <ck_cc.h>
@@ -117,4 +117,4 @@ CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_cas, ck_spinlock_cas_t,
     ck_spinlock_cas_locked, ck_spinlock_cas_trylock)
 
 #endif /* CK_F_SPINLOCK_CAS */
-#endif /* _CK_SPINLOCK_CAS_H */
+#endif /* CK_SPINLOCK_CAS_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_SPINLOCK_CLH_H
-#define _CK_SPINLOCK_CLH_H
+#ifndef CK_SPINLOCK_CLH_H
+#define CK_SPINLOCK_CLH_H
 
 #include <ck_cc.h>
 #include <ck_limits.h>
@@ -113,4 +113,4 @@ ck_spinlock_clh_unlock(struct ck_spinlock_clh **thread)
 	return;
 }
 #endif /* CK_F_SPINLOCK_CLH */
-#endif /* _CK_SPINLOCK_CLH_H */
+#endif /* CK_SPINLOCK_CLH_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_SPINLOCK_DEC_H
-#define _CK_SPINLOCK_DEC_H
+#ifndef CK_SPINLOCK_DEC_H
+#define CK_SPINLOCK_DEC_H
 
 #include <ck_backoff.h>
 #include <ck_cc.h>
@@ -139,4 +139,4 @@ CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_dec, ck_spinlock_dec_t,
     ck_spinlock_dec_locked, ck_spinlock_dec_trylock)
 
 #endif /* CK_F_SPINLOCK_DEC */
-#endif /* _CK_SPINLOCK_DEC_H */
+#endif /* CK_SPINLOCK_DEC_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_SPINLOCK_FAS_H
-#define _CK_SPINLOCK_FAS_H
+#ifndef CK_SPINLOCK_FAS_H
+#define CK_SPINLOCK_FAS_H
 
 #include <ck_backoff.h>
 #include <ck_cc.h>
@@ -114,4 +114,4 @@ CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_fas, ck_spinlock_fas_t,
     ck_spinlock_fas_locked, ck_spinlock_fas_trylock)
 
 #endif /* CK_F_SPINLOCK_FAS */
-#endif /* _CK_SPINLOCK_FAS_H */
+#endif /* CK_SPINLOCK_FAS_H */

@@ -25,8 +25,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_SPINLOCK_HCLH_H
-#define _CK_SPINLOCK_HCLH_H
+#ifndef CK_SPINLOCK_HCLH_H
+#define CK_SPINLOCK_HCLH_H
 
 #include <ck_cc.h>
 #include <ck_pr.h>
@@ -141,4 +141,4 @@ ck_spinlock_hclh_unlock(struct ck_spinlock_hclh **thread)
 	return;
 }
 #endif /* CK_F_SPINLOCK_HCLH */
-#endif /* _CK_SPINLOCK_HCLH_H */
+#endif /* CK_SPINLOCK_HCLH_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_SPINLOCK_MCS_H
-#define _CK_SPINLOCK_MCS_H
+#ifndef CK_SPINLOCK_MCS_H
+#define CK_SPINLOCK_MCS_H
 
 #include <ck_cc.h>
 #include <ck_pr.h>
@@ -145,4 +145,4 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *n
 	return;
 }
 #endif /* CK_F_SPINLOCK_MCS */
-#endif /* _CK_SPINLOCK_MCS_H */
+#endif /* CK_SPINLOCK_MCS_H */

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_SPINLOCK_TICKET_H
-#define _CK_SPINLOCK_TICKET_H
+#ifndef CK_SPINLOCK_TICKET_H
+#define CK_SPINLOCK_TICKET_H
 
 #include <ck_backoff.h>
 #include <ck_cc.h>
@@ -294,4 +294,4 @@ CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_ticket, ck_spinlock_ticket_t,
     ck_spinlock_ticket_locked, ck_spinlock_ticket_trylock)
 
 #endif /* CK_F_SPINLOCK_TICKET */
-#endif /* _CK_SPINLOCK_TICKET_H */
+#endif /* CK_SPINLOCK_TICKET_H */

@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  */
 
-#define _CK_HT_IM
+#define CK_HT_IM
 #include <ck_ht.h>
 
 #ifdef CK_F_HT

@@ -24,8 +24,8 @@
  * SUCH DAMAGE.
  */
 
-#ifndef _CK_HT_HASH_H
-#define _CK_HT_HASH_H
+#ifndef CK_HT_HASH_H
+#define CK_HT_HASH_H
 
 /*
  * This is the Murmur hash written by Austin Appleby.
@@ -266,4 +266,4 @@ static inline uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed
 	return h;
 }
 
-#endif /* _CK_HT_HASH_H */
+#endif /* CK_HT_HASH_H */
