ck_pr: Add ck_pr_barrier for compiler barrier.

CK_CC_BARRIER isn't idiomatic; roll this into the PR memory model
instead.
Samy Al Bahra 12 years ago
parent 1e8fe57028
commit 8043f52130
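
As context for the diff below, a minimal usage sketch of the new interface (the publish() helper and its variables are hypothetical, not part of this commit): ck_pr_barrier() constrains only the compiler and emits no fence instruction, so ordering that must be visible to other processors still requires the ck_pr_fence_*() operations.

#include <ck_pr.h>

static int value;	/* hypothetical payload */
static int flag;	/* hypothetical ready flag */

static void
publish(int v)
{

	value = v;

	/*
	 * Compiler-only barrier: the compiler may not reorder the two
	 * stores or cache them in registers across this point. No
	 * instruction is emitted, so cross-processor visibility still
	 * needs a real fence such as ck_pr_fence_store().
	 */
	ck_pr_barrier();
	flag = 1;
	return;
}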

@@ -45,10 +45,6 @@
 #define CK_CC_PAD(x) union { char pad[x]; }
 
-#ifndef CK_CC_BARRIER
-#define CK_CC_BARRIER()
-#endif
-
 #ifndef CK_CC_ALIASED
 #define CK_CC_ALIASED
 #endif

@ -68,11 +68,6 @@
*/ */
#define CK_CC_CACHELINE CK_CC_ALIGN(CK_MD_CACHELINE) #define CK_CC_CACHELINE CK_CC_ALIGN(CK_MD_CACHELINE)
/*
* Compiler barrier.
*/
#define CK_CC_BARRIER() __asm__ __volatile__("" ::: "memory")
/* /*
* These are functions which should be avoided. * These are functions which should be avoided.
*/ */

@@ -109,6 +109,14 @@ CK_PR_FENCE(memory)
 #undef CK_PR_FENCE
 
+static inline void
+ck_pr_barrier(void)
+{
+
+	__asm__ __volatile__("" ::: "memory");
+	return;
+}
+
 /*
  * Atomic compare and swap.
  */

@@ -75,6 +75,14 @@ CK_PR_FENCE(memory, "sync")
 #undef CK_PR_FENCE
 
+static inline void
+ck_pr_barrier(void)
+{
+
+	__asm__ __volatile__("" ::: "memory");
+	return;
+}
+
 #define CK_PR_LOAD(S, M, T, C, I)				\
 	CK_CC_INLINE static T					\
 	ck_pr_load_##S(M *target)				\

@@ -74,6 +74,14 @@ CK_PR_FENCE(memory, "sync")
 #undef CK_PR_FENCE
 
+static inline void
+ck_pr_barrier(void)
+{
+
+	__asm__ __volatile__("" ::: "memory");
+	return;
+}
+
 #define CK_PR_LOAD(S, M, T, C, I)				\
 	CK_CC_INLINE static T					\
 	ck_pr_load_##S(M *target)				\

@@ -71,6 +71,14 @@ CK_PR_FENCE(memory, "membar #MemIssue")
 #undef CK_PR_FENCE
 
+static inline void
+ck_pr_barrier(void)
+{
+
+	__asm__ __volatile__("" ::: "memory");
+	return;
+}
+
 #define CK_PR_LOAD(S, M, T, C, I)				\
 	CK_CC_INLINE static T					\
 	ck_pr_load_##S(M *target)				\

@@ -87,6 +87,14 @@ CK_PR_FENCE(memory, "mfence")
 #undef CK_PR_FENCE
 
+static inline void
+ck_pr_barrier(void)
+{
+
+	__asm__ __volatile__("" ::: "memory");
+	return;
+}
+
 /*
  * Atomic fetch-and-store operations.
  */

@@ -86,6 +86,14 @@ CK_PR_FENCE(memory, "mfence")
 #undef CK_PR_FENCE
 
+static inline void
+ck_pr_barrier(void)
+{
+
+	__asm__ __volatile__("" ::: "memory");
+	return;
+}
+
 /*
  * Atomic fetch-and-store operations.
  */
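
Every port above gains the same body. For readers unfamiliar with the idiom, the empty __asm__ __volatile__ statement with a "memory" clobber is the standard GCC-style compiler barrier: it tells the compiler that any memory may have been read or written, so loads and stores cannot be moved across it or kept cached in registers. A small sketch of the effect, assuming a hypothetical shared flag:

extern int shared;	/* hypothetical flag set by another context */

static int
wait_for_flag(void)
{
	int v;

	do {
		/*
		 * Same body as ck_pr_barrier(): forces the compiler to
		 * reload shared on every iteration instead of spinning
		 * on a stale register copy.
		 */
		__asm__ __volatile__("" ::: "memory");
		v = shared;
	} while (v == 0);

	return v;
}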

@@ -81,16 +81,15 @@ test(void *c)
 		entry->tid = context->tid;
 		ck_hp_fifo_enqueue_mpmc(&record, &fifo, fifo_entry, entry);
 
-		CK_CC_BARRIER();
+		ck_pr_barrier();
 
 		fifo_entry = ck_hp_fifo_dequeue_mpmc(&record, &fifo, &entry);
 		if (fifo_entry == NULL) {
 			fprintf(stderr, "ERROR [%u] Queue should never be empty.\n", context->tid);
-			pause();
 			exit(EXIT_FAILURE);
 		}
 
-		CK_CC_BARRIER();
+		ck_pr_barrier();
 
 		if (entry->tid < 0 || entry->tid >= nthr) {
 			fprintf(stderr, "ERROR [%u] Incorrect value in entry.\n", entry->tid);
