whitespace: Sweeping whitespace changes.

Removing trailing whitespaces and newlines.
Samy Al Bahra 11 years ago
parent 29db80432f
commit d6ba2a6273

@ -98,4 +98,3 @@ ck_array_initialized(struct ck_array *array)
_ck_i++)
#endif /* _CK_ARRAY_H */

@ -55,4 +55,3 @@ ck_backoff_eb(volatile unsigned int *c)
}
#endif /* _CK_BACKOFF_H */

@ -162,4 +162,3 @@ void ck_barrier_mcs_subscribe(ck_barrier_mcs_t *, ck_barrier_mcs_state_t *);
void ck_barrier_mcs(ck_barrier_mcs_t *, ck_barrier_mcs_state_t *);
#endif /* _CK_BARRIER_H */

@ -277,4 +277,3 @@ ck_brlock_read_unlock(struct ck_brlock_reader *reader)
}
#endif /* _CK_BRLOCK_H */

@ -184,4 +184,3 @@ ck_bytelock_read_unlock(struct ck_bytelock *bytelock, unsigned int slot)
}
#endif /* _CK_BYTELOCK_H */

@ -159,4 +159,3 @@ enum ck_cohort_state {
}
#endif /* _CK_COHORT_H */

@ -303,7 +303,7 @@ _ck_elide_fallback(int *retry,
* semantics. In environments where jitter is low, this may yield a tighter
* fast path.
*/
-#define CK_ELIDE_LOCK(NAME, LOCK) ck_elide_##NAME##_lock(LOCK)
+#define CK_ELIDE_LOCK(NAME, LOCK) ck_elide_##NAME##_lock(LOCK)
#define CK_ELIDE_UNLOCK(NAME, LOCK) ck_elide_##NAME##_unlock(LOCK)
#define CK_ELIDE_TRYLOCK(NAME, LOCK) ck_elide_##NAME##_trylock(LOCK)
@ -319,4 +319,3 @@ _ck_elide_fallback(int *retry,
ck_elide_##NAME##_unlock_adaptive(STAT, LOCK)
#endif /* _CK_ELIDE_H */
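
For context, the wrappers above expand to ck_elide_<NAME>_lock()/_unlock() functions generated by CK_ELIDE_PROTOTYPE; the ck_spinlock instantiation appears in the spinlock hunk later in this diff. A minimal caller sketch under that assumption (the lock name and critical section are illustrative, not taken from this commit):

#include <ck_elide.h>
#include <ck_spinlock.h>

static ck_spinlock_t lock = CK_SPINLOCK_INITIALIZER;

static void
counter_update(void)
{

	/* Elided on hardware with transactional memory, a plain ck_spinlock otherwise. */
	CK_ELIDE_LOCK(ck_spinlock, &lock);
	/* ...critical section... */
	CK_ELIDE_UNLOCK(ck_spinlock, &lock);
	return;
}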

@ -160,4 +160,3 @@ void ck_epoch_barrier(ck_epoch_t *, ck_epoch_record_t *);
void ck_epoch_reclaim(ck_epoch_record_t *);
#endif /* _CK_EPOCH_H */

@ -472,4 +472,3 @@ ck_fifo_mpmc_trydequeue(struct ck_fifo_mpmc *fifo,
#endif /* CK_F_PR_CAS_PTR_2 */
#endif /* _CK_FIFO_H */

@ -104,4 +104,3 @@ void ck_hp_retire(ck_hp_record_t *, ck_hp_hazard_t *, void *, void *);
void ck_hp_purge(ck_hp_record_t *);
#endif /* _CK_HP_H */

@ -219,4 +219,3 @@ ck_hp_fifo_trydequeue_mpmc(ck_hp_record_t *record,
(entry) = (T))
#endif /* _CK_HP_FIFO_H */

@ -111,4 +111,3 @@ leave:
}
#endif /* _CK_HP_STACK_H */

@ -48,7 +48,7 @@
*/
#define CK_HS_MODE_DIRECT 2
-/*
+/*
* Indicates that the values to be stored are pointers.
* Allows for space optimizations in the presence of pointer
* packing. Mutually exclusive with CK_HS_MODE_DIRECT.
@ -132,4 +132,3 @@ bool ck_hs_reset_size(ck_hs_t *, unsigned long);
void ck_hs_stat(ck_hs_t *, struct ck_hs_stat *);
#endif /* _CK_HS_H */
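
For reference, the mode flags documented above are OR'd together at initialization time. A minimal sketch of a string set, assuming the seven-argument ck_hs_init() of this era and the CK_HS_MODE_OBJECT/CK_HS_MODE_SPMC flags; the hash, comparator and allocator callbacks are illustrative user code, not part of this commit:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <ck_hs.h>

static void *
hs_malloc(size_t r)
{
	return malloc(r);
}

static void
hs_free(void *p, size_t b, bool r)
{
	(void)b;
	(void)r;
	free(p);
	return;
}

static struct ck_malloc hs_allocator = {
	.malloc = hs_malloc,
	.free = hs_free
};

/* djb2-style hash of a NUL-terminated string; any seedable hash will do. */
static unsigned long
hs_hash(const void *object, unsigned long seed)
{
	const unsigned char *c = object;
	unsigned long h = seed;

	while (*c != '\0')
		h = (h << 5) + h + *c++;

	return h;
}

static bool
hs_compare(const void *previous, const void *compare)
{
	return strcmp(previous, compare) == 0;
}

static ck_hs_t hs;

static bool
set_setup(void)
{
	/* Values are pointers (CK_HS_MODE_OBJECT); one writer, many concurrent readers. */
	return ck_hs_init(&hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC,
	    hs_hash, hs_compare, &hs_allocator, 1024, 6602834);
}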

@ -259,4 +259,3 @@ uint64_t ck_ht_count(ck_ht_t *);
#endif /* CK_F_PR_LOAD_64 && CK_F_PR_STORE_64 */
#endif /* _CK_HT_H */

@ -29,4 +29,3 @@
#else
#include <limits.h>
#endif /* __linux__ && __KERNEL__ */

@ -37,4 +37,3 @@ struct ck_malloc {
};
#endif /* _CK_MALLOC_H */

@ -140,4 +140,3 @@ leave:
}
#endif /* _CK_PFLOCK_H */

@ -1149,4 +1149,3 @@ CK_PR_FAS_S(8, uint8_t)
#undef CK_PR_FAS
#endif /* _CK_PR_H */

@ -415,4 +415,3 @@ struct { \
} while (0)
#endif /* _CK_QUEUE_H */

@ -48,7 +48,7 @@
*/
#define CK_RHS_MODE_DIRECT 2
-/*
+/*
* Indicates that the values to be stored are pointers.
* Allows for space optimizations in the presence of pointer
* packing. Mutually exclusive with CK_RHS_MODE_DIRECT.

@ -307,7 +307,7 @@ _ck_ring_dequeue_spmc(struct ck_ring *ring,
return false;
ck_pr_fence_load();
target = (char *)buffer + size * (consumer & mask);
memcpy(data, target, size);
@ -432,4 +432,3 @@ ck_ring_dequeue_spmc_##name(struct ck_ring *a, \
ck_ring_dequeue_spmc_##name(a, b, c)
#endif /* _CK_RING_H */

@ -315,4 +315,3 @@
}
#endif /* _CK_RWCOHORT_H */

@ -295,4 +295,3 @@ ck_rwlock_recursive_read_unlock(ck_rwlock_recursive_t *rw)
}
#endif /* _CK_RWLOCK_H */

@ -123,4 +123,3 @@ ck_sequence_write_end(struct ck_sequence *sq)
}
#endif /* _CK_SEQUENCE_H */

@ -59,4 +59,3 @@ CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock, ck_spinlock_t,
ck_spinlock_locked, ck_spinlock_trylock)
#endif /* _CK_SPINLOCK_H */

@ -352,4 +352,3 @@ ck_stack_init(struct ck_stack *stack)
(entry) = (T))
#endif /* _CK_STACK_H */

@ -30,4 +30,3 @@
#else
#include <stdint.h>
#endif /* __linux__ && __KERNEL__ */

@ -215,4 +215,3 @@ CK_ELIDE_PROTOTYPE(ck_swlock_read, ck_swlock_t,
ck_swlock_locked_reader, ck_swlock_read_unlock)
#endif /* _CK_SWLOCK_H */

@ -32,7 +32,7 @@
* described in:
* John M. Mellor-Crummey and Michael L. Scott. 1991.
* Scalable reader-writer synchronization for shared-memory
-* multiprocessors. SIGPLAN Not. 26, 7 (April 1991), 106-113.
+* multiprocessors. SIGPLAN Not. 26, 7 (April 1991), 106-113.
*/
#include <ck_cc.h>
@ -65,7 +65,7 @@ ck_tflock_ticket_fca_32(uint32_t *target, uint32_t mask, uint32_t delta)
ck_pr_stall();
}
return snapshot;
}

@ -135,7 +135,7 @@ ck_pr_load_64(const uint64_t *target)
register uint64_t ret asm("r0");
__asm __volatile("ldrd %0, [%1]" : "+r" (ret)
: "r" (target)
: "r" (target)
: "memory", "cc");
return (ret);
}
@ -262,7 +262,7 @@ ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *value)
"=&r" (tmp)
: "r" (target),
"r" (set),
"r" (compare)
"r" (compare)
: "memory", "cc");
*(void **)value = previous;
return (previous == compare);
@ -283,7 +283,7 @@ ck_pr_cas_ptr(void *target, void *compare, void *set)
"=&r" (tmp)
: "r" (target),
"r" (set),
"r" (compare)
"r" (compare)
: "memory", "cc");
return (previous == compare);
}
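
The routines above provide the compare-and-swap calling convention the rest of ck builds on: ck_pr_cas_ptr() returns true only if *target still held compare and the swap was applied. A minimal retry-loop sketch using only ck_pr_load_ptr(), ck_pr_fence_store() and ck_pr_cas_ptr(); the node type and push() helper are illustrative and unrelated to this commit:

#include <stdbool.h>
#include <ck_pr.h>

struct node {
	struct node *next;
};

/* Illustrative lock-free push built only on the primitives above; not ck_stack. */
static void
push(struct node **head, struct node *n)
{
	struct node *old;

	do {
		old = ck_pr_load_ptr(head);
		n->next = old;

		/* Publish n->next before n can become the visible head. */
		ck_pr_fence_store();
	} while (ck_pr_cas_ptr(head, old, n) == false);

	return;
}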

@ -98,7 +98,7 @@ ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_queue,
/* We're head of the global queue, we're done */
if (ck_pr_load_uint(&previous->splice) == false)
return;
-}
+}
/* Now we need to splice the local queue into the global queue. */
local_tail = ck_pr_load_ptr(local_queue);

@ -43,7 +43,7 @@ static ck_hp_fifo_t fifo;
static ck_hp_t fifo_hp;
/* thread local element count */
-static unsigned long *count;
+static unsigned long *count;
static unsigned long thread_count;
@ -77,10 +77,10 @@ queue_50_50(void *elements)
record = malloc(sizeof(ck_hp_record_t));
assert(record);
slots = malloc(CK_HP_FIFO_SLOTS_SIZE);
assert(slots);
/* different seed for each thread */
seed = 1337; /*(unsigned int) pthread_self(); */
@ -166,7 +166,7 @@ main(int argc, char** argv)
/* array for local operation count */
count = malloc(sizeof(unsigned long *) * thread_count);
/*
* Initialize global hazard pointer safe memory reclamation to execute free()
* when a fifo_entry is safe to be deleted.

@ -144,7 +144,7 @@ set_init(void)
#ifdef HS_DELETE
mode |= CK_HS_MODE_DELETE;
-#endif
+#endif
ck_epoch_init(&epoch_hs);
ck_epoch_register(&epoch_hs, &epoch_wr);

@ -197,7 +197,7 @@ test(void *c)
for (i = 0; i < ITERATIONS; i++) {
for (j = 0; j < size; j++) {
buffer = _context[context->previous].buffer;
-while (ck_ring_dequeue_spmc(ring + context->previous,
+while (ck_ring_dequeue_spmc(ring + context->previous,
buffer, &entry) == false);
if (context->previous != (unsigned int)entry->tid) {
@ -315,7 +315,7 @@ main(int argc, char *argv[])
/* Wait until queue is not full. */
if (l & 1) {
while (ck_ring_enqueue_spmc(&ring_spmc,
-buffer,
+buffer,
entry) == false)
ck_pr_stall();
} else {

@ -200,7 +200,7 @@ test(void *c)
for (j = 0; j < size; j++) {
buffer = _context[context->previous].buffer;
while (CK_RING_DEQUEUE_SPMC(entry,
-ring + context->previous,
+ring + context->previous,
buffer, &entry) == false);
if (context->previous != (unsigned int)entry->tid) {

@ -289,7 +289,7 @@ CK_CC_UNUSED static int
aff_iterate_core(struct affinity *acb, unsigned int *core)
{
cpu_set_t s;
*core = ck_pr_faa_uint(&acb->request, acb->delta);
CPU_ZERO(&s);
CPU_SET((*core) % CORES, &s);
@ -454,4 +454,3 @@ ck_error(const char *message, ...)
va_end(ap);
exit(EXIT_FAILURE);
}

@ -87,7 +87,7 @@
* at e_g - 1 to still be accessed at e_g as threads are "active"
* at the same time (real-world time) mutating shared objects.
*
-* Now, if the epoch counter is ticked to e_g + 1, then no new
+* Now, if the epoch counter is ticked to e_g + 1, then no new
* hazardous references could exist to objects logically deleted at
* e_g - 1. The reason for this is that at e_g + 1, all epoch read-side
* critical sections started at e_g - 1 must have been completed. If
@ -118,7 +118,7 @@
* sufficient to represent e_g using only the values 0, 1 or 2. Every time
* a thread re-visits a e_g (which can be determined with a non-empty deferral
* list) it can assume objects in the e_g deferral list involved at least
-* three e_g transitions and are thus, safe, for physical deletion.
+* three e_g transitions and are thus, safe, for physical deletion.
*
* Blocking semantics for epoch reclamation have additional restrictions.
* Though we only require three deferral lists, reasonable blocking semantics
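
The three-transition argument above is why the epoch counter can be tracked modulo three. A toy model of just that invariant follows; it illustrates the reasoning and is not ck_epoch's implementation:

#include <stdlib.h>

#define EPOCHS 3	/* e_g only ever needs to be tracked modulo three */

struct deferral {
	void *garbage[EPOCHS];	/* one logically deleted object per bucket, for brevity */
};

/* Invoked whenever a thread observes the global epoch e_g. */
static void
bucket_reclaim(struct deferral *d, unsigned int e_g)
{
	unsigned int bucket = e_g % EPOCHS;

	/*
	 * A non-empty bucket was filled the previous time the counter mapped
	 * to this value, so at least three epoch transitions have occurred
	 * since its object was logically deleted; by the argument above, no
	 * active read-side critical section can still reference it.
	 */
	if (d->garbage[bucket] != NULL) {
		free(d->garbage[bucket]);
		d->garbage[bucket] = NULL;
	}

	return;
}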

@ -598,7 +598,7 @@ ck_hs_gc(struct ck_hs *hs, unsigned long cycles, unsigned long seed)
if (maximum != map->probe_maximum)
ck_pr_store_uint(&map->probe_maximum, maximum);
-if (bounds != NULL) {
+if (bounds != NULL) {
for (i = 0; i < map->capacity; i++)
CK_HS_STORE(&map->probe_bound[i], bounds[i]);
@ -851,7 +851,7 @@ ck_hs_get(struct ck_hs *hs,
unsigned int g, g_p, probe;
unsigned int *generation;
-do {
+do {
map = ck_pr_load_ptr(&hs->map);
generation = &map->generation[h & CK_HS_G_MASK];
g = ck_pr_load_uint(generation);

@ -395,7 +395,7 @@ ck_ht_gc(struct ck_ht *ht, unsigned long cycles, unsigned long seed)
return true;
}
if (cycles == 0) {
maximum = 0;

@ -175,7 +175,7 @@ static inline uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed
while(data != end)
{
uint64_t k;
if (!((uintptr_t)data & 0x7))
k = *data++;
else {

@ -327,11 +327,11 @@ ck_rhs_map_create(struct ck_rhs *hs, unsigned long entries)
if (hs->mode & CK_RHS_MODE_READ_MOSTLY)
size = sizeof(struct ck_rhs_map) +
(sizeof(void *) * n_entries +
-sizeof(struct ck_rhs_no_entry_desc) * n_entries +
+sizeof(struct ck_rhs_no_entry_desc) * n_entries +
2 * CK_MD_CACHELINE - 1);
else
size = sizeof(struct ck_rhs_map) +
-(sizeof(struct ck_rhs_entry_desc) * n_entries +
+(sizeof(struct ck_rhs_entry_desc) * n_entries +
CK_MD_CACHELINE - 1);
map = hs->m->malloc(size);
if (map == NULL)
@ -408,7 +408,7 @@ ck_rhs_map_probe_next(struct ck_rhs_map *map,
{
if (probes & map->offset_mask) {
-offset = (offset &~ map->offset_mask) +
+offset = (offset &~ map->offset_mask) +
((offset + 1) & map->offset_mask);
return offset;
} else
@ -421,10 +421,10 @@ ck_rhs_map_probe_prev(struct ck_rhs_map *map, unsigned long offset,
{
if (probes & map->offset_mask) {
-offset = (offset &~ map->offset_mask) + ((offset - 1) &
+offset = (offset &~ map->offset_mask) + ((offset - 1) &
map->offset_mask);
return offset;
-} else
+} else
return ((offset - probes) & map->mask);
}
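
Both helpers above rely on the same masked arithmetic: a probe sequence walks within its (offset_mask + 1)-slot group, wrapping inside the group, and only the else branch moves to a different group. A worked example, assuming offset_mask == 7:

/* Worked example, not from the diff: offset_mask == 7, so probes are grouped
 * into eight-slot, cache-line-sized windows. */
static void
probe_group_example(void)
{
	const unsigned long offset_mask = 7;
	unsigned long next;

	/* Offset 13 sits in the group [8, 15]; the next probe stays inside it. */
	next = (13UL & ~offset_mask) + ((13UL + 1) & offset_mask);	/* 8 + 6 = 14 */

	/* Offset 15 is the last slot of the group; the next probe wraps to slot 8. */
	next = (15UL & ~offset_mask) + ((15UL + 1) & offset_mask);	/* 8 + 0 = 8 */

	(void)next;
	return;
}
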
@ -616,7 +616,7 @@ ck_rhs_map_probe_rm(struct ck_rhs *hs,
if (behavior != CK_RHS_PROBE_NO_RH) {
struct ck_rhs_entry_desc *desc = (void *)&map->entries.no_entries.descs[offset];
-if (pr == -1 &&
+if (pr == -1 &&
desc->in_rh == false && desc->probes < probes) {
pr = offset;
*n_probes = probes;
@ -730,7 +730,7 @@ ck_rhs_map_probe(struct ck_rhs *hs,
if ((behavior != CK_RHS_PROBE_NO_RH)) {
struct ck_rhs_entry_desc *desc = &map->entries.descs[offset];
-if (pr == -1 &&
+if (pr == -1 &&
desc->in_rh == false && desc->probes < probes) {
pr = offset;
*n_probes = probes;
@ -818,7 +818,7 @@ ck_rhs_gc(struct ck_rhs *hs)
}
static void
-ck_rhs_add_wanted(struct ck_rhs *hs, long end_offset, long old_slot,
+ck_rhs_add_wanted(struct ck_rhs *hs, long end_offset, long old_slot,
unsigned long h)
{
struct ck_rhs_map *map = hs->map;
@ -872,7 +872,7 @@ ck_rhs_get_first_offset(struct ck_rhs_map *map, unsigned long offset, unsigned i
while (probes > (unsigned long)map->offset_mask + 1) {
offset -= ((probes - 1) &~ map->offset_mask);
offset &= map->mask;
-offset = (offset &~ map->offset_mask) +
+offset = (offset &~ map->offset_mask) +
((offset - map->offset_mask) & map->offset_mask);
probes -= map->offset_mask + 1;
}
@ -948,7 +948,7 @@ restart:
prev = prevs[--prevs_nb];
ck_pr_store_ptr(ck_rhs_entry_addr(map, orig_slot),
ck_rhs_entry(map, prev));
-h = ck_rhs_get_first_offset(map, orig_slot,
+h = ck_rhs_get_first_offset(map, orig_slot,
desc->probes);
ck_rhs_add_wanted(hs, orig_slot, prev, h);
ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
@ -966,7 +966,7 @@ ck_rhs_do_backward_shift_delete(struct ck_rhs *hs, long slot)
struct ck_rhs_map *map = hs->map;
struct ck_rhs_entry_desc *desc, *new_desc = NULL;
unsigned long h;
desc = ck_rhs_desc(map, slot);
h = ck_rhs_remove_wanted(hs, slot, -1);
while (desc->wanted > 0) {
@ -1140,7 +1140,7 @@ restart:
* period if we can guarantee earlier position of
* duplicate key.
*/
-ck_rhs_add_wanted(hs, first, -1, h);
+ck_rhs_add_wanted(hs, first, -1, h);
if (object != NULL) {
ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
ck_pr_fence_atomic_store();
@ -1155,7 +1155,7 @@ restart:
ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
ck_rhs_set_probes(map, slot, n_probes);
if (object == NULL)
-ck_rhs_add_wanted(hs, slot, -1, h);
+ck_rhs_add_wanted(hs, slot, -1, h);
}
if (object == NULL) {
@ -1209,12 +1209,12 @@ restart:
/* Insert key into first bucket in probe sequence. */
ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
desc->probes = n_probes;
-ck_rhs_add_wanted(hs, first, -1, h);
+ck_rhs_add_wanted(hs, first, -1, h);
} else {
/* An empty slot was found. */
ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
ck_rhs_set_probes(map, slot, n_probes);
-ck_rhs_add_wanted(hs, slot, -1, h);
+ck_rhs_add_wanted(hs, slot, -1, h);
}
map->n_entries++;
@ -1253,7 +1253,7 @@ ck_rhs_get(struct ck_rhs *hs,
unsigned int g, g_p, probe;
unsigned int *generation;
-do {
+do {
map = ck_pr_load_ptr(&hs->map);
generation = &map->generation[h & CK_RHS_G_MASK];
g = ck_pr_load_uint(generation);
