whitespace: More style fixes.

Samy Al Bahra 12 years ago
parent 0598307186
commit bdadd7bf6b

src/ck_bag.c
@@ -45,8 +45,8 @@ static size_t allocator_overhead;
 bool
 ck_bag_init(struct ck_bag *bag,
-            size_t n_cachelines,
-            enum ck_bag_allocation_strategy as)
+    size_t n_cachelines,
+    enum ck_bag_allocation_strategy as)
 {
         size_t block_overhead, block_size;
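For reference, ck_bag_init is the only entry point touched in this file. A minimal call might look like the sketch below; the CK_BAG_ALLOCATE_GEOMETRIC strategy name is an assumption recalled from ck_bag.h of this era, so verify it against your tree.

#include <ck_bag.h>
#include <stdlib.h>

static struct ck_bag bag;

static void
setup(void)
{

        /* Blocks span one cache line; geometric growth doubles block
         * sizes as the bag fills (strategy name assumed from ck_bag.h). */
        if (ck_bag_init(&bag, 1, CK_BAG_ALLOCATE_GEOMETRIC) == false)
                abort();
}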

src/ck_barrier_centralized.c
@@ -30,8 +30,8 @@
 void
 ck_barrier_centralized(struct ck_barrier_centralized *barrier,
-                       struct ck_barrier_centralized_state *state,
-                       unsigned int n_threads)
+    struct ck_barrier_centralized_state *state,
+    unsigned int n_threads)
 {
         unsigned int sense, value;
@@ -57,3 +57,4 @@ ck_barrier_centralized(struct ck_barrier_centralized *barrier,
         ck_pr_fence_memory();
         return;
 }
+
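For context, the centralized barrier reformatted above is the simplest CK barrier: every thread spins on one shared sense word. A minimal sketch using the initializer macros from ck_barrier.h, with N_THREADS as a placeholder:

#include <ck_barrier.h>

#define N_THREADS 8 /* placeholder thread count */

static struct ck_barrier_centralized barrier =
    CK_BARRIER_CENTRALIZED_INITIALIZER;

static void
rendezvous(void)
{
        /* Sense-reversal state is per thread and reused across calls. */
        static __thread struct ck_barrier_centralized_state state =
            CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;

        /* Returns once all N_THREADS threads have arrived. */
        ck_barrier_centralized(&barrier, &state, N_THREADS);
}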

src/ck_barrier_combining.c
@@ -45,13 +45,13 @@ ck_barrier_combining_queue_dequeue(struct ck_barrier_combining_queue *queue)
                 queue->head = queue->head->next;
         }
 
-        return (front);
+        return front;
 }
 
 CK_CC_INLINE static void
 ck_barrier_combining_insert(struct ck_barrier_combining_group *parent,
-                            struct ck_barrier_combining_group *tnode,
-                            struct ck_barrier_combining_group **child)
+    struct ck_barrier_combining_group *tnode,
+    struct ck_barrier_combining_group **child)
 {
         *child = tnode;
@@ -74,7 +74,7 @@ ck_barrier_combining_insert(struct ck_barrier_combining_group *parent,
  */
 CK_CC_INLINE static void
 ck_barrier_combining_queue_enqueue(struct ck_barrier_combining_queue *queue,
-                                   struct ck_barrier_combining_group *node_value)
+    struct ck_barrier_combining_group *node_value)
 {
         node_value->next = NULL;
@@ -92,8 +92,8 @@ ck_barrier_combining_queue_enqueue(struct ck_barrier_combining_queue *queue,
 void
 ck_barrier_combining_group_init(struct ck_barrier_combining *root,
-                                struct ck_barrier_combining_group *tnode,
-                                unsigned int nthr)
+    struct ck_barrier_combining_group *tnode,
+    unsigned int nthr)
 {
         struct ck_barrier_combining_group *node;
         struct ck_barrier_combining_queue queue;
@@ -142,7 +142,7 @@ leave:
 void
 ck_barrier_combining_init(struct ck_barrier_combining *root,
-                          struct ck_barrier_combining_group *init_root)
+    struct ck_barrier_combining_group *init_root)
 {
         init_root->k = 0;
@@ -156,8 +156,8 @@ ck_barrier_combining_init(struct ck_barrier_combining *root,
 static void
 ck_barrier_combining_aux(struct ck_barrier_combining *barrier,
-                         struct ck_barrier_combining_group *tnode,
-                         unsigned int sense)
+    struct ck_barrier_combining_group *tnode,
+    unsigned int sense)
 {
         /*
@@ -195,8 +195,8 @@ ck_barrier_combining_aux(struct ck_barrier_combining *barrier,
 void
 ck_barrier_combining(struct ck_barrier_combining *barrier,
-                     struct ck_barrier_combining_group *tnode,
-                     struct ck_barrier_combining_state *state)
+    struct ck_barrier_combining_group *tnode,
+    struct ck_barrier_combining_state *state)
 {
         ck_barrier_combining_aux(barrier, tnode, state->sense);
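For context, the combining barrier functions above are used in two phases: build the combining tree once, then have each thread wait on its group. A sketch based only on the signatures in this diff; the zeroed per-thread state is an assumption, so prefer the initializer in ck_barrier.h if one is defined:

#include <ck_barrier.h>

static struct ck_barrier_combining barrier;
static struct ck_barrier_combining_group root, group;

static void
setup(unsigned int nthr)
{

        /* Install the tree root, then hang one group of nthr threads
         * off of it. Larger trees hang additional groups off the root. */
        ck_barrier_combining_init(&barrier, &root);
        ck_barrier_combining_group_init(&barrier, &group, nthr);
}

static void
rendezvous(void)
{
        /* Per-thread sense state; zero initialization is assumed here. */
        static __thread struct ck_barrier_combining_state state;

        ck_barrier_combining(&barrier, &group, &state);
}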

src/ck_barrier_dissemination.c
@@ -34,8 +34,8 @@
 void
 ck_barrier_dissemination_init(struct ck_barrier_dissemination *barrier,
-                              struct ck_barrier_dissemination_flag **barrier_internal,
-                              unsigned int nthr)
+    struct ck_barrier_dissemination_flag **barrier_internal,
+    unsigned int nthr)
 {
         unsigned int i, j, k, size, offset;
         bool p = nthr & (nthr - 1);
@@ -77,7 +77,7 @@ ck_barrier_dissemination_init(struct ck_barrier_dissemination *barrier,
 void
 ck_barrier_dissemination_subscribe(struct ck_barrier_dissemination *barrier,
-                                   struct ck_barrier_dissemination_state *state)
+    struct ck_barrier_dissemination_state *state)
 {
         state->parity = 0;
@@ -95,7 +95,7 @@ ck_barrier_dissemination_size(unsigned int nthr)
 void
 ck_barrier_dissemination(struct ck_barrier_dissemination *barrier,
-                         struct ck_barrier_dissemination_state *state)
+    struct ck_barrier_dissemination_state *state)
 {
         unsigned int i;
         unsigned int size = barrier->size;
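Wiring the three dissemination entry points together requires per-thread flag arrays sized by ck_barrier_dissemination_size. The allocation shapes below follow the pattern of CK's regression tests as best I recall, so treat them as assumptions:

#include <ck_barrier.h>
#include <stdlib.h>

static ck_barrier_dissemination_t *barrier;

static void
setup(unsigned int nthr)
{
        struct ck_barrier_dissemination_flag **flags;
        unsigned int i, size;

        /* One barrier slot and one flag row per thread (assumed shape). */
        barrier = malloc(sizeof(*barrier) * nthr);
        flags = malloc(sizeof(*flags) * nthr);
        size = ck_barrier_dissemination_size(nthr);
        for (i = 0; i < nthr; i++)
                flags[i] = malloc(sizeof(**flags) * size);

        ck_barrier_dissemination_init(barrier, flags, nthr);
}

static void
rendezvous(void)
{
        struct ck_barrier_dissemination_state state;

        /* Subscribe once per thread, then reuse the state. */
        ck_barrier_dissemination_subscribe(barrier, &state);
        ck_barrier_dissemination(barrier, &state);
}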

src/ck_barrier_mcs.c
@@ -31,8 +31,7 @@
 #include <stdbool.h>
 
 void
-ck_barrier_mcs_init(struct ck_barrier_mcs *barrier,
-                    unsigned int nthr)
+ck_barrier_mcs_init(struct ck_barrier_mcs *barrier, unsigned int nthr)
 {
         unsigned int i, j;
@@ -111,7 +110,7 @@ ck_barrier_mcs_reinitialize_children(struct ck_barrier_mcs *node)
 void
 ck_barrier_mcs(struct ck_barrier_mcs *barrier,
-               struct ck_barrier_mcs_state *state)
+    struct ck_barrier_mcs_state *state)
 {
         /*
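The MCS barrier allocates one tree node per thread; threads then claim a slot and reuse their state. A sketch under the same regression-test assumptions as above; ck_barrier_mcs_subscribe is assumed from ck_barrier.h:

#include <ck_barrier.h>
#include <stdlib.h>

static ck_barrier_mcs_t *barrier;

static void
setup(unsigned int nthr)
{

        /* One tree node per participating thread (assumed shape). */
        barrier = malloc(sizeof(*barrier) * nthr);
        ck_barrier_mcs_init(barrier, nthr);
}

static void
rendezvous(void)
{
        struct ck_barrier_mcs_state state;

        /* Subscribe once per thread before the first wait. */
        ck_barrier_mcs_subscribe(barrier, &state);
        ck_barrier_mcs(barrier, &state);
}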

src/ck_barrier_tournament.c
@@ -48,7 +48,7 @@ enum {
 void
 ck_barrier_tournament_subscribe(struct ck_barrier_tournament *barrier,
-                                struct ck_barrier_tournament_state *state)
+    struct ck_barrier_tournament_state *state)
 {
         state->sense = ~0;
@@ -58,8 +58,8 @@ ck_barrier_tournament_subscribe(struct ck_barrier_tournament *barrier,
 void
 ck_barrier_tournament_init(struct ck_barrier_tournament *barrier,
-                           struct ck_barrier_tournament_round **rounds,
-                           unsigned int nthr)
+    struct ck_barrier_tournament_round **rounds,
+    unsigned int nthr)
 {
         unsigned int i, k, size, twok, twokm1, imod2k;
@@ -107,7 +107,7 @@ ck_barrier_tournament_size(unsigned int nthr)
 void
 ck_barrier_tournament(struct ck_barrier_tournament *barrier,
-                      struct ck_barrier_tournament_state *state)
+    struct ck_barrier_tournament_state *state)
 {
         struct ck_barrier_tournament_round **rounds = ck_pr_load_ptr(&barrier->rounds);
         int round = 1;
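The tournament barrier needs a rounds matrix, one row per thread, with row length given by ck_barrier_tournament_size (visible in the hunk header above); subscription then seeds the per-thread sense, as the first hunk shows. A sketch, with allocation shapes assumed:

#include <ck_barrier.h>
#include <stdlib.h>

static struct ck_barrier_tournament barrier;

static void
setup(unsigned int nthr)
{
        struct ck_barrier_tournament_round **rounds;
        unsigned int i, size;

        rounds = malloc(sizeof(*rounds) * nthr);
        size = ck_barrier_tournament_size(nthr);
        for (i = 0; i < nthr; i++)
                rounds[i] = malloc(sizeof(**rounds) * size);

        ck_barrier_tournament_init(&barrier, rounds, nthr);
}

static void
rendezvous(void)
{
        struct ck_barrier_tournament_state state;

        ck_barrier_tournament_subscribe(&barrier, &state);
        ck_barrier_tournament(&barrier, &state);
}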

src/ck_epoch.c
@@ -156,7 +156,7 @@ ck_epoch_recycle(struct ck_epoch *global)
         unsigned int state;
 
         if (ck_pr_load_uint(&global->n_free) == 0)
-                return (NULL);
+                return NULL;
 
         CK_STACK_FOREACH(&global->records, cursor) {
                 record = ck_epoch_record_container(cursor);
@@ -216,9 +216,9 @@ ck_epoch_unregister(struct ck_epoch *global, struct ck_epoch_record *record)
 static struct ck_epoch_record *
 ck_epoch_scan(struct ck_epoch *global,
-              struct ck_epoch_record *cr,
-              unsigned int epoch,
-              bool *af)
+    struct ck_epoch_record *cr,
+    unsigned int epoch,
+    bool *af)
 {
         ck_stack_entry_t *cursor;

src/ck_hp.c
@@ -98,7 +98,7 @@ ck_hp_recycle(struct ck_hp *global)
         int state;
 
         if (ck_pr_load_uint(&global->n_free) == 0)
-                return (NULL);
+                return NULL;
 
         CK_STACK_FOREACH(&global->subscribers, entry) {
                 record = ck_hp_record_container(entry);
@@ -113,7 +113,7 @@ ck_hp_recycle(struct ck_hp *global)
                 }
         }
 
-        return (NULL);
+        return NULL;
 }
 
 void
@@ -132,8 +132,8 @@ ck_hp_unregister(struct ck_hp_record *entry)
 void
 ck_hp_register(struct ck_hp *state,
-               struct ck_hp_record *entry,
-               void **pointers)
+    struct ck_hp_record *entry,
+    void **pointers)
 {
         entry->state = CK_HP_USED;
@@ -269,9 +269,9 @@ ck_hp_reclaim(struct ck_hp_record *thread)
 void
 ck_hp_retire(struct ck_hp_record *thread,
-             struct ck_hp_hazard *hazard,
-             void *data,
-             void *pointer)
+    struct ck_hp_hazard *hazard,
+    void *data,
+    void *pointer)
 {
         ck_pr_store_ptr(&hazard->pointer, pointer);
@@ -287,9 +287,9 @@ ck_hp_retire(struct ck_hp_record *thread,
 void
 ck_hp_free(struct ck_hp_record *thread,
-           struct ck_hp_hazard *hazard,
-           void *data,
-           void *pointer)
+    struct ck_hp_hazard *hazard,
+    void *data,
+    void *pointer)
 {
         struct ck_hp *global;
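ck_hp_register and ck_hp_free above are the per-thread half of the hazard-pointer API. The global side is set up once with ck_hp_init; its degree/threshold parameters and the destructor type are recalled from ck_hp.h, so treat this as a sketch:

#include <ck_hp.h>
#include <stdlib.h>

#define SLOTS 1 /* hazard pointers per thread (illustrative) */

static struct ck_hp hp;

static void
node_destroy(void *p)
{

        free(p);
}

static void
setup(void)
{

        /* degree = hazard slots per record; 64 = pending-release
         * threshold before a reclamation scan (values illustrative). */
        ck_hp_init(&hp, SLOTS, 64, node_destroy);
}

static void
thread_enter(struct ck_hp_record *record)
{
        void **slots = malloc(sizeof(void *) * SLOTS);

        ck_hp_register(&hp, record, slots);
}

A thread would then publish a pointer with ck_hp_set before dereferencing it, and retire nodes through ck_hp_free, whose signature appears in the last hunk above.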

src/ck_hs.c
@@ -182,7 +182,11 @@ ck_hs_reset(struct ck_hs *hs)
 }
 
 static inline unsigned long
-ck_hs_map_probe_next(struct ck_hs_map *map, unsigned long offset, unsigned long h, unsigned long level, unsigned long probes)
+ck_hs_map_probe_next(struct ck_hs_map *map,
+    unsigned long offset,
+    unsigned long h,
+    unsigned long level,
+    unsigned long probes)
 {
         unsigned long r;
         unsigned long stride;
@@ -198,7 +202,7 @@ ck_hs_map_probe_next(struct ck_hs_map *map, unsigned long offset, unsigned long
 bool
 ck_hs_grow(struct ck_hs *hs,
-           unsigned long capacity)
+    unsigned long capacity)
 {
         struct ck_hs_map *map, *update;
         void **bucket, *previous;
@@ -272,13 +276,13 @@ restart:
 static void **
 ck_hs_map_probe(struct ck_hs *hs,
-                struct ck_hs_map *map,
-                unsigned long *n_probes,
-                void ***priority,
-                unsigned long h,
-                const void *key,
-                void **object,
-                unsigned long probe_limit)
+    struct ck_hs_map *map,
+    unsigned long *n_probes,
+    void ***priority,
+    unsigned long h,
+    const void *key,
+    void **object,
+    unsigned long probe_limit)
 {
         void **bucket, **cursor, *k;
         const void *compare;
@@ -361,9 +365,9 @@ leave:
 bool
 ck_hs_set(struct ck_hs *hs,
-          unsigned long h,
-          const void *key,
-          void **previous)
+    unsigned long h,
+    const void *key,
+    void **previous)
 {
         void **slot, **first, *object, *insert;
         unsigned long n_probes;
@@ -428,8 +432,8 @@ restart:
 bool
 ck_hs_put(struct ck_hs *hs,
-          unsigned long h,
-          const void *key)
+    unsigned long h,
+    const void *key)
 {
         void **slot, **first, *object, *insert;
         unsigned long n_probes;
@@ -493,8 +497,8 @@ restart:
 void *
 ck_hs_get(struct ck_hs *hs,
-          unsigned long h,
-          const void *key)
+    unsigned long h,
+    const void *key)
 {
         void **slot, **first, *object;
         struct ck_hs_map *map;
@@ -522,8 +526,8 @@ ck_hs_get(struct ck_hs *hs,
 void *
 ck_hs_remove(struct ck_hs *hs,
-             unsigned long h,
-             const void *key)
+    unsigned long h,
+    const void *key)
 {
         void **slot, **first, *object;
         struct ck_hs_map *map = hs->map;
@@ -541,12 +545,12 @@ ck_hs_remove(struct ck_hs *hs,
 bool
 ck_hs_init(struct ck_hs *hs,
-           unsigned int mode,
-           ck_hs_hash_cb_t *hf,
-           ck_hs_compare_cb_t *compare,
-           struct ck_malloc *m,
-           unsigned long n_entries,
-           unsigned long seed)
+    unsigned int mode,
+    ck_hs_hash_cb_t *hf,
+    ck_hs_compare_cb_t *compare,
+    struct ck_malloc *m,
+    unsigned long n_entries,
+    unsigned long seed)
 {
         if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
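ck_hs_init's full signature closes this file's hunks, so a usage sketch is easy to ground. The allocator shim matches the struct ck_malloc contract; the string hash is illustrative, and CK_HS_HASH is assumed to be the hashing convenience macro from ck_hs.h:

#include <ck_hs.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static void *
hs_malloc(size_t r)
{

        return malloc(r);
}

static void
hs_free(void *p, size_t b, bool r)
{

        (void)b;
        (void)r;
        free(p);
}

static struct ck_malloc allocator = {
        .malloc = hs_malloc,
        .free = hs_free
};

/* Illustrative hash and comparator over NUL-terminated strings. */
static unsigned long
hs_hash(const void *object, unsigned long seed)
{
        const char *s = object;
        unsigned long h = seed;

        while (*s != '\0')
                h = h * 131 + *s++;

        return h;
}

static bool
hs_compare(const void *previous, const void *compare)
{

        return strcmp(previous, compare) == 0;
}

static struct ck_hs hs;

static void
setup(void)
{
        const char *key = "example";

        if (ck_hs_init(&hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC, hs_hash,
            hs_compare, &allocator, 128, 6602834) == false)
                abort();

        if (ck_hs_put(&hs, CK_HS_HASH(&hs, hs_hash, key), key) == false)
                abort();
}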

src/ck_ht.c
@@ -76,7 +76,7 @@ struct ck_ht_map {
 void
 ck_ht_stat(struct ck_ht *table,
-           struct ck_ht_stat *st)
+    struct ck_ht_stat *st)
 {
         struct ck_ht_map *map = table->map;
@@ -87,9 +87,9 @@ ck_ht_stat(struct ck_ht *table,
 void
 ck_ht_hash(struct ck_ht_hash *h,
-           struct ck_ht *table,
-           const void *key,
-           uint16_t key_length)
+    struct ck_ht *table,
+    const void *key,
+    uint16_t key_length)
 {
         h->value = MurmurHash64A(key, key_length, table->seed);
@@ -98,8 +98,8 @@ ck_ht_hash(struct ck_ht_hash *h,
 void
 ck_ht_hash_direct(struct ck_ht_hash *h,
-                  struct ck_ht *table,
-                  uintptr_t key)
+    struct ck_ht *table,
+    uintptr_t key)
 {
         ck_ht_hash(h, table, &key, sizeof(key));
@@ -108,9 +108,9 @@ ck_ht_hash_direct(struct ck_ht_hash *h,
 static void
 ck_ht_hash_wrapper(struct ck_ht_hash *h,
-                   const void *key,
-                   size_t length,
-                   uint64_t seed)
+    const void *key,
+    size_t length,
+    uint64_t seed)
 {
         h->value = MurmurHash64A(key, length, seed);
@@ -178,11 +178,11 @@ ck_ht_map_probe_next(struct ck_ht_map *map, size_t offset, ck_ht_hash_t h, size_
 bool
 ck_ht_init(ck_ht_t *table,
-           enum ck_ht_mode mode,
-           ck_ht_hash_cb_t *h,
-           struct ck_malloc *m,
-           uint64_t entries,
-           uint64_t seed)
+    enum ck_ht_mode mode,
+    ck_ht_hash_cb_t *h,
+    struct ck_malloc *m,
+    uint64_t entries,
+    uint64_t seed)
 {
         if (m == NULL || m->malloc == NULL || m->free == NULL)
@@ -204,13 +204,13 @@ ck_ht_init(ck_ht_t *table,
 static struct ck_ht_entry *
 ck_ht_map_probe_wr(struct ck_ht_map *map,
-                   ck_ht_hash_t h,
-                   ck_ht_entry_t *snapshot,
-                   ck_ht_entry_t **available,
-                   const void *key,
-                   uint16_t key_length,
-                   uint64_t *probe_limit,
-                   uint64_t *probe_wr)
+    ck_ht_hash_t h,
+    ck_ht_entry_t *snapshot,
+    ck_ht_entry_t **available,
+    const void *key,
+    uint16_t key_length,
+    uint64_t *probe_limit,
+    uint64_t *probe_wr)
 {
         struct ck_ht_entry *bucket, *cursor;
         struct ck_ht_entry *first = NULL;
@@ -314,10 +314,10 @@ leave:
 static struct ck_ht_entry *
 ck_ht_map_probe_rd(struct ck_ht_map *map,
-                   ck_ht_hash_t h,
-                   ck_ht_entry_t *snapshot,
-                   const void *key,
-                   uint16_t key_length)
+    ck_ht_hash_t h,
+    ck_ht_entry_t *snapshot,
+    const void *key,
+    uint16_t key_length)
 {
         struct ck_ht_entry *bucket, *cursor;
         size_t offset, i, j;
@@ -430,8 +430,8 @@ ck_ht_count(ck_ht_t *table)
 bool
 ck_ht_next(struct ck_ht *table,
-           struct ck_ht_iterator *i,
-           struct ck_ht_entry **entry)
+    struct ck_ht_iterator *i,
+    struct ck_ht_entry **entry)
 {
         struct ck_ht_map *map = table->map;
         uintptr_t key;
@@ -560,8 +560,8 @@ restart:
 bool
 ck_ht_remove_spmc(ck_ht_t *table,
-                  ck_ht_hash_t h,
-                  ck_ht_entry_t *entry)
+    ck_ht_hash_t h,
+    ck_ht_entry_t *entry)
 {
         struct ck_ht_map *map;
         struct ck_ht_entry *candidate, *priority, snapshot;
@@ -615,8 +615,8 @@ ck_ht_remove_spmc(ck_ht_t *table,
 bool
 ck_ht_get_spmc(ck_ht_t *table,
-               ck_ht_hash_t h,
-               ck_ht_entry_t *entry)
+    ck_ht_hash_t h,
+    ck_ht_entry_t *entry)
 {
         struct ck_ht_entry *candidate, snapshot;
         struct ck_ht_map *map;
@@ -658,8 +658,8 @@ restart:
 bool
 ck_ht_set_spmc(ck_ht_t *table,
-               ck_ht_hash_t h,
-               ck_ht_entry_t *entry)
+    ck_ht_hash_t h,
+    ck_ht_entry_t *entry)
 {
         struct ck_ht_entry snapshot, *candidate, *priority;
         struct ck_ht_map *map;
@@ -772,8 +772,8 @@ ck_ht_set_spmc(ck_ht_t *table,
 bool
 ck_ht_put_spmc(ck_ht_t *table,
-               ck_ht_hash_t h,
-               ck_ht_entry_t *entry)
+    ck_ht_hash_t h,
+    ck_ht_entry_t *entry)
 {
         struct ck_ht_entry snapshot, *candidate, *priority;
         struct ck_ht_map *map;
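ck_ht mirrors the same shape: ck_ht_init (signature above), hashing through ck_ht_hash, and entry manipulation through the _spmc calls. Passing a NULL hash callback is assumed to select the MurmurHash64A wrapper visible in this diff, and ck_ht_entry_set is recalled from ck_ht.h, so verify both. A sketch:

#include <ck_ht.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static void *
ht_malloc(size_t r)
{

        return malloc(r);
}

static void
ht_free(void *p, size_t b, bool r)
{

        (void)b;
        (void)r;
        free(p);
}

static struct ck_malloc allocator = {
        .malloc = ht_malloc,
        .free = ht_free
};

static ck_ht_t table;

static void
setup(void)
{
        ck_ht_entry_t entry;
        ck_ht_hash_t h;
        const char *key = "example";
        char *value = "value";

        /* NULL hash callback: assumed to install ck_ht_hash_wrapper. */
        if (ck_ht_init(&table, CK_HT_MODE_BYTESTRING, NULL, &allocator,
            64, 6602834) == false)
                abort();

        ck_ht_hash(&h, &table, key, (uint16_t)strlen(key));
        ck_ht_entry_set(&entry, h, key, (uint16_t)strlen(key), value);
        if (ck_ht_put_spmc(&table, h, &entry) == false)
                abort();
}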
