merged changes from main ck repo

ck_pring
Brendon Scheinman 12 years ago
commit ed5292546f

5
.gitignore vendored

@@ -13,6 +13,11 @@ build/Makefile
*.a
*.so
*.dSYM
regressions/ck_cohort/benchmark/ck_cohort.LATENCY
regressions/ck_cohort/benchmark/ck_cohort.THROUGHPUT
regressions/ck_pflock/benchmark/latency
regressions/ck_pflock/benchmark/throughput
regressions/ck_pflock/validate/validate
regressions/ck_barrier/benchmark/throughput
regressions/ck_barrier/validate/barrier_centralized
regressions/ck_barrier/validate/barrier_combining

@@ -53,8 +53,6 @@ consecutive acquisitions of its local lock, even if there are other threads wait
If you are unsure of a value to use for the
.Fa pass_limit
argument, you should use CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT.
Cohort instances do not allocate any new memory during initialization, so they
can be freed later without using an explicit dispose method.
.Sh SEE ALSO
.Xr ck_cohort 3 ,
.Xr CK_COHORT_PROTOTYPE 3 ,

@@ -52,7 +52,6 @@ enum ck_cohort_state {
#define CK_COHORT_UNLOCK(N, C, GC, LC) ck_cohort_##N##_unlock(C, GC, LC)
#define CK_COHORT_PROTOTYPE(N, GT, GL, GU, LT, LL, LU) \
\
CK_COHORT_INSTANCE(N) { \
GT *global_lock; \
LT *local_lock; \
@@ -80,6 +79,7 @@ enum ck_cohort_state {
ck_cohort_##N##_lock(CK_COHORT_INSTANCE(N) *cohort, \
void *global_context, void *local_context) \
{ \
\
ck_pr_inc_uint(&cohort->waiting_threads); \
LL(cohort->local_lock, local_context); \
ck_pr_dec_uint(&cohort->waiting_threads); \
@@ -96,6 +96,7 @@ enum ck_cohort_state {
ck_cohort_##N##_unlock(CK_COHORT_INSTANCE(N) *cohort, \
void *global_context, void *local_context) \
{ \
\
if (ck_pr_load_uint(&cohort->waiting_threads) > 0 \
&& cohort->acquire_count < cohort->local_pass_limit) { \
cohort->release_state = CK_COHORT_STATE_LOCAL; \
@@ -111,7 +112,6 @@ enum ck_cohort_state {
return; \
}
#define CK_COHORT_INITIALIZER { \
.global_lock = NULL, \
.local_lock = NULL, \
@@ -123,3 +123,4 @@ enum ck_cohort_state {
#endif /* _CK_COHORT_H */

@@ -13,7 +13,7 @@
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXCK_PFLOCK_PRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL

@@ -4,6 +4,7 @@ DIR=backoff \
bitmap \
brlock \
bytelock \
cohort \
epoch \
fifo \
hp \
@@ -21,6 +22,8 @@ DIR=backoff \
.PHONY: all clean check
all:
$(MAKE) -C ./ck_cohort/validate all
$(MAKE) -C ./ck_cohort/benchmark all
$(MAKE) -C ./ck_bitmap/validate all
$(MAKE) -C ./ck_backoff/validate all
$(MAKE) -C ./ck_queue/validate all
@@ -55,9 +58,13 @@ all:
$(MAKE) -C ./ck_bag/validate all
clean:
$(MAKE) -C ./ck_pflock/validate clean
$(MAKE) -C ./ck_pflock/benchmark clean
$(MAKE) -C ./ck_backoff/validate clean
$(MAKE) -C ./ck_bitmap/validate clean
$(MAKE) -C ./ck_queue/validate clean
$(MAKE) -C ./ck_cohort/validate clean
$(MAKE) -C ./ck_cohort/benchmark clean
$(MAKE) -C ./ck_brlock/validate clean
$(MAKE) -C ./ck_ht/validate clean
$(MAKE) -C ./ck_ht/benchmark clean

@@ -57,27 +57,35 @@ static struct counters *count;
static uint64_t nthr;
static unsigned int n_cohorts;
static unsigned int barrier;
int critical __attribute__((aligned(64)));
static int critical CK_CC_CACHELINE;
/*
 * Adapter giving ck_spinlock_lock the (lock, context) signature that
 * CK_COHORT_PROTOTYPE expects of its lock functions; a plain spinlock
 * has no use for the context argument.
 */
static void
ck_spinlock_lock_with_context(ck_spinlock_t *lock, void *context)
{

(void)context; /* unused: spinlocks take no per-acquisition context */
ck_spinlock_lock(lock);
return;
}
/*
 * Unlock-side counterpart of ck_spinlock_lock_with_context: wraps
 * ck_spinlock_unlock in the (lock, context) signature required by
 * CK_COHORT_PROTOTYPE.
 */
static void
ck_spinlock_unlock_with_context(ck_spinlock_t *lock, void *context)
{

(void)context; /* unused: spinlocks take no per-release context */
ck_spinlock_unlock(lock);
return;
}
CK_COHORT_PROTOTYPE(basic,
ck_spinlock_t, ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context,
ck_spinlock_t, ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context)
static CK_COHORT_INSTANCE(basic) *cohorts;
struct cohort_record {
CK_COHORT_INSTANCE(basic) cohort;
} CK_CC_CACHELINE;
static struct cohort_record *cohorts;
static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;
struct block {
@@ -94,12 +102,13 @@ fairness(void *null)
unsigned int core;
CK_COHORT_INSTANCE(basic) *cohort;
if (aff_iterate_core(&a, &core)) {
perror("ERROR: Could not affine thread");
exit(EXIT_FAILURE);
}
cohort = cohorts + (core / (int)(a.delta)) % n_cohorts;
cohort = &((cohorts + (core / (int)(a.delta)) % n_cohorts)->cohort);
while (ck_pr_load_uint(&ready) == 0);
@@ -118,7 +127,7 @@ fairness(void *null)
CK_COHORT_UNLOCK(basic, cohort, NULL, NULL);
}
return (NULL);
return NULL;
}
int
@@ -133,43 +142,35 @@ main(int argc, char *argv[])
if (argc != 5) {
ck_error("Usage: ck_cohort <number of cohorts> <threads per cohort> "
"<affinity delta> <critical section>\n");
exit(EXIT_FAILURE);
}
n_cohorts = atoi(argv[1]);
if (n_cohorts <= 0) {
ck_error("ERROR: Number of cohorts must be greater than 0\n");
exit(EXIT_FAILURE);
}
nthr = n_cohorts * atoi(argv[2]);
if (nthr <= 0) {
ck_error("ERROR: Number of threads must be greater than 0\n");
exit(EXIT_FAILURE);
}
critical = atoi(argv[4]);
if (critical < 0) {
ck_error("ERROR: critical section cannot be negative\n");
exit(EXIT_FAILURE);
}
threads = malloc(sizeof(pthread_t) * nthr);
if (threads == NULL) {
ck_error("ERROR: Could not allocate thread structures\n");
exit(EXIT_FAILURE);
}
cohorts = malloc(sizeof(CK_COHORT_INSTANCE(basic)) * n_cohorts);
cohorts = malloc(sizeof(struct cohort_record) * n_cohorts);
if (cohorts == NULL) {
ck_error("ERROR: Could not allocate cohort structures\n");
exit(EXIT_FAILURE);
}
context = malloc(sizeof(struct block) * nthr);
if (context == NULL) {
ck_error("ERROR: Could not allocate thread contexts\n");
exit(EXIT_FAILURE);
}
a.delta = atoi(argv[2]);
@@ -178,7 +179,6 @@ main(int argc, char *argv[])
count = malloc(sizeof(*count) * nthr);
if (count == NULL) {
ck_error("ERROR: Could not create acquisition buffer\n");
exit(EXIT_FAILURE);
}
memset(count, 0, sizeof(*count) * nthr);
@@ -187,9 +187,8 @@ main(int argc, char *argv[])
local_lock = malloc(max(CK_MD_CACHELINE, sizeof(ck_spinlock_t)));
if (local_lock == NULL) {
ck_error("ERROR: Could not allocate local lock\n");
exit(EXIT_FAILURE);
}
CK_COHORT_INIT(basic, cohorts + i, &global_lock, local_lock,
CK_COHORT_INIT(basic, &((cohorts + i)->cohort), &global_lock, local_lock,
CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
local_lock = NULL;
}
@@ -200,7 +199,6 @@ main(int argc, char *argv[])
context[i].tid = i;
if (pthread_create(&threads[i], NULL, fairness, context + i)) {
ck_error("ERROR: Could not create thread %d\n", i);
exit(EXIT_FAILURE);
}
}
fprintf(stderr, "done\n");
@@ -228,6 +226,6 @@ main(int argc, char *argv[])
printf("# average : %15" PRIu64 "\n", v);
printf("# deviation : %.2f (%.2f%%)\n\n", sqrt(d / nthr), (sqrt(d / nthr) / v) * 100.00);
return (0);
return 0;
}

@@ -8,7 +8,7 @@ validate: validate.c ../../../include/ck_cohort.h
$(CC) $(CFLAGS) -o validate validate.c
check: all
./validate $(CORES) 1
./validate `expr $(CORES) / 2` 2 1
clean:
rm -rf *.dSYM *~ *.o $(OBJECTS)

Loading…
Cancel
Save