regressions: Benchmark coverage for latchlock operations.

ck_pring
Samy Al Bahra 11 years ago
parent 56de32fffd
commit 24d65432e0

@ -55,6 +55,19 @@ main(void)
e_b = rdtsc(); e_b = rdtsc();
printf(" WRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS); printf(" WRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
for (i = 0; i < STEPS; i++) {
ck_rwlock_write_latch(&rwlock);
ck_rwlock_write_unlatch(&rwlock);
}
s_b = rdtsc();
for (i = 0; i < STEPS; i++) {
ck_rwlock_write_latch(&rwlock);
ck_rwlock_write_unlatch(&rwlock);
}
e_b = rdtsc();
printf(" LATCHWRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
#ifdef CK_F_PR_RTM #ifdef CK_F_PR_RTM
struct ck_elide_config config = CK_ELIDE_CONFIG_DEFAULT_INITIALIZER; struct ck_elide_config config = CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;
struct ck_elide_stat st = CK_ELIDE_STAT_INITIALIZER; struct ck_elide_stat st = CK_ELIDE_STAT_INITIALIZER;
@ -99,6 +112,19 @@ main(void)
e_b = rdtsc(); e_b = rdtsc();
printf(" READ: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS); printf(" READ: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
for (i = 0; i < STEPS; i++) {
ck_rwlock_read_latchlock(&rwlock);
ck_rwlock_read_unlock(&rwlock);
}
s_b = rdtsc();
for (i = 0; i < STEPS; i++) {
ck_rwlock_read_latchlock(&rwlock);
ck_rwlock_read_unlock(&rwlock);
}
e_b = rdtsc();
printf(" LATCHREAD: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
#ifdef CK_F_PR_RTM #ifdef CK_F_PR_RTM
ck_elide_stat_init(&st); ck_elide_stat_init(&st);

@ -181,6 +181,71 @@ thread_lock(void *pun)
return NULL; return NULL;
} }
static void *
thread_lock_latch(void *pun)
{
/* Benchmark worker: measures the round-trip cost of a read latchlock
 * acquire/release pair on the shared rwlock. `pun` points to a uint64_t
 * that receives the average cycle count per pair on exit.
 * NOTE(review): relies on file-scope globals (affinity, barrier, threads,
 * flag, rw) defined elsewhere in this file — not visible in this hunk. */
uint64_t s_b, e_b, a, i;
uint64_t *value = pun;
/* Pin this thread to a CPU so rdtsc readings are not skewed by migration. */
if (aff_iterate(&affinity) != 0) {
perror("ERROR: Could not affine thread");
exit(EXIT_FAILURE);
}
/* Start barrier: wait until all worker threads are running before timing. */
ck_pr_inc_int(&barrier);
while (ck_pr_load_int(&barrier) != threads)
ck_pr_stall();
/* i counts timed iterations (starts at 1 so the final a / i is well-defined
 * even if we break on the first pass); a accumulates per-pair cycle costs. */
for (i = 1, a = 0;; i++) {
s_b = rdtsc();
/* 16 manually unrolled latchlock/unlock pairs: amortizes the rdtsc and
 * loop overhead across many operations. The unroll count must match the
 * ">> 4" (divide by 16) below. Per the ck_rwlock API, a read latchlock
 * is released with the ordinary read_unlock. */
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
ck_rwlock_read_latchlock(&rw.lock);
ck_rwlock_read_unlock(&rw.lock);
e_b = rdtsc();
/* >> 4 == / 16: convert the elapsed time for 16 pairs into a per-pair cost. */
a += (e_b - s_b) >> 4;
/* `flag` is set (presumably by the coordinating thread — defined outside
 * this hunk) to tell workers to stop measuring. */
if (ck_pr_load_uint(&flag) == 1)
break;
}
/* End barrier: wait for every worker to finish timing before reporting. */
ck_pr_inc_int(&barrier);
while (ck_pr_load_int(&barrier) != threads * 2)
ck_pr_stall();
/* Report the mean per-pair latency (cycles) back to the caller. */
*value = (a / i);
return NULL;
}
static void static void
rwlock_test(pthread_t *p, int d, uint64_t *latency, void *(*f)(void *), const char *label) rwlock_test(pthread_t *p, int d, uint64_t *latency, void *(*f)(void *), const char *label)
{ {
@ -244,6 +309,7 @@ main(int argc, char *argv[])
d = atoi(argv[1]); d = atoi(argv[1]);
rwlock_test(p, d, latency, thread_lock, "rwlock"); rwlock_test(p, d, latency, thread_lock, "rwlock");
rwlock_test(p, d, latency, thread_lock_latch, "rwlock, latch");
#ifdef CK_F_PR_RTM #ifdef CK_F_PR_RTM
rwlock_test(p, d, latency, thread_lock_rtm, "rwlock, rtm"); rwlock_test(p, d, latency, thread_lock_rtm, "rwlock, rtm");

Loading…
Cancel
Save