ck_cohort: First-cut implementation of generalized cohort interface.

This work was contributed by Brendon Scheinman. It is based on
"Lock Cohorting: A General Technique for Designing NUMA Locks".
Samy Al Bahra 12 years ago
parent 060b922dc2
commit f6a53fd9de

.gitignore

@@ -13,6 +13,11 @@ build/Makefile
*.a
*.so
*.dSYM
regressions/ck_cohort/benchmark/ck_cohort.LATENCY
regressions/ck_cohort/benchmark/ck_cohort.THROUGHPUT
regressions/ck_pflock/benchmark/latency
regressions/ck_pflock/benchmark/throughput
regressions/ck_pflock/validate/validate
regressions/ck_barrier/benchmark/throughput
regressions/ck_barrier/validate/barrier_centralized
regressions/ck_barrier/validate/barrier_combining

include/ck_cohort.h
@@ -0,0 +1,126 @@
/*
* Copyright 2013 Samy Al Bahra.
* Copyright 2013 Brendon Scheinman.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _CK_COHORT_H
#define _CK_COHORT_H
/*
* This is an implementation of lock cohorts as described in:
* Dice, D.; Marathe, V.; and Shavit, N. 2012.
* Lock Cohorting: A General Technique for Designing NUMA Locks
*/
#include <ck_cc.h>
#include <ck_pr.h>
#include <stddef.h>
enum ck_cohort_state {
	CK_COHORT_STATE_GLOBAL = 0,
	CK_COHORT_STATE_LOCAL = 1
};

#define CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT 10

#define CK_COHORT_NAME(N) ck_cohort_##N
#define CK_COHORT_INSTANCE(N) struct CK_COHORT_NAME(N)
#define CK_COHORT_INIT(N, C, GL, LL, P) ck_cohort_##N##_init(C, GL, LL, P)
#define CK_COHORT_LOCK(N, C, GC, LC) ck_cohort_##N##_lock(C, GC, LC)
#define CK_COHORT_UNLOCK(N, C, GC, LC) ck_cohort_##N##_unlock(C, GC, LC)

#define CK_COHORT_PROTOTYPE(N, GT, GL, GU, LT, LL, LU)			\
	CK_COHORT_INSTANCE(N) {						\
		GT *global_lock;					\
		LT *local_lock;						\
		enum ck_cohort_state release_state;			\
		unsigned int waiting_threads;				\
		unsigned int acquire_count;				\
		unsigned int local_pass_limit;				\
	};								\
									\
	CK_CC_INLINE static void					\
	ck_cohort_##N##_init(struct ck_cohort_##N *cohort,		\
	    GT *global_lock, LT *local_lock, unsigned int pass_limit)	\
	{								\
		cohort->global_lock = global_lock;			\
		cohort->local_lock = local_lock;			\
		cohort->release_state = CK_COHORT_STATE_GLOBAL;		\
		cohort->waiting_threads = 0;				\
		cohort->acquire_count = 0;				\
		cohort->local_pass_limit = pass_limit;			\
		ck_pr_barrier();					\
		return;							\
	}								\
									\
	CK_CC_INLINE static void					\
	ck_cohort_##N##_lock(CK_COHORT_INSTANCE(N) *cohort,		\
	    void *global_context, void *local_context)			\
	{								\
									\
		ck_pr_inc_uint(&cohort->waiting_threads);		\
		LL(cohort->local_lock, local_context);			\
		ck_pr_dec_uint(&cohort->waiting_threads);		\
									\
		if (cohort->release_state == CK_COHORT_STATE_GLOBAL) {	\
			GL(cohort->global_lock, global_context);	\
		}							\
									\
		++cohort->acquire_count;				\
		return;							\
	}								\
									\
	CK_CC_INLINE static void					\
	ck_cohort_##N##_unlock(CK_COHORT_INSTANCE(N) *cohort,		\
	    void *global_context, void *local_context)			\
	{								\
									\
		if (ck_pr_load_uint(&cohort->waiting_threads) > 0	\
		    && cohort->acquire_count < cohort->local_pass_limit) { \
			cohort->release_state = CK_COHORT_STATE_LOCAL;	\
		} else {						\
			GU(cohort->global_lock, global_context);	\
			cohort->release_state = CK_COHORT_STATE_GLOBAL;	\
			cohort->acquire_count = 0;			\
		}							\
									\
		ck_pr_fence_memory();					\
		LU(cohort->local_lock, local_context);			\
									\
		return;							\
	}

#define CK_COHORT_INITIALIZER {						\
	.global_lock = NULL,						\
	.local_lock = NULL,						\
	.release_state = CK_COHORT_STATE_GLOBAL,			\
	.waiting_threads = 0,						\
	.acquire_count = 0,						\
	.local_pass_limit = CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT		\
}

#endif /* _CK_COHORT_H */
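
For reference, a minimal usage sketch of the generated interface, mirroring the regression code later in this commit (the cohort name "demo", the spinlock wrappers, and example() are illustrative, not part of the header):

#include <ck_cohort.h>
#include <ck_spinlock.h>

/*
 * Cohort lock functions take the lock pointer plus an opaque context
 * pointer, so plain spinlocks need thin wrappers that discard it.
 */
static void
lock_with_context(ck_spinlock_t *lock, void *context)
{

	(void)context;
	ck_spinlock_lock(lock);
	return;
}

static void
unlock_with_context(ck_spinlock_t *lock, void *context)
{

	(void)context;
	ck_spinlock_unlock(lock);
	return;
}

/* Generates struct ck_cohort_demo and ck_cohort_demo_{init,lock,unlock}. */
CK_COHORT_PROTOTYPE(demo,
    ck_spinlock_t, lock_with_context, unlock_with_context,
    ck_spinlock_t, lock_with_context, unlock_with_context)

static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;
static ck_spinlock_t local_lock = CK_SPINLOCK_INITIALIZER;
static CK_COHORT_INSTANCE(demo) cohort = CK_COHORT_INITIALIZER;

static void
example(void)
{

	/* In practice, allocate one local lock per NUMA node. */
	CK_COHORT_INIT(demo, &cohort, &global_lock, &local_lock,
	    CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);

	CK_COHORT_LOCK(demo, &cohort, NULL, NULL);
	/* ...critical section... */
	CK_COHORT_UNLOCK(demo, &cohort, NULL, NULL);
}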

regressions/Makefile
@@ -4,6 +4,7 @@ DIR=backoff \
	bitmap \
	brlock \
	bytelock \
	cohort \
	epoch \
	fifo \
	hp \
@@ -21,6 +22,8 @@ DIR=backoff \
.PHONY: all clean check

all:
	$(MAKE) -C ./ck_cohort/validate all
	$(MAKE) -C ./ck_cohort/benchmark all
	$(MAKE) -C ./ck_bitmap/validate all
	$(MAKE) -C ./ck_backoff/validate all
	$(MAKE) -C ./ck_queue/validate all
@@ -58,6 +61,8 @@ clean:
	$(MAKE) -C ./ck_backoff/validate clean
	$(MAKE) -C ./ck_bitmap/validate clean
	$(MAKE) -C ./ck_queue/validate clean
	$(MAKE) -C ./ck_cohort/validate clean
	$(MAKE) -C ./ck_cohort/benchmark clean
	$(MAKE) -C ./ck_brlock/validate clean
	$(MAKE) -C ./ck_ht/validate clean
	$(MAKE) -C ./ck_ht/benchmark clean

regressions/ck_cohort/benchmark/Makefile
@@ -0,0 +1,17 @@
.PHONY: all clean

OBJECTS=ck_cohort.THROUGHPUT ck_cohort.LATENCY

all: $(OBJECTS)

ck_cohort.THROUGHPUT: throughput.c
	$(CC) $(CFLAGS) -o ck_cohort.THROUGHPUT throughput.c -lm

ck_cohort.LATENCY: ck_cohort.c
	$(CC) -DLATENCY $(CFLAGS) -o ck_cohort.LATENCY ck_cohort.c

clean:
	rm -rf *.dSYM $(OBJECTS)

include ../../../build/regressions.build
CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE

regressions/ck_cohort/benchmark/ck_cohort.c
@@ -0,0 +1,8 @@
#include "../ck_cohort.h"
#include <ck_cohort.h>
#ifdef THROUGHPUT
#include "../../ck_spinlock/benchmark/throughput.h"
#elif defined(LATENCY)
#include "../../ck_spinlock/benchmark/latency.h"
#endif
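
The shared ck_spinlock harness drives whatever lock the including file describes through the LOCK_* macros above. A sketch of that contract (the harness internals and benchmark_skeleton() are assumptions here, not part of this commit):

/*
 * Sketch: what the ck_spinlock benchmark headers are expected to do with
 * the macros from ../ck_cohort.h.  LOCK_DEFINE declares the lock objects
 * at file scope; LOCK_INIT, LOCK and UNLOCK expand to statements.
 */
LOCK_DEFINE;

static void
benchmark_skeleton(void)
{

	LOCK_INIT;
	LOCK;
	/* ...measured critical section... */
	UNLOCK;
}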

regressions/ck_cohort/benchmark/throughput.c
@@ -0,0 +1,231 @@
/*
* Copyright 2013 Samy Al Bahra.
* Copyright 2013 Brendon Scheinman.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <sys/time.h>
#include <ck_pr.h>
#include <ck_cohort.h>
#include <ck_md.h>
#include <ck_spinlock.h>
#include "../../common.h"
#define max(x, y) (((x) > (y)) ? (x) : (y))

static struct affinity a;
static unsigned int ready;

struct counters {
	uint64_t value;
} CK_CC_CACHELINE;

static struct counters *count;
static uint64_t nthr;
static unsigned int n_cohorts;
static unsigned int barrier;
static int critical CK_CC_CACHELINE;
static void
ck_spinlock_lock_with_context(ck_spinlock_t *lock, void *context)
{

	(void)context;
	ck_spinlock_lock(lock);
	return;
}

static void
ck_spinlock_unlock_with_context(ck_spinlock_t *lock, void *context)
{

	(void)context;
	ck_spinlock_unlock(lock);
	return;
}

CK_COHORT_PROTOTYPE(basic,
    ck_spinlock_t, ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context,
    ck_spinlock_t, ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context)

struct cohort_record {
	CK_COHORT_INSTANCE(basic) cohort;
} CK_CC_CACHELINE;
static struct cohort_record *cohorts;

static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;

struct block {
	unsigned int tid;
};
static void *
fairness(void *null)
{
	struct block *context = null;
	unsigned int i = context->tid;
	volatile int j;
	long int base;
	unsigned int core;
	CK_COHORT_INSTANCE(basic) *cohort;

	if (aff_iterate_core(&a, &core)) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	cohort = &((cohorts + (core / (int)(a.delta)) % n_cohorts)->cohort);

	while (ck_pr_load_uint(&ready) == 0);

	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) != nthr);

	while (ck_pr_load_uint(&ready)) {
		CK_COHORT_LOCK(basic, cohort, NULL, NULL);

		count[i].value++;
		if (critical) {
			base = common_lrand48() % critical;
			for (j = 0; j < base; j++);
		}

		CK_COHORT_UNLOCK(basic, cohort, NULL, NULL);
	}

	return NULL;
}
int
main(int argc, char *argv[])
{
	uint64_t v, d;
	unsigned int i;
	pthread_t *threads;
	struct block *context;
	ck_spinlock_t *local_lock;

	if (argc != 5) {
		ck_error("Usage: ck_cohort <number of cohorts> <threads per cohort> "
		    "<affinity delta> <critical section>\n");
	}

	n_cohorts = atoi(argv[1]);
	if (n_cohorts <= 0) {
		ck_error("ERROR: Number of cohorts must be greater than 0\n");
	}

	nthr = n_cohorts * atoi(argv[2]);
	if (nthr <= 0) {
		ck_error("ERROR: Number of threads must be greater than 0\n");
	}

	critical = atoi(argv[4]);
	if (critical < 0) {
		ck_error("ERROR: critical section cannot be negative\n");
	}

	threads = malloc(sizeof(pthread_t) * nthr);
	if (threads == NULL) {
		ck_error("ERROR: Could not allocate thread structures\n");
	}

	cohorts = malloc(sizeof(struct cohort_record) * n_cohorts);
	if (cohorts == NULL) {
		ck_error("ERROR: Could not allocate cohort structures\n");
	}

	context = malloc(sizeof(struct block) * nthr);
	if (context == NULL) {
		ck_error("ERROR: Could not allocate thread contexts\n");
	}

	a.delta = atoi(argv[3]);
	a.request = 0;

	count = malloc(sizeof(*count) * nthr);
	if (count == NULL) {
		ck_error("ERROR: Could not create acquisition buffer\n");
	}
	memset(count, 0, sizeof(*count) * nthr);

	fprintf(stderr, "Creating cohorts...");
	for (i = 0; i < n_cohorts; i++) {
		local_lock = malloc(max(CK_MD_CACHELINE, sizeof(ck_spinlock_t)));
		if (local_lock == NULL) {
			ck_error("ERROR: Could not allocate local lock\n");
		}
		CK_COHORT_INIT(basic, &((cohorts + i)->cohort), &global_lock, local_lock,
		    CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
		local_lock = NULL;
	}
	fprintf(stderr, "done\n");

	fprintf(stderr, "Creating threads (fairness)...");
	for (i = 0; i < nthr; i++) {
		context[i].tid = i;
		if (pthread_create(&threads[i], NULL, fairness, context + i)) {
			ck_error("ERROR: Could not create thread %u\n", i);
		}
	}
	fprintf(stderr, "done\n");

	ck_pr_store_uint(&ready, 1);
	common_sleep(10);
	ck_pr_store_uint(&ready, 0);

	fprintf(stderr, "Waiting for threads to finish acquisition regression...");
	for (i = 0; i < nthr; i++)
		pthread_join(threads[i], NULL);
	fprintf(stderr, "done\n\n");

	for (i = 0, v = 0; i < nthr; i++) {
		printf("%u %15" PRIu64 "\n", i, count[i].value);
		v += count[i].value;
	}

	printf("\n# total      : %15" PRIu64 "\n", v);
	printf("# throughput : %15" PRIu64 " a/s\n", (v /= nthr) / 10);

	for (i = 0, d = 0; i < nthr; i++)
		d += (count[i].value - v) * (count[i].value - v);

	printf("# average    : %15" PRIu64 "\n", v);
	printf("# deviation  : %.2f (%.2f%%)\n\n", sqrt(d / nthr), (sqrt(d / nthr) / v) * 100.00);

	return 0;
}

regressions/ck_cohort/ck_cohort.h
@@ -0,0 +1,25 @@
#define LOCK_NAME "ck_cohort"
#define LOCK_DEFINE							\
	static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER; \
	static ck_spinlock_fas_t local_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;	\
									\
	static void							\
	ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context) \
	{								\
		(void)context;						\
		ck_spinlock_fas_lock(lock);				\
	}								\
									\
	static void							\
	ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context) \
	{								\
		(void)context;						\
		ck_spinlock_fas_unlock(lock);				\
	}								\
									\
	CK_COHORT_PROTOTYPE(fas_fas,					\
	    ck_spinlock_fas_t, ck_spinlock_fas_lock_with_context,	\
	    ck_spinlock_fas_unlock_with_context,			\
	    ck_spinlock_fas_t, ck_spinlock_fas_lock_with_context,	\
	    ck_spinlock_fas_unlock_with_context)			\
	static CK_COHORT_INSTANCE(fas_fas) CK_CC_CACHELINE cohort = CK_COHORT_INITIALIZER

#define LOCK_INIT CK_COHORT_INIT(fas_fas, &cohort, &global_fas_lock,	\
    &local_fas_lock, CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT)
#define LOCK CK_COHORT_LOCK(fas_fas, &cohort, NULL, NULL)
#define UNLOCK CK_COHORT_UNLOCK(fas_fas, &cohort, NULL, NULL)

regressions/ck_cohort/validate/Makefile
@@ -0,0 +1,17 @@
.PHONY: all check clean distribution

OBJECTS=validate

all: $(OBJECTS)

validate: validate.c ../../../include/ck_cohort.h
	$(CC) $(CFLAGS) -o validate validate.c

check: all
	./validate `expr $(CORES) / 2` 2 1

clean:
	rm -rf *.dSYM *~ *.o $(OBJECTS)

include ../../../build/regressions.build
CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE

regressions/ck_cohort/validate/validate.c
@@ -0,0 +1,180 @@
/*
* Copyright 2013 Samy Al Bahra.
* Copyright 2013 Brendon Scheinman.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <ck_pr.h>
#include <ck_cohort.h>
#include <ck_spinlock.h>
#include "../../common.h"
#ifndef ITERATE
#define ITERATE 1000000
#endif

static struct affinity a;
static unsigned int locked;
static int nthr;
static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;

static void
ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)
{

	(void)context;
	ck_spinlock_fas_lock(lock);
}

static void
ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)
{

	(void)context;
	ck_spinlock_fas_unlock(lock);
}

CK_COHORT_PROTOTYPE(fas_fas,
    ck_spinlock_fas_t, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context,
    ck_spinlock_fas_t, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context)
static CK_COHORT_INSTANCE(fas_fas) *cohorts;
static int n_cohorts;
static void *
thread(void *null CK_CC_UNUSED)
{
	int i = ITERATE;
	unsigned int l;
	unsigned int core;
	CK_COHORT_INSTANCE(fas_fas) *cohort;

	if (aff_iterate_core(&a, &core)) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	cohort = cohorts + (core / (int)(a.delta)) % n_cohorts;

	while (i--) {
		CK_COHORT_LOCK(fas_fas, cohort, NULL, NULL);
		{
			l = ck_pr_load_uint(&locked);
			if (l != 0) {
				ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
			}

			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);
			ck_pr_inc_uint(&locked);

			l = ck_pr_load_uint(&locked);
			if (l != 8) {
				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
			}

			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);
			ck_pr_dec_uint(&locked);

			l = ck_pr_load_uint(&locked);
			if (l != 0) {
				ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
			}
		}
		CK_COHORT_UNLOCK(fas_fas, cohort, NULL, NULL);
	}

	return (NULL);
}
int
main(int argc, char *argv[])
{
	pthread_t *threads;
	int threads_per_cohort;
	ck_spinlock_fas_t *local_lock;
	int i;

	if (argc != 4) {
		ck_error("Usage: validate <number of cohorts> <threads per cohort> <affinity delta>\n");
	}

	n_cohorts = atoi(argv[1]);
	if (n_cohorts <= 0) {
		ck_error("ERROR: Number of cohorts must be greater than 0\n");
	}

	threads_per_cohort = atoi(argv[2]);
	if (threads_per_cohort <= 0) {
		ck_error("ERROR: Threads per cohort must be greater than 0\n");
	}

	nthr = n_cohorts * threads_per_cohort;

	threads = malloc(sizeof(pthread_t) * nthr);
	if (threads == NULL) {
		ck_error("ERROR: Could not allocate thread structures\n");
	}

	a.delta = atoi(argv[3]);

	fprintf(stderr, "Creating cohorts...");
	cohorts = malloc(sizeof(CK_COHORT_INSTANCE(fas_fas)) * n_cohorts);
	if (cohorts == NULL) {
		ck_error("ERROR: Could not allocate cohort structures\n");
	}
	for (i = 0; i < n_cohorts; i++) {
		local_lock = malloc(sizeof(ck_spinlock_fas_t));
		if (local_lock == NULL) {
			ck_error("ERROR: Could not allocate local lock\n");
		}
		CK_COHORT_INIT(fas_fas, cohorts + i, &global_fas_lock, local_lock,
		    CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
	}
	fprintf(stderr, "done\n");

	fprintf(stderr, "Creating threads...");
	for (i = 0; i < nthr; i++) {
		if (pthread_create(&threads[i], NULL, thread, NULL)) {
			ck_error("ERROR: Could not create thread %d\n", i);
		}
	}
	fprintf(stderr, "done\n");

	fprintf(stderr, "Waiting for threads to finish correctness regression...");
	for (i = 0; i < nthr; i++)
		pthread_join(threads[i], NULL);
	fprintf(stderr, "done (passed)\n");

	return (0);
}

regressions/common.h
@@ -229,6 +229,18 @@ aff_iterate(struct affinity *acb)
	return sched_setaffinity(gettid(), sizeof(s), &s);
}

CK_CC_UNUSED static int
aff_iterate_core(struct affinity *acb, unsigned int *core)
{
	cpu_set_t s;

	*core = ck_pr_faa_uint(&acb->request, acb->delta);
	CPU_ZERO(&s);
	CPU_SET((*core) % CORES, &s);

	return sched_setaffinity(gettid(), sizeof(s), &s);
}
#elif defined(__MACH__)
CK_CC_UNUSED static int
aff_iterate(struct affinity *acb)
@@ -243,6 +255,19 @@ aff_iterate(struct affinity *acb)
	    (thread_policy_t)&policy,
	    THREAD_AFFINITY_POLICY_COUNT);
}

CK_CC_UNUSED static int
aff_iterate_core(struct affinity *acb, unsigned int *core)
{
	thread_affinity_policy_data_t policy;

	*core = ck_pr_faa_uint(&acb->request, acb->delta) % CORES;
	policy.affinity_tag = *core;
	return thread_policy_set(mach_thread_self(),
	    THREAD_AFFINITY_POLICY,
	    (thread_policy_t)&policy,
	    THREAD_AFFINITY_POLICY_COUNT);
}
#else
CK_CC_UNUSED static int
aff_iterate(struct affinity *acb CK_CC_UNUSED)
@@ -250,6 +275,13 @@ aff_iterate(struct affinity *acb CK_CC_UNUSED)
	return (0);
}

CK_CC_UNUSED static int
aff_iterate_core(struct affinity *acb CK_CC_UNUSED, unsigned int *core)
{

	*core = 0;
	return (0);
}
#endif
CK_CC_INLINE static uint64_t
