ck_hs: Lock-free set loosely modeled after ck_ht.

Currently only SPMC, but MPMC transformation is relatively trivial.
Documentation will come in a follow-up commit.
ck_pring
Samy Al Bahra 13 years ago
parent 01cd3a0256
commit 0f5e540afa

1
.gitignore vendored

@ -3,7 +3,6 @@ build/ck.pc
build/regressions.build
build/ck.spec
include/ck_md.h
Makefile
*.gz
*.o
*.a

@ -0,0 +1,104 @@
/*
* Copyright 2012 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _CK_HS_H
#define _CK_HS_H

#include <ck_pr.h>
#include <ck_cc.h>
#include <ck_malloc.h>
#include <ck_md.h>
#include <ck_stdint.h>
#include <stdbool.h>
#include <stddef.h>

/*
 * Hash values are wrapped in a single-member struct so the type system
 * keeps ck_hs hashes distinct from plain unsigned longs.
 */
struct ck_hs_hash {
	unsigned long value;
};
typedef struct ck_hs_hash ck_hs_hash_t;

/* Operational mode flags, OR'ed together and passed to ck_hs_init(). */
#define CK_HS_MODE_SPMC 1
#define CK_HS_MODE_DIRECT 2
#define CK_HS_MODE_OBJECT 8

/* Currently unsupported. (Expands to `(void)` so any use fails to compile.) */
#define CK_HS_MODE_MPMC (void)

/*
 * Hash callback function.
 */
typedef ck_hs_hash_t ck_hs_hash_cb_t(const void *, unsigned long);

/*
 * Returns pointer to object if objects are equivalent.
 */
typedef bool ck_hs_compare_cb_t(const void *, const void *);

/*
 * Pointer packing is only enabled when the VMA width is known.
 * NOTE(review): presumably the spare high pointer bits store key metadata —
 * confirm against the probe path in ck_hs.c.
 */
#if defined(CK_MD_POINTER_PACK_ENABLE) && defined(CK_MD_VMA_BITS)
#define CK_HS_PP
#define CK_HS_KEY_MASK ((1U << ((sizeof(void *) * 8) - CK_MD_VMA_BITS)) - 1)
#endif

struct ck_hs_map;

/* Hash-set handle. `map` points at the current bucket array and is
 * swapped atomically on grow/reset. */
struct ck_hs {
	struct ck_malloc *m;		/* allocator callbacks */
	struct ck_hs_map *map;		/* current bucket array */
	unsigned int mode;		/* CK_HS_MODE_* flags */
	unsigned long seed;		/* seed handed to the hash callback */
	ck_hs_hash_cb_t *hf;		/* hash function */
	ck_hs_compare_cb_t *compare;	/* equality predicate */
};
typedef struct ck_hs ck_hs_t;

/* Snapshot of table health as filled in by ck_hs_stat(). */
struct ck_hs_stat {
	unsigned long tombstones;
	unsigned long n_entries;
	unsigned int probe_maximum;
};

/* Iteration state for ck_hs_next(); initialize before first use. */
struct ck_hs_iterator {
	void **cursor;
	unsigned long offset;
};
typedef struct ck_hs_iterator ck_hs_iterator_t;

#define CK_HS_ITERATOR_INITIALIZER { NULL, 0 }

void ck_hs_iterator_init(ck_hs_iterator_t *);
bool ck_hs_next(ck_hs_t *, ck_hs_iterator_t *, void **);
bool ck_hs_init(ck_hs_t *, unsigned int, ck_hs_hash_cb_t *, ck_hs_compare_cb_t *, struct ck_malloc *, unsigned long, unsigned long);
void ck_hs_destroy(ck_hs_t *);
void *ck_hs_get(ck_hs_t *, ck_hs_hash_t, const void *);
bool ck_hs_put(ck_hs_t *, ck_hs_hash_t, const void *);
bool ck_hs_set(ck_hs_t *, ck_hs_hash_t, const void *, void **);
void *ck_hs_remove(ck_hs_t *, ck_hs_hash_t, const void *);
bool ck_hs_grow(ck_hs_t *, unsigned long);
unsigned long ck_hs_count(ck_hs_t *);
bool ck_hs_reset(ck_hs_t *);
void ck_hs_stat(ck_hs_t *, struct ck_hs_stat *);

#endif /* _CK_HS_H */

@ -7,6 +7,7 @@ DIR=backoff \
epoch \
fifo \
hp \
hs \
ht \
pr \
queue \
@ -32,6 +33,8 @@ all:
$(MAKE) -C ./ck_fifo/benchmark all
$(MAKE) -C ./ck_pr/validate all
$(MAKE) -C ./ck_pr/benchmark all
$(MAKE) -C ./ck_hs/benchmark all
$(MAKE) -C ./ck_hs/validate all
$(MAKE) -C ./ck_barrier/validate all
$(MAKE) -C ./ck_barrier/benchmark all
$(MAKE) -C ./ck_bytelock/validate all
@ -55,6 +58,8 @@ clean:
$(MAKE) -C ./ck_brlock/validate clean
$(MAKE) -C ./ck_ht/validate clean
$(MAKE) -C ./ck_ht/benchmark clean
$(MAKE) -C ./ck_hs/validate clean
$(MAKE) -C ./ck_hs/benchmark clean
$(MAKE) -C ./ck_brlock/benchmark clean
$(MAKE) -C ./ck_spinlock/validate clean
$(MAKE) -C ./ck_spinlock/benchmark clean

@ -0,0 +1,563 @@
/*
* Copyright 2012 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_hs.h>
#include "../../../src/ck_ht_hash.h"
#include <assert.h>
#include <ck_epoch.h>
#include <ck_malloc.h>
#include <ck_pr.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include "../../common.h"
static ck_hs_t hs CK_CC_CACHELINE;
static char **keys;
static size_t keys_length = 0;
static size_t keys_capacity = 128;
static ck_epoch_t epoch_hs;
static ck_epoch_record_t epoch_wr;
static int n_threads;
static bool next_stage;
enum state {
HS_STATE_STOP = 0,
HS_STATE_GET,
HS_STATE_STRICT_REPLACEMENT,
HS_STATE_DELETION,
HS_STATE_REPLACEMENT,
HS_STATE_COUNT
};
static struct affinity affinerator = AFFINITY_INITIALIZER;
static uint64_t accumulator[HS_STATE_COUNT];
static int barrier[HS_STATE_COUNT];
static int state;
struct hs_epoch {
ck_epoch_entry_t epoch_entry;
};
/*
 * SIGALRM handler: flags the writer loop that the current benchmark
 * stage has timed out. NOTE(review): next_stage is a plain bool shared
 * with the main loop — confirm whether sig_atomic_t/ck_pr is warranted.
 */
static void
alarm_handler(int signal_number)
{

	(void)signal_number;
	next_stage = true;
}
/* Hash a NUL-terminated string key with MurmurHash64A. */
static ck_hs_hash_t
hs_hash(const void *object, unsigned long seed)
{
	const char *string = object;
	ck_hs_hash_t result;

	result.value = (unsigned long)MurmurHash64A(string, strlen(string), seed);
	return result;
}
/* Two keys are equivalent iff their string contents are identical. */
static bool
hs_compare(const void *previous, const void *compare)
{
	const char *a = previous;
	const char *b = compare;

	return strcmp(a, b) == 0;
}
/* Epoch callback: reclaim a deferred allocation once no reader holds it. */
static void
hs_destroy(ck_epoch_entry_t *e)
{

	free(e);
}
/*
 * Allocate `r` usable bytes preceded by a hidden ck_epoch_entry_t header
 * used for deferred reclamation; the caller receives the region after
 * the header. Returns NULL on allocation failure.
 *
 * BUG FIX: the original returned `b + 1` unconditionally, so a failed
 * malloc() yielded a non-NULL garbage pointer — callers could never
 * detect out-of-memory, and hs_free() would later step back past NULL.
 */
static void *
hs_malloc(size_t r)
{
	ck_epoch_entry_t *b;

	b = malloc(sizeof(*b) + r);
	if (b == NULL)
		return NULL;

	return b + 1;
}
/*
 * Free an allocation produced by hs_malloc(). `p` points past the hidden
 * epoch header, so we step back one header (--e) to recover the original
 * malloc() pointer. When `r` is true the table may still be visible to
 * readers, so reclamation is deferred through the epoch subsystem.
 */
static void
hs_free(void *p, size_t b, bool r)
{
	struct hs_epoch *e = p;

	(void)b;

	if (r == true) {
		/* Destruction requires safe memory reclamation. */
		ck_epoch_call(&epoch_hs, &epoch_wr, &(--e)->epoch_entry, hs_destroy);
	} else {
		free(--e);
	}

	return;
}
/* Route all ck_hs allocations through the epoch-aware wrappers above. */
static struct ck_malloc my_allocator = {
	.malloc = hs_malloc,
	.free = hs_free
};
/*
 * Initialize the epoch subsystem, register the writer's epoch record and
 * create the global hash set with 65536 initial slots and a random seed.
 * NOTE(review): perror() assumes ck_hs_init sets errno on failure — confirm.
 */
static void
set_init(void)
{

	ck_epoch_init(&epoch_hs);
	ck_epoch_register(&epoch_hs, &epoch_wr);
	srand48((long int)time(NULL));
	if (ck_hs_init(&hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC, hs_hash, hs_compare, &my_allocator, 65536, lrand48()) == false) {
		perror("ck_hs_init");
		exit(EXIT_FAILURE);
	}

	return;
}
static bool
set_remove(const char *value)
{
ck_hs_hash_t h;
h.value = (unsigned long)MurmurHash64A(value, strlen(value), hs.seed);
return (bool)ck_hs_remove(&hs, h, value);
}
static bool
set_replace(const char *value)
{
ck_hs_hash_t h;
void *previous;
h.value = (unsigned long)MurmurHash64A(value, strlen(value), hs.seed);
return ck_hs_set(&hs, h, value, &previous);
}
static void *
set_get(const char *value)
{
ck_hs_hash_t h;
void *v;
h.value = (unsigned long)MurmurHash64A(value, strlen(value), hs.seed);
v = ck_hs_get(&hs, h, value);
return v;
}
static bool
set_insert(const char *value)
{
ck_hs_hash_t h;
h.value = (unsigned long)MurmurHash64A(value, strlen(value), hs.seed);
return ck_hs_put(&hs, h, value);
}
/* Number of live entries in the global set. */
static size_t
set_count(void)
{
	unsigned long total = ck_hs_count(&hs);

	return total;
}
/* Drop every entry; false if the replacement map could not be allocated. */
static bool
set_reset(void)
{
	bool emptied = ck_hs_reset(&hs);

	return emptied;
}
/*
 * Reader thread: repeatedly looks up every key under epoch protection,
 * accumulating per-pass tick counts. When the writer advances the global
 * `state`, the reader publishes its average into `accumulator` and waits
 * at the per-stage barrier before continuing.
 */
static void *
reader(void *unused)
{
	size_t i;
	ck_epoch_record_t epoch_record;
	int state_previous = HS_STATE_STOP;
	/*
	 * BUG FIX: n_state was declared uninitialized but read inside the
	 * first look-up pass (the NULL branch below) before its first
	 * assignment — undefined behavior. Start in the same state as
	 * state_previous.
	 */
	int n_state = HS_STATE_STOP;
	uint64_t s, j, a;

	(void)unused;
	if (aff_iterate(&affinerator) != 0)
		perror("WARNING: Failed to affine thread");

	s = j = a = 0;
	ck_epoch_register(&epoch_hs, &epoch_record);
	for (;;) {
		j++;
		ck_epoch_begin(&epoch_hs, &epoch_record);
		s = rdtsc();
		for (i = 0; i < keys_length; i++) {
			char *r;

			r = set_get(keys[i]);
			if (r == NULL) {
				/* During strict replacement every key must remain visible. */
				if (n_state == HS_STATE_STRICT_REPLACEMENT) {
					fprintf(stderr, "ERROR: Did not find during replacement: %s\n", keys[i]);
					exit(EXIT_FAILURE);
				}

				continue;
			}

			if (strcmp(r, keys[i]) == 0)
				continue;

			fprintf(stderr, "ERROR: Found invalid value: [%s] but expected [%s]\n", r, keys[i]);
			exit(EXIT_FAILURE);
		}
		a += rdtsc() - s;
		ck_epoch_end(&epoch_hs, &epoch_record);

		n_state = ck_pr_load_int(&state);
		if (n_state != state_previous) {
			/* Publish the mean per-key latency for the finished stage. */
			ck_pr_add_64(&accumulator[state_previous], a / (j * keys_length));
			ck_pr_inc_int(&barrier[state_previous]);
			/* Wait for all readers plus the writer. */
			while (ck_pr_load_int(&barrier[state_previous]) != n_threads + 1)
				ck_pr_stall();

			state_previous = n_state;
			s = j = a = 0;
		}
	}

	return NULL;
}
/*
 * Parallel benchmark driver. Phase one runs single-writer micro-benchmarks
 * (SMR insert, replace, get, remove, negative get); phase two spawns
 * n_threads readers and drives them through the HS_STATE_* stages while the
 * writer mutates the table, using per-stage barriers for synchronization.
 */
int
main(int argc, char *argv[])
{
	FILE *fp;
	char buffer[512];
	size_t i, j, r;
	unsigned int d = 0;
	uint64_t s, e, a, repeated;
	char **t;
	pthread_t *readers;
	double p_r, p_d;

	/* Defaults: 20-second/20-repetition intervals, 50% delete/replace. */
	r = 20;
	s = 8;
	p_d = 0.5;
	p_r = 0.5;
	n_threads = CORES - 1;

	if (argc < 2) {
		fprintf(stderr, "Usage: parallel <dictionary> [<interval length> <initial size> <readers>\n"
		    " <probability of replacement> <probability of deletion> <epoch threshold>]\n");
		exit(EXIT_FAILURE);
	}

	if (argc >= 3)
		r = atoi(argv[2]);

	/* NOTE(review): s (initial size) is parsed but never used; set_init()
	 * always creates the table with 65536 slots — confirm intent. */
	if (argc >= 4)
		s = (uint64_t)atoi(argv[3]);

	if (argc >= 5) {
		n_threads = atoi(argv[4]);
		if (n_threads < 1) {
			fprintf(stderr, "ERROR: Number of readers must be >= 1.\n");
			exit(EXIT_FAILURE);
		}
	}

	if (argc >= 6) {
		p_r = atof(argv[5]) / 100.00;
		if (p_r < 0) {
			fprintf(stderr, "ERROR: Probability of replacement must be >= 0 and <= 100.\n");
			exit(EXIT_FAILURE);
		}
	}

	if (argc >= 7) {
		p_d = atof(argv[6]) / 100.00;
		if (p_d < 0) {
			fprintf(stderr, "ERROR: Probability of deletion must be >= 0 and <= 100.\n");
			exit(EXIT_FAILURE);
		}
	}

	affinerator.delta = 1;
	readers = malloc(sizeof(pthread_t) * n_threads);
	assert(readers != NULL);

	keys = malloc(sizeof(char *) * keys_capacity);
	assert(keys != NULL);

	/* Load one key per line of the dictionary, growing the array as needed. */
	fp = fopen(argv[1], "r");
	assert(fp != NULL);

	while (fgets(buffer, sizeof(buffer), fp) != NULL) {
		/* NOTE(review): assumes every line ends in '\n'; a zero-length
		 * read would index buffer[-1] — confirm input guarantees. */
		buffer[strlen(buffer) - 1] = '\0';
		keys[keys_length++] = strdup(buffer);
		assert(keys[keys_length - 1] != NULL);
		if (keys_length == keys_capacity) {
			t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
			assert(t != NULL);
			keys = t;
		}
	}

	/* Shrink to exact size. */
	t = realloc(keys, sizeof(char *) * keys_length);
	assert(t != NULL);
	keys = t;

	set_init();

	for (i = 0; i < (size_t)n_threads; i++) {
		if (pthread_create(&readers[i], NULL, reader, NULL) != 0) {
			fprintf(stderr, "ERROR: Failed to create thread %zu.\n", i);
			exit(EXIT_FAILURE);
		}
	}

	/* Count duplicates as failed inserts. */
	for (i = 0; i < keys_length; i++)
		d += set_insert(keys[i]) == false;

	fprintf(stderr, " [S] %d readers, 1 writer.\n", n_threads);
	fprintf(stderr, " [S] %zu entries stored and %u duplicates.\n\n",
	    set_count(), d);

	fprintf(stderr, " ,- BASIC TEST\n");
	fprintf(stderr, " | Executing SMR test...");
	a = 0;
	for (j = 0; j < r; j++) {
		if (set_reset() == false) {
			fprintf(stderr, "ERROR: Failed to reset hash table.\n");
			exit(EXIT_FAILURE);
		}

		s = rdtsc();
		for (i = 0; i < keys_length; i++)
			d += set_insert(keys[i]) == false;
		e = rdtsc();
		a += e - s;
	}
	fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));

	fprintf(stderr, " | Executing replacement test...");
	a = 0;
	for (j = 0; j < r; j++) {
		s = rdtsc();
		for (i = 0; i < keys_length; i++)
			set_replace(keys[i]);
		e = rdtsc();
		a += e - s;
	}
	fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));

	fprintf(stderr, " | Executing get test...");
	a = 0;
	for (j = 0; j < r; j++) {
		s = rdtsc();
		for (i = 0; i < keys_length; i++) {
			if (set_get(keys[i]) == NULL) {
				fprintf(stderr, "ERROR: Unexpected NULL value.\n");
				exit(EXIT_FAILURE);
			}
		}
		e = rdtsc();
		a += e - s;
	}
	fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));

	a = 0;
	fprintf(stderr, " | Executing removal test...");
	for (j = 0; j < r; j++) {
		s = rdtsc();
		for (i = 0; i < keys_length; i++)
			set_remove(keys[i]);
		e = rdtsc();
		a += e - s;

		/* Repopulate for the next round (untimed). */
		for (i = 0; i < keys_length; i++)
			set_insert(keys[i]);
	}
	fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));

	fprintf(stderr, " | Executing negative look-up test...");
	a = 0;
	for (j = 0; j < r; j++) {
		s = rdtsc();
		for (i = 0; i < keys_length; i++) {
			set_get("\x50\x03\x04\x05\x06\x10");
		}
		e = rdtsc();
		a += e - s;
	}
	fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));

	/* Snapshot epoch counters before/after a synchronize for the summary. */
	ck_epoch_record_t epoch_temporary = epoch_wr;
	ck_epoch_synchronize(&epoch_hs, &epoch_wr);

	fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
	    "%u pending, %u peak, %lu reclamations\n\n",
	    epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
	    epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);

	fprintf(stderr, " ,- READER CONCURRENCY\n");
	fprintf(stderr, " | Executing reader test...");

	/* Stage: readers only, writer idle for r seconds. */
	ck_pr_store_int(&state, HS_STATE_GET);
	while (ck_pr_load_int(&barrier[HS_STATE_STOP]) != n_threads)
		ck_pr_stall();
	ck_pr_inc_int(&barrier[HS_STATE_STOP]);
	sleep(r);

	ck_pr_store_int(&state, HS_STATE_STRICT_REPLACEMENT);
	while (ck_pr_load_int(&barrier[HS_STATE_GET]) != n_threads)
		ck_pr_stall();
	fprintf(stderr, "done (reader = %" PRIu64 " ticks)\n",
	    accumulator[HS_STATE_GET] / n_threads);

	fprintf(stderr, " | Executing strict replacement test...");

	/* Stage: writer replaces continuously; readers must never miss a key. */
	a = repeated = 0;
	signal(SIGALRM, alarm_handler);
	alarm(r);

	ck_pr_inc_int(&barrier[HS_STATE_GET]);
	for (;;) {
		repeated++;
		s = rdtsc();
		for (i = 0; i < keys_length; i++)
			set_replace(keys[i]);
		e = rdtsc();
		a += e - s;

		if (next_stage == true) {
			next_stage = false;
			break;
		}
	}

	ck_pr_store_int(&state, HS_STATE_DELETION);
	while (ck_pr_load_int(&barrier[HS_STATE_STRICT_REPLACEMENT]) != n_threads)
		ck_pr_stall();
	set_reset();
	ck_epoch_synchronize(&epoch_hs, &epoch_wr);
	fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
	    a / (repeated * keys_length), accumulator[HS_STATE_STRICT_REPLACEMENT] / n_threads);

	signal(SIGALRM, alarm_handler);
	alarm(r);

	/* Stage: mixed insert/delete with probability p_d. */
	fprintf(stderr, " | Executing deletion test (%.2f)...", p_d * 100);
	a = repeated = 0;
	ck_pr_inc_int(&barrier[HS_STATE_STRICT_REPLACEMENT]);
	for (;;) {
		double delete;

		repeated++;
		s = rdtsc();
		for (i = 0; i < keys_length; i++) {
			set_insert(keys[i]);

			if (p_d != 0.0) {
				delete = drand48();
				if (delete <= p_d)
					set_remove(keys[i]);
			}
		}
		e = rdtsc();
		a += e - s;

		if (next_stage == true) {
			next_stage = false;
			break;
		}
	}
	ck_pr_store_int(&state, HS_STATE_REPLACEMENT);
	while (ck_pr_load_int(&barrier[HS_STATE_DELETION]) != n_threads)
		ck_pr_stall();

	set_reset();
	ck_epoch_synchronize(&epoch_hs, &epoch_wr);
	fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
	    a / (repeated * keys_length), accumulator[HS_STATE_DELETION] / n_threads);

	signal(SIGALRM, alarm_handler);
	alarm(r);

	/* Stage: insert/delete/replace mix with probabilities p_d and p_r. */
	fprintf(stderr, " | Executing replacement test (%.2f)...", p_r * 100);
	a = repeated = 0;
	ck_pr_inc_int(&barrier[HS_STATE_DELETION]);
	for (;;) {
		double delete, replace;

		repeated++;
		s = rdtsc();
		for (i = 0; i < keys_length; i++) {
			set_insert(keys[i]);

			if (p_d != 0.0) {
				delete = drand48();
				if (delete <= p_d)
					set_remove(keys[i]);
			}

			if (p_r != 0.0) {
				replace = drand48();
				if (replace <= p_r)
					set_replace(keys[i]);
			}
		}
		e = rdtsc();
		a += e - s;

		if (next_stage == true) {
			next_stage = false;
			break;
		}
	}
	ck_pr_store_int(&state, HS_STATE_STOP);
	while (ck_pr_load_int(&barrier[HS_STATE_REPLACEMENT]) != n_threads)
		ck_pr_stall();
	set_reset();
	ck_epoch_synchronize(&epoch_hs, &epoch_wr);
	fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
	    a / (repeated * keys_length), accumulator[HS_STATE_REPLACEMENT] / n_threads);
	ck_pr_inc_int(&barrier[HS_STATE_REPLACEMENT]);

	epoch_temporary = epoch_wr;
	ck_epoch_synchronize(&epoch_hs, &epoch_wr);
	fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
	    "%u pending, %u peak, %lu reclamations\n\n",
	    epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
	    epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
	return 0;
}

@ -0,0 +1,375 @@
/*
* Copyright 2012 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_hs.h>
#include <assert.h>
#include <ck_malloc.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "../../common.h"
#include "../../../src/ck_ht_hash.h"
static ck_hs_t hs;
static char **keys;
static size_t keys_length = 0;
static size_t keys_capacity = 128;
/* Thin ck_malloc-compatible wrapper around malloc(3). */
static void *
hs_malloc(size_t r)
{
	void *block = malloc(r);

	return block;
}
/* Thin ck_malloc-compatible wrapper around free(3); size and the
 * deferred-reclamation flag are irrelevant for a serial benchmark. */
static void
hs_free(void *p, size_t b, bool r)
{

	(void)b;
	(void)r;
	free(p);
}
/* Plumb plain malloc(3)/free(3) into ck_hs through its allocator interface. */
static struct ck_malloc my_allocator = {
	.malloc = hs_malloc,
	.free = hs_free
};
/* Hash a NUL-terminated string key with MurmurHash64A. */
static ck_hs_hash_t
hs_hash(const void *object, unsigned long seed)
{
	const char *string = object;
	ck_hs_hash_t digest;

	digest.value = (unsigned long)MurmurHash64A(string, strlen(string), seed);
	return digest;
}
/* Keys are equivalent iff their string contents match. */
static bool
hs_compare(const void *previous, const void *compare)
{
	const char *lhs = previous;
	const char *rhs = compare;

	return strcmp(lhs, rhs) == 0;
}
/*
 * Create the global hash set with 8 initial slots and a random seed.
 * NOTE(review): perror() assumes ck_hs_init sets errno on failure — confirm.
 */
static void
set_init(void)
{

	srand48((long int)time(NULL));
	if (ck_hs_init(&hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC, hs_hash, hs_compare, &my_allocator, 8, lrand48()) == false) {
		perror("ck_hs_init");
		exit(EXIT_FAILURE);
	}

	return;
}
/*
 * Remove `value` from the global set; returns true iff an entry was
 * actually removed.
 *
 * FIX: the original discarded ck_hs_remove()'s result and returned true
 * unconditionally, unlike the parallel benchmark's identically-named
 * wrapper. Report the real outcome (callers that ignore the result are
 * unaffected).
 */
static bool
set_remove(const char *value)
{
	ck_hs_hash_t h;

	h.value = (unsigned long)MurmurHash64A(value, strlen(value), hs.seed);
	return ck_hs_remove(&hs, h, value) != NULL;
}
static bool
set_replace(const char *value)
{
ck_hs_hash_t h;
void *previous;
h.value = (unsigned long)MurmurHash64A(value, strlen(value), hs.seed);
return ck_hs_set(&hs, h, value, &previous);
}
static void *
set_get(const char *value)
{
ck_hs_hash_t h;
void *v;
h.value = (unsigned long)MurmurHash64A(value, strlen(value), hs.seed);
v = ck_hs_get(&hs, h, value);
return v;
}
static bool
set_insert(const char *value)
{
ck_hs_hash_t h;
h.value = (unsigned long)MurmurHash64A(value, strlen(value), hs.seed);
return ck_hs_put(&hs, h, value);
}
/* Number of live entries in the global set. */
static size_t
set_count(void)
{
	unsigned long total = ck_hs_count(&hs);

	return total;
}
/* Drop every entry; false if the replacement map could not be allocated. */
static bool
set_reset(void)
{
	bool emptied = ck_hs_reset(&hs);

	return emptied;
}
/*
 * In-place Fisher–Yates shuffle of the global key array (uses the global
 * keys_length as the element count; `k` must point at that many entries).
 *
 * BUG FIX: the original drew j from [0, i-2] via `rand() % (i - 1)`, so
 * position i-1 could never keep its element and the `j != i - 1` guard was
 * dead code — the permutation was biased (only cyclic permutations were
 * produced). Fisher–Yates requires j uniform over [0, i-1].
 */
static void
keys_shuffle(char **k)
{
	size_t i, j;
	char *t;

	for (i = keys_length; i > 1; i--) {
		j = rand() % i;

		if (j != i - 1) {
			t = k[i - 1];
			k[i - 1] = k[j];
			k[j] = t;
		}
	}

	return;
}
/*
 * Serial benchmark driver: loads a dictionary (one key per line), then
 * measures mean per-key rdtsc ticks for nine access patterns and prints
 * them on one line in the header's column order.
 */
int
main(int argc, char *argv[])
{
	FILE *fp;
	char buffer[512];
	size_t i, j, r;
	unsigned int d = 0;
	uint64_t s, e, a, ri, si, ai, sr, rg, sg, ag, sd, ng;
	struct ck_hs_stat st;
	char **t;

	r = 20;
	s = 8;
	srand(time(NULL));

	if (argc < 2) {
		fprintf(stderr, "Usage: ck_hs <dictionary> [<repetitions> <initial size>]\n");
		exit(EXIT_FAILURE);
	}

	if (argc >= 3)
		r = atoi(argv[2]);

	/* NOTE(review): the initial-size argument is parsed into s but never
	 * applied — set_init() always creates the table with 8 slots; confirm. */
	if (argc >= 4)
		s = (uint64_t)atoi(argv[3]);

	keys = malloc(sizeof(char *) * keys_capacity);
	assert(keys != NULL);

	fp = fopen(argv[1], "r");
	assert(fp != NULL);

	/* Load one key per line, doubling the array as needed. */
	while (fgets(buffer, sizeof(buffer), fp) != NULL) {
		/* NOTE(review): assumes every line ends in '\n'; a zero-length
		 * read would index buffer[-1] — confirm input guarantees. */
		buffer[strlen(buffer) - 1] = '\0';
		keys[keys_length++] = strdup(buffer);
		assert(keys[keys_length - 1] != NULL);
		if (keys_length == keys_capacity) {
			t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
			assert(t != NULL);
			keys = t;
		}
	}

	/* Shrink to exact size. */
	t = realloc(keys, sizeof(char *) * keys_length);
	assert(t != NULL);
	keys = t;

	set_init();

	/* Count duplicates as failed inserts. */
	for (i = 0; i < keys_length; i++)
		d += set_insert(keys[i]) == false;

	ck_hs_stat(&hs, &st);
	fprintf(stderr, "# %zu entries stored, %u duplicates, %u probe.\n",
	    set_count(), d, st.probe_maximum);

	fprintf(stderr, "# reverse_insertion serial_insertion random_insertion serial_replace reverse_get serial_get random_get serial_remove negative_get\n\n");

	/* Reverse-order insertion. */
	a = 0;
	for (j = 0; j < r; j++) {
		if (set_reset() == false) {
			fprintf(stderr, "ERROR: Failed to reset hash table.\n");
			exit(EXIT_FAILURE);
		}

		s = rdtsc();
		for (i = keys_length; i > 0; i--)
			d += set_insert(keys[i - 1]) == false;
		e = rdtsc();
		a += e - s;
	}
	ri = a / (r * keys_length);

	/* In-order insertion. */
	a = 0;
	for (j = 0; j < r; j++) {
		if (set_reset() == false) {
			fprintf(stderr, "ERROR: Failed to reset hash table.\n");
			exit(EXIT_FAILURE);
		}

		s = rdtsc();
		for (i = 0; i < keys_length; i++)
			d += set_insert(keys[i]) == false;
		e = rdtsc();
		a += e - s;
	}
	si = a / (r * keys_length);

	/* Random-order insertion (shuffle is untimed). */
	a = 0;
	for (j = 0; j < r; j++) {
		keys_shuffle(keys);

		if (set_reset() == false) {
			fprintf(stderr, "ERROR: Failed to reset hash table.\n");
			exit(EXIT_FAILURE);
		}

		s = rdtsc();
		for (i = 0; i < keys_length; i++)
			d += set_insert(keys[i]) == false;
		e = rdtsc();
		a += e - s;
	}
	ai = a / (r * keys_length);

	/* In-order replacement of existing entries. */
	a = 0;
	for (j = 0; j < r; j++) {
		s = rdtsc();
		for (i = 0; i < keys_length; i++)
			set_replace(keys[i]);
		e = rdtsc();
		a += e - s;
	}
	sr = a / (r * keys_length);

	/* Repopulate before the read-only phases. */
	set_reset();
	for (i = 0; i < keys_length; i++)
		set_insert(keys[i]);

	/* Reverse-order get. */
	a = 0;
	for (j = 0; j < r; j++) {
		s = rdtsc();
		for (i = keys_length; i > 0; i--) {
			if (set_get(keys[i - 1]) == NULL) {
				fprintf(stderr, "ERROR: Unexpected NULL value.\n");
				exit(EXIT_FAILURE);
			}
		}
		e = rdtsc();
		a += e - s;
	}
	rg = a / (r * keys_length);

	/* In-order get. */
	a = 0;
	for (j = 0; j < r; j++) {
		s = rdtsc();
		for (i = 0; i < keys_length; i++) {
			if (set_get(keys[i]) == NULL) {
				fprintf(stderr, "ERROR: Unexpected NULL value.\n");
				exit(EXIT_FAILURE);
			}
		}
		e = rdtsc();
		a += e - s;
	}
	sg = a / (r * keys_length);

	/* Random-order get. */
	a = 0;
	for (j = 0; j < r; j++) {
		keys_shuffle(keys);

		s = rdtsc();
		for (i = 0; i < keys_length; i++) {
			if (set_get(keys[i]) == NULL) {
				fprintf(stderr, "ERROR: Unexpected NULL value.\n");
				exit(EXIT_FAILURE);
			}
		}
		e = rdtsc();
		a += e - s;
	}
	ag = a / (r * keys_length);

	/* Removal (entries are re-inserted untimed after each round). */
	a = 0;
	for (j = 0; j < r; j++) {
		s = rdtsc();
		for (i = 0; i < keys_length; i++)
			set_remove(keys[i]);
		e = rdtsc();
		a += e - s;

		for (i = 0; i < keys_length; i++)
			set_insert(keys[i]);
	}
	sd = a / (r * keys_length);

	/* Negative (missing-key) look-ups. */
	a = 0;
	for (j = 0; j < r; j++) {
		s = rdtsc();
		for (i = 0; i < keys_length; i++) {
			set_get("\x50\x03\x04\x05\x06\x10");
		}
		e = rdtsc();
		a += e - s;
	}
	ng = a / (r * keys_length);

	printf("%zu "
	    "%" PRIu64 " "
	    "%" PRIu64 " "
	    "%" PRIu64 " "
	    "%" PRIu64 " "
	    "%" PRIu64 " "
	    "%" PRIu64 " "
	    "%" PRIu64 " "
	    "%" PRIu64 " "
	    "%" PRIu64 "\n",
	    keys_length, ri, si, ai, sr, rg, sg, ag, sd, ng);

	return 0;
}

@ -0,0 +1,17 @@
# Build and run the ck_hs serial validation test against the in-tree sources.
.PHONY: check clean distribution

OBJECTS=serial

all: $(OBJECTS)

serial: serial.c ../../../include/ck_hs.h ../../../src/ck_hs.c
	$(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_hs.c

check: all
	./serial

clean:
	rm -rf *~ *.o $(OBJECTS) *.dSYM

include ../../../build/regressions.build
CFLAGS+=-D_GNU_SOURCE

@ -0,0 +1,182 @@
/*
* Copyright 2012 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_hs.h>
#include <assert.h>
#include <ck_malloc.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* ck_malloc-compatible wrapper around malloc(3). */
static void *
hs_malloc(size_t r)
{
	void *block = malloc(r);

	return block;
}
/* ck_malloc-compatible wrapper around free(3); extra arguments unused. */
static void
hs_free(void *p, size_t b, bool r)
{

	(void)b;
	(void)r;
	free(p);
}
/* Plumb plain malloc(3)/free(3) into ck_hs through its allocator interface. */
static struct ck_malloc my_allocator = {
	.malloc = hs_malloc,
	.free = hs_free
};
/*
 * Fixture keys. hs_hash() below hashes a key by its first byte only, so
 * keys sharing a first letter collide by design, and the array contains
 * exact duplicates ("What", "down.") to exercise put-collision paths.
 */
const char *test[] = {"Samy", "Al", "Bahra", "dances", "in", "the", "wind.", "Once",
    "upon", "a", "time", "his", "gypsy", "ate", "one", "itsy",
    "bitsy", "spider.", "What", "goes", "up", "must",
    "come", "down.", "What", "is", "down", "stays",
    "down.", "A", "B", "C", "D", "E", "F", "G", "H",
    "I", "J", "K", "L", "M", "N", "O"};

/* Key that is never inserted; available for negative look-ups. */
const char *negative = "negative";
/* Degenerate hash for testing: a key's hash is its first byte, which
 * forces heavy collisions among the fixture keys. */
static ck_hs_hash_t
hs_hash(const void *object, unsigned long seed)
{
	const char *string = object;
	ck_hs_hash_t digest;

	(void)seed;
	digest.value = string[0];
	return digest;
}
/* Keys are equivalent iff their string contents match. */
static bool
hs_compare(const void *previous, const void *compare)
{
	const char *lhs = previous;
	const char *rhs = compare;

	return strcmp(lhs, rhs) == 0;
}
/*
 * Serial functional validation of ck_hs: exercises put (including
 * collision rejection), grow, get, remove and set/replace semantics,
 * exiting non-zero on the first failure.
 */
int
main(void)
{
	ck_hs_t hs;
	ck_hs_hash_t h;
	size_t i;

	if (ck_hs_init(&hs, CK_HS_MODE_SPMC | CK_HS_MODE_OBJECT, hs_hash, hs_compare, &my_allocator, 8, 6602834) == false) {
		perror("ck_hs_init");
		exit(EXIT_FAILURE);
	}

	/* Test serial put semantics: a second put of the same key must fail. */
	for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
		h.value = test[i][0];
		ck_hs_put(&hs, h, test[i]);
		if (ck_hs_put(&hs, h, test[i]) == true) {
			fprintf(stderr, "ERROR [1]: put must fail on collision.\n");
			exit(EXIT_FAILURE);
		}
	}

	/* Test grow semantics: uniqueness and membership survive a grow. */
	ck_hs_grow(&hs, 32);
	for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
		h.value = test[i][0];
		if (ck_hs_put(&hs, h, test[i]) == true) {
			fprintf(stderr, "ERROR [2]: put must fail on collision.\n");
			exit(EXIT_FAILURE);
		}

		if (ck_hs_get(&hs, h, test[i]) == NULL) {
			fprintf(stderr, "ERROR: get must not fail\n");
			exit(EXIT_FAILURE);
		}
	}

	/* Grow set and check get semantics. */
	ck_hs_grow(&hs, 128);
	for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
		h.value = test[i][0];
		if (ck_hs_get(&hs, h, test[i]) == NULL) {
			fprintf(stderr, "ERROR: get must not fail\n");
			exit(EXIT_FAILURE);
		}
	}

	/* Delete and check negative membership (duplicates are skipped). */
	for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
		void *r;

		h.value = test[i][0];
		if (ck_hs_get(&hs, h, test[i]) == NULL)
			continue;

		if (r = ck_hs_remove(&hs, h, test[i]), r == NULL) {
			fprintf(stderr, "ERROR: remove must not fail\n");
			exit(EXIT_FAILURE);
		}

		if (strcmp(r, test[i]) != 0) {
			fprintf(stderr, "ERROR: Removed incorrect node (%s != %s)\n", r, test[i]);
			exit(EXIT_FAILURE);
		}
	}

	/* Test replacement semantics. */
	for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
		void *r;
		bool d;

		h.value = test[i][0];
		d = ck_hs_get(&hs, h, test[i]) != NULL;
		if (ck_hs_set(&hs, h, test[i], &r) == false) {
			fprintf(stderr, "ERROR: Failed to set\n");
			exit(EXIT_FAILURE);
		}

		/* Expected replacement. */
		if (d == true && (r == NULL || strcmp(r, test[i]) != 0)) {
			/*
			 * BUG FIX: this failure previously only printed a
			 * message (and could pass NULL to %s, which is
			 * undefined) while the test still exited 0 as if it
			 * had passed. Guard the argument and fail hard.
			 */
			fprintf(stderr, "ERROR: Incorrect previous value: %s != %s\n",
			    test[i], r == NULL ? "(null)" : (char *)r);
			exit(EXIT_FAILURE);
		}

		if (ck_hs_set(&hs, h, test[i], &r) == false) {
			fprintf(stderr, "ERROR: Failed to set [1]\n");
			exit(EXIT_FAILURE);
		}

		if (strcmp(r, test[i]) != 0) {
			fprintf(stderr, "ERROR: Invalid pointer: %s != %s\n", r, test[i]);
			exit(EXIT_FAILURE);
		}
	}

	return 0;
}

@ -8,7 +8,8 @@ OBJECTS=ck_barrier_centralized.o \
ck_epoch.o \
ck_ht.o \
ck_hp.o \
ck_bag.o
ck_bag.o \
ck_hs.o
all: libck.so libck.a
@ -24,6 +25,9 @@ ck_bag.o: ../include/ck_bag.h ck_bag.c
ck_epoch.o: ../include/ck_epoch.h ck_epoch.c
$(CC) $(CFLAGS) -c -o ck_epoch.o ck_epoch.c
ck_hs.o: ../include/ck_hs.h ck_hs.c
$(CC) $(CFLAGS) -c -o ck_hs.o ck_hs.c
ck_ht.o: ../include/ck_ht.h ck_ht.c
$(CC) $(CFLAGS) -c -o ck_ht.o ck_ht.c

@ -0,0 +1,537 @@
/*
* Copyright 2012 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_cc.h>
#include <ck_hs.h>
#include <ck_limits.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdint.h>
#include <stdbool.h>
#include <string.h>
#include "ck_internal.h"
#define CK_HS_PROBE_L1_SHIFT 3ULL
#define CK_HS_PROBE_L1 (1 << CK_HS_PROBE_L1_SHIFT)
#define CK_HS_PROBE_L1_MASK (CK_HS_PROBE_L1 - 1)
#ifndef CK_HS_PROBE_L1_DEFAULT
#define CK_HS_PROBE_L1_DEFAULT CK_MD_CACHELINE
#endif
#define CK_HS_EMPTY NULL
#define CK_HS_TOMBSTONE ((void *)~(uintptr_t)0)
#define CK_HS_G (2)
#define CK_HS_G_MASK (CK_HS_G - 1)
/*
 * One bucket-array incarnation of the set. A fresh map is allocated on
 * grow/reset and published with ck_pr_store_ptr; concurrent readers may
 * still observe the previous map until it is reclaimed.
 */
struct ck_hs_map {
	unsigned int generation[CK_HS_G];	/* zeroed at create; presumably per-group version counters for SPMC probes — confirm in probe path */
	unsigned int tombstones;		/* count of CK_HS_TOMBSTONE slots */
	unsigned int probe_maximum;		/* longest probe sequence observed */
	unsigned int probe_limit;		/* cap on probe sequence length */
	unsigned long n_entries;		/* live entries */
	unsigned long mask;			/* capacity - 1 (capacity is a power of two) */
	unsigned long capacity;			/* number of slots */
	unsigned long step;			/* log2(capacity); hashes are shifted by this */
	unsigned long size;			/* byte size of this allocation (passed to free) */
	void **entries;				/* cache-line-aligned slot array (trails this struct) */
};
/* Reset an iterator so ck_hs_next() starts from the first slot. */
void
ck_hs_iterator_init(struct ck_hs_iterator *iterator)
{

	iterator->offset = 0;
	iterator->cursor = NULL;
}
/*
 * Advance the iterator to the next live entry, storing its key through
 * `key` and returning true; returns false when the map is exhausted.
 *
 * BUG FIX: the original compared the out-parameter `key` itself (always a
 * valid, non-NULL pointer) against CK_HS_EMPTY and CK_HS_TOMBSTONE, so the
 * condition was always true and empty or deleted slots were handed back to
 * the caller as keys. The loaded slot value must be tested instead.
 */
bool
ck_hs_next(struct ck_hs *hs, struct ck_hs_iterator *i, void **key)
{
	struct ck_hs_map *map = hs->map;
	void *value;

	if (i->offset >= map->capacity)
		return false;

	do {
		value = map->entries[i->offset];
		if (value != CK_HS_EMPTY && value != CK_HS_TOMBSTONE) {
			*key = value;
			i->offset++;
			return true;
		}
	} while (++i->offset < map->capacity);

	return false;
}
/* Copy the current map's health counters into the caller's snapshot. */
void
ck_hs_stat(struct ck_hs *hs, struct ck_hs_stat *st)
{
	struct ck_hs_map *map = hs->map;

	st->probe_maximum = map->probe_maximum;
	st->n_entries = map->n_entries;
	st->tombstones = map->tombstones;
}
unsigned long
ck_hs_count(struct ck_hs *hs)
{
return hs->map->n_entries;
}
/*
 * Release a map through the set's allocator.  map->size was recorded at
 * creation time; `defer` is forwarded to the allocator so a map still
 * visible to concurrent readers can be freed via safe memory
 * reclamation rather than immediately.
 */
static void
ck_hs_map_destroy(struct ck_malloc *m, struct ck_hs_map *map, bool defer)
{

	m->free(map, map->size, defer);
	return;
}
/*
 * Allocate and initialize a map with at least `entries` slots (rounded
 * up to the next power of two).  The slot array lives in the same
 * allocation as the map header and is aligned to a cache line.
 * Returns NULL on allocation failure.
 */
static struct ck_hs_map *
ck_hs_map_create(struct ck_hs *hs, unsigned long entries)
{
	struct ck_hs_map *map;
	unsigned long size, n_entries, limit;

	n_entries = ck_internal_power_2(entries);

	/* Over-allocate so the slot array can be aligned to a cache line. */
	size = sizeof(struct ck_hs_map) +
	    (sizeof(void *) * n_entries + CK_MD_CACHELINE - 1);
	map = hs->m->malloc(size);
	if (map == NULL)
		return NULL;

	map->size = size;

	/* We should probably use a more intelligent heuristic for default probe length. */
	limit = ck_internal_max(n_entries >> (CK_HS_PROBE_L1_SHIFT + 2), CK_HS_PROBE_L1_DEFAULT);
	if (limit > UINT_MAX)
		limit = UINT_MAX;

	map->probe_limit = (unsigned int)limit;
	map->probe_maximum = 0;
	map->capacity = n_entries;
	map->step = ck_internal_bsf(n_entries);
	map->mask = n_entries - 1;
	map->n_entries = 0;

	/*
	 * Align map allocation to cache line.  The addition is
	 * parenthesized explicitly ('+' binds tighter than '&', which the
	 * original relied on implicitly) and the mask is computed at
	 * uintptr_t width rather than int width.
	 */
	map->entries = (void *)(((uintptr_t)(map + 1) + (CK_MD_CACHELINE - 1)) &
	    ~(uintptr_t)(CK_MD_CACHELINE - 1));

	memset(map->entries, 0, sizeof(void *) * n_entries);
	memset(map->generation, 0, sizeof map->generation);

	/* Commit entries purge with respect to map publication. */
	ck_pr_fence_store();
	return map;
}
/*
 * Replace the current map with a freshly zeroed map of the same
 * capacity, then hand the old map to deferred destruction so
 * concurrent readers may drain.  Returns false if allocation of the
 * replacement fails, leaving the set untouched.
 */
bool
ck_hs_reset(struct ck_hs *hs)
{
	struct ck_hs_map *stale, *fresh;

	stale = hs->map;
	fresh = ck_hs_map_create(hs, stale->capacity);
	if (fresh == NULL)
		return false;

	/* Publish the empty map, then retire the old one. */
	ck_pr_store_ptr(&hs->map, fresh);
	ck_hs_map_destroy(hs->m, stale, true);
	return true;
}
/*
 * Compute the next probe offset after a full cache line (CK_HS_PROBE_L1
 * slots) has been exhausted.  The stride is derived from the hash bits
 * above map->step, spread so that consecutive probe sequences for
 * different hashes diverge; CK_HS_PROBE_L1 is OR'd in so the stride is
 * never zero.  `level` and `probes` are accepted for interface
 * compatibility but unused.
 */
static inline unsigned long
ck_hs_map_probe_next(struct ck_hs_map *map, unsigned long offset, ck_hs_hash_t h, unsigned long level, unsigned long probes)
{
	ck_hs_hash_t folded;
	unsigned long hop;

	(void)level;
	(void)probes;

	folded.value = h.value >> map->step;
	hop = ((folded.value & ~CK_HS_PROBE_L1_MASK) << 1) |
	    (folded.value & CK_HS_PROBE_L1_MASK);
	hop |= CK_HS_PROBE_L1;
	return (offset + hop) & map->mask;
}
/*
 * Grow the set to at least `capacity` slots.  Builds a new map,
 * rehashes every live entry into it (tombstones are dropped), then
 * publishes the new map with a store fence so readers observe fully
 * initialized contents; the old map goes to deferred destruction.
 * Returns false if the current map is already larger than `capacity`
 * or if allocation fails.  Writer-side only — not safe to call
 * concurrently with other writers.
 */
bool
ck_hs_grow(struct ck_hs *hs,
    unsigned long capacity)
{
	struct ck_hs_map *map, *update;
	void **bucket, *previous;
	unsigned long k, i, j, offset, probes;

restart:
	map = hs->map;

	if (map->capacity > capacity)
		return false;

	update = ck_hs_map_create(hs, capacity);
	if (update == NULL)
		return false;

	/* Rehash every live entry of the old map into the new one. */
	for (k = 0; k < map->capacity; k++) {
		struct ck_hs_hash h;

		previous = map->entries[k];
		if (previous == CK_HS_EMPTY || previous == CK_HS_TOMBSTONE)
			continue;

#ifdef CK_HS_PP
		/* Strip packed hash bits so the bare pointer is rehashed. */
		if (hs->mode & CK_HS_MODE_OBJECT)
			previous = (void *)((uintptr_t)previous & (((uintptr_t)1 << CK_MD_VMA_BITS) - 1));
#endif

		h = hs->hf(previous, hs->seed);
		offset = h.value & update->mask;
		probes = 0;

		for (i = 0; i < update->probe_limit; i++) {
			/* Scan the cache line containing the slot before hopping. */
			bucket = (void *)((uintptr_t)&update->entries[offset] & ~(CK_MD_CACHELINE - 1));

			for (j = 0; j < CK_HS_PROBE_L1; j++) {
				void **cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));

				probes++;
				if (*cursor == CK_HS_EMPTY) {
					*cursor = previous;
					update->n_entries++;

					if (probes > update->probe_maximum)
						update->probe_maximum = probes;

					break;
				}
			}

			/* j < CK_HS_PROBE_L1 means the entry found a slot. */
			if (j < CK_HS_PROBE_L1)
				break;

			offset = ck_hs_map_probe_next(update, offset, h, i, probes);
		}

		if (i == update->probe_limit) {
			/*
			 * We have hit the probe limit, map needs to be even larger.
			 */
			ck_hs_map_destroy(hs->m, update, false);
			capacity <<= 1;
			goto restart;
		}
	}

	/* Commit new table contents before publishing the new map pointer. */
	ck_pr_fence_store();
	ck_pr_store_ptr(&hs->map, update);
	ck_hs_map_destroy(hs->m, map, true);
	return true;
}
/*
 * Core probe routine.  Walks the probe sequence for hash h looking for
 * `key`, for at most `probe_limit` line hops.
 *
 * On return:
 *   - return value: slot where the search stopped (first empty slot, or
 *     the matching entry), or NULL if the probe limit was exhausted;
 *   - *object: the matching entry (unpacked if CK_HS_PP), or NULL;
 *   - *priority: earliest tombstone slot seen, or NULL — a writer may
 *     reuse it to shorten the entry's probe sequence;
 *   - *n_probes: probes consumed up to the tombstone (if one was seen)
 *     or up to the stopping slot.
 *
 * Slots are read with ck_pr_load_ptr, so this path is usable by the
 * reader side as well as by writers.
 */
static void **
ck_hs_map_probe(struct ck_hs *hs,
    struct ck_hs_map *map,
    unsigned long *n_probes,
    void ***priority,
    ck_hs_hash_t h,
    const void *key,
    void **object,
    unsigned long probe_limit)
{
	void **bucket, **cursor, *k;
	const void *compare;
	void **pr = NULL;
	unsigned long offset, i, j;
	unsigned long probes = 0;

#ifdef CK_HS_PP
	/* If we are storing object pointers, then we may leverage pointer packing. */
	struct ck_hs_hash hv;

	if (hs->mode & CK_HS_MODE_OBJECT) {
		hv.value = (h.value >> 25) & CK_HS_KEY_MASK;
		compare = (void *)((uintptr_t)key | (hv.value << CK_MD_VMA_BITS));
	} else {
		compare = key;
	}
#else
	compare = key;
#endif

	offset = h.value & map->mask;
	*object = NULL;

	for (i = 0; i < probe_limit; i++) {
		/* Probe the cache line containing the slot first. */
		bucket = (void **)((uintptr_t)&map->entries[offset] & ~(CK_MD_CACHELINE - 1));

		for (j = 0; j < CK_HS_PROBE_L1; j++) {
			cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));
			probes++;

			k = ck_pr_load_ptr(cursor);
			/* An empty slot terminates the probe sequence. */
			if (k == CK_HS_EMPTY)
				goto leave;

			if (k == CK_HS_TOMBSTONE) {
				/* Remember the first tombstone for possible reuse. */
				if (pr == NULL)
					pr = cursor;
				*n_probes = probes;
				continue;
			}
#ifdef CK_HS_PP
			if (hs->mode & CK_HS_MODE_OBJECT) {
				/* Cheap reject on the packed hash bits before unpacking. */
				if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv.value)
					continue;

				k = (void *)((uintptr_t)k & (((uintptr_t)1 << CK_MD_VMA_BITS) - 1));
			}
#endif
			/* Pointer identity first, then the user comparator if any. */
			if (k == compare)
				goto leave;

			if (hs->compare == NULL)
				continue;

			if (hs->compare(k, key) == true)
				goto leave;
		}
		offset = ck_hs_map_probe_next(map, offset, h, i, probes);
	}

	return NULL;

leave:
	*object = k;
	/* If a tombstone was recorded, *n_probes already points at it. */
	if (pr == NULL)
		*n_probes = probes;

	*priority = pr;
	return cursor;
}
/*
 * Insert `key`, replacing any existing matching entry.  The replaced
 * entry (or NULL) is returned through *previous.  Grows the map and
 * retries when the probe limit is exhausted or when occupancy exceeds
 * half of capacity.  Returns false only if a required grow fails.
 * Writer-side only (SPMC: single writer, concurrent readers).
 */
bool
ck_hs_set(struct ck_hs *hs,
    struct ck_hs_hash h,
    const void *key,
    void **previous)
{
	void **slot, **first, *object, *insert;
	unsigned long n_probes;
	struct ck_hs_map *map;

	*previous = NULL;

restart:
	map = hs->map;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit);
	if (slot == NULL) {
		/* Probe limit exhausted: grow and retry on the new map. */
		if (ck_hs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

#ifdef CK_HS_PP
	/* Pack high-order hash bits into the stored pointer for cheap rejects. */
	if (hs->mode & CK_HS_MODE_OBJECT) {
		insert = (void *)((uintptr_t)key | ((h.value >> 25) << CK_MD_VMA_BITS));
	} else {
		insert = (void *)key;
	}
#else
	insert = (void *)key;
#endif

	/* Publish a larger probe bound before the entry becomes visible. */
	if (n_probes > map->probe_maximum)
		ck_pr_store_uint(&map->probe_maximum, n_probes);

	if (first != NULL) {
		/* If an earlier bucket was found, then store entry there. */
		ck_pr_store_ptr(first, insert);

		/*
		 * If a duplicate was found, then we must guarantee that new entry
		 * is visible with respect to concurrent probe sequences.
		 */
		if (*slot != CK_HS_EMPTY) {
			ck_pr_inc_uint(&map->generation[h.value & CK_HS_G_MASK]);
			ck_pr_fence_store();
			ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		}
	} else {
		/*
		 * If we are storing into same slot, then atomic store is sufficient
		 * for replacement.
		 */
		ck_pr_store_ptr(slot, insert);
	}

	if (object == NULL) {
		/* New entry (not a replacement): count it and grow at 50% load. */
		map->n_entries++;

		if ((map->n_entries << 1) > map->capacity)
			ck_hs_grow(hs, map->capacity << 1);
	}

	*previous = object;
	return true;
}
/*
 * Insert `key` only if no matching entry exists.  Returns false on a
 * duplicate or if a required grow fails.  Like ck_hs_set, reuses an
 * earlier tombstone slot when one was found, using a generation bump
 * plus fence so concurrent readers never miss the relocated entry.
 * Writer-side only.
 */
bool
ck_hs_put(struct ck_hs *hs,
    struct ck_hs_hash h,
    const void *key)
{
	void **slot, **first, *object, *insert;
	unsigned long n_probes;
	struct ck_hs_map *map;

restart:
	map = hs->map;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit);
	if (slot == NULL) {
		/* Probe limit exhausted: grow and retry on the new map. */
		if (ck_hs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

	/* If a match was found, then fail. */
	if (*slot != CK_HS_EMPTY)
		return false;

#ifdef CK_HS_PP
	/* Pack high-order hash bits into the stored pointer for cheap rejects. */
	if (hs->mode & CK_HS_MODE_OBJECT) {
		insert = (void *)((uintptr_t)key | ((h.value >> 25) << CK_MD_VMA_BITS));
	} else {
		insert = (void *)key;
	}
#else
	insert = (void *)key;
#endif

	/* Publish a larger probe bound before the entry becomes visible. */
	if (n_probes > map->probe_maximum)
		ck_pr_store_uint(&map->probe_maximum, n_probes);

	if (first != NULL) {
		/* If an earlier bucket was found, then go ahead and replace it. */
		ck_pr_store_ptr(first, insert);
		ck_pr_inc_uint(&map->generation[h.value & CK_HS_G_MASK]);

		/* Guarantee that new object is visible with respect to generation increment. */
		ck_pr_fence_store();
		ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
	} else {
		/* An empty slot was found. */
		ck_pr_store_ptr(slot, insert);
	}

	map->n_entries++;

	/* Keep load factor at or below 50%. */
	if ((map->n_entries << 1) > map->capacity)
		ck_hs_grow(hs, map->capacity << 1);

	return true;
}
/*
 * Reader-side lookup.  Returns the matching entry or NULL.
 *
 * Retry protocol: the generation counter for this hash class is read
 * before and after the probe (with load fences around it).  A writer
 * bumps the counter whenever it relocates an entry within this class,
 * so a changed counter means the probe may have raced a relocation and
 * the lookup is restarted against the freshly loaded map.
 */
void *
ck_hs_get(struct ck_hs *hs,
    struct ck_hs_hash h,
    const void *key)
{
	void **slot, **first, *object;
	struct ck_hs_map *map;
	unsigned long n_probes;
	unsigned int g, g_p, probe;
	unsigned int *generation;

	do {
		map = ck_pr_load_ptr(&hs->map);
		generation = &map->generation[h.value & CK_HS_G_MASK];
		g = ck_pr_load_uint(generation);
		/* Readers only need to probe as far as the deepest stored entry. */
		probe = ck_pr_load_uint(&map->probe_maximum);
		ck_pr_fence_load();

		slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, probe);
		if (slot == NULL)
			return NULL;

		ck_pr_fence_load();
		g_p = ck_pr_load_uint(generation);
	} while (g != g_p);

	return object;
}
/*
 * Remove `key` from the set.  Returns the removed entry, or NULL if no
 * match was found.  The slot is replaced with a tombstone (not emptied)
 * so concurrent readers' probe sequences are not truncated.
 * Writer-side only.
 */
void *
ck_hs_remove(struct ck_hs *hs,
    struct ck_hs_hash h,
    const void *key)
{
	void **slot, **first, *object;
	struct ck_hs_map *map = hs->map;
	unsigned long n_probes;

	/* probe_maximum bounds the search: a live entry cannot sit deeper. */
	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_maximum);
	if (slot == NULL || object == NULL)
		return NULL;

	ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
	map->n_entries--;
	map->tombstones++;
	return object;
}
/*
 * Initialize a hash set.  The allocator (with both malloc and free
 * callbacks) and the hash function are mandatory; the comparator may be
 * NULL, in which case lookups rely on pointer identity only.  Returns
 * false on invalid arguments or if the initial map cannot be allocated.
 */
bool
ck_hs_init(struct ck_hs *hs,
    unsigned int mode,
    ck_hs_hash_cb_t *hf,
    ck_hs_compare_cb_t *compare,
    struct ck_malloc *m,
    unsigned long n_entries,
    unsigned long seed)
{

	if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
		return false;

	hs->hf = hf;
	hs->compare = compare;
	hs->m = m;
	hs->mode = mode;
	hs->seed = seed;
	hs->map = ck_hs_map_create(hs, n_entries);

	return hs->map != NULL;
}

@ -60,6 +60,12 @@ ck_internal_power_2(uint32_t v)
return (++v);
}
/*
 * Branchless maximum of two unsigned longs: when x < y the mask
 * -(x < y) is all ones and x ^ (x ^ y) yields y; otherwise the mask is
 * zero and x is returned unchanged.
 */
CK_CC_INLINE static unsigned long
ck_internal_max(unsigned long x, unsigned long y)
{

	return x ^ ((x ^ y) & -(x < y));
}
CK_CC_INLINE static uint64_t
ck_internal_max_64(uint64_t x, uint64_t y)
@ -75,15 +81,30 @@ ck_internal_max_32(uint32_t x, uint32_t y)
return x ^ ((x ^ y) & -(x < y));
}
/*
 * Scan v from the most significant bit downward and return the number
 * of positions skipped before the first set bit; returns 1 when no set
 * bit is found in the scanned range.  (Despite the "bsf" name, the scan
 * starts at the top bit — callers such as ck_hs_map_create use the
 * result as a hash shift.)
 *
 * Fix: the shifted constant must be unsigned long.  `1 << (s - i)`
 * shifts an int by up to word-size - 1 bits, which is undefined
 * behavior on LP64 targets where s is 63.
 */
CK_CC_INLINE static unsigned long
ck_internal_bsf(unsigned long v)
{
	unsigned int i;
	const unsigned int s = sizeof(unsigned long) * 8 - 1;

	for (i = 0; i < s; i++) {
		if (v & (1UL << (s - i)))
			return i;
	}

	return 1;
}
/*
 * 64-bit variant of ck_internal_bsf: scan v from bit 63 downward and
 * return the number of positions skipped before the first set bit;
 * returns 1 when no set bit is found in the scanned range.
 *
 * Fixes: (1) the source span contained unresolved diff residue (both
 * the old `i < 8` and new `i < s` loop headers, and both `return i;`
 * and `return 1;` tails) — this is the reconstructed post-patch body;
 * (2) `1 << (63 - i)` shifts an int by up to 63 bits, which is
 * undefined behavior — the constant must be 64 bits wide.
 */
CK_CC_INLINE static uint64_t
ck_internal_bsf_64(uint64_t v)
{
	unsigned int i;
	const unsigned int s = sizeof(unsigned long) * 8 - 1;

	for (i = 0; i < s; i++) {
		if (v & ((uint64_t)1 << (63 - i)))
			return i;
	}

	return 1;
}

Loading…
Cancel
Save