ck_ht: Lock-free SPMC hash table, for x86_64.

This is a hash table that is optimized for architectures that
implement total store ordering and workloads that are read-heavy
involving a single writer and multiple readers. Unlike traditional
non-blocking multi-producer/multi-consumer hash table
implementations this version allows for immediate re-use of deleted
buckets (no need for explicit reclamation cycles) and is more
conducive to traditional safe memory reclamation schemes used in
unmanaged languages (otherwise, we would require key duplication).

It is relatively heavy-weight for MPMC workloads on architectures
which do not implement TSO in comparison to Click's MPMC hash
table. However, it still has better performance characteristics
than a blocking hash table.

The committed version currently only provides x86_64 support. This is
being committed for review by peers and for a silent release that will
allow us to test ck_ht_spmc under high production workloads.

Next public release will include additional documentation as well as
support for other architectures.

In the meantime, please see the unit tests for example usage. Included in
this commit: Dropped -Wbad-function-cast from GCC port.
ck_pring
Samy Al Bahra 13 years ago
parent dc97d69ca4
commit 9f786337f7

2
configure vendored

@ -363,7 +363,7 @@ elif test "$COMPILER" = "gcc"; then
CC_WL_OPT="-soname"
fi
LDFLAGS="-shared -fPIC -Wl,$CC_WL_OPT,libck.so.$VERSION_MAJOR $LDFLAGS"
CFLAGS="-D_XOPEN_SOURCE=600 -D_BSD_SOURCE -std=gnu99 -pedantic -Wall -W -Wundef -Wendif-labels -Wshadow -Wpointer-arith -Wbad-function-cast -Wcast-align -Wwrite-strings -Wstrict-prototypes -Wmissing-prototypes -Wnested-externs -Winline -Wdisabled-optimization -fstrict-aliasing -O2 -pipe -Wno-parentheses $CFLAGS"
CFLAGS="-D_XOPEN_SOURCE=600 -D_BSD_SOURCE -std=gnu99 -pedantic -Wall -W -Wundef -Wendif-labels -Wshadow -Wpointer-arith -Wcast-align -Wwrite-strings -Wstrict-prototypes -Wmissing-prototypes -Wnested-externs -Winline -Wdisabled-optimization -fstrict-aliasing -O2 -pipe -Wno-parentheses $CFLAGS"
PTHREAD_CFLAGS="-pthread"
else
assert "" "unknown compiler"

@ -0,0 +1,167 @@
/*
* Copyright 2012 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _CK_HT_H
#define _CK_HT_H
#ifdef __x86_64__
#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_malloc.h>
#include <ck_stdint.h>
#include <stdbool.h>
#include <stddef.h>
/*
 * A hash value is carried as a plain 64-bit integer so it can be
 * snapshotted once and passed around by value.
 */
struct ck_ht_hash {
	uint64_t value;
};
typedef struct ck_ht_hash ck_ht_hash_t;

/*
 * DIRECT mode keys on the uintptr_t key word itself; BYTESTRING mode
 * keys on a (pointer, length) byte string referenced by the key word.
 */
enum ck_ht_mode {
	CK_HT_MODE_DIRECT,
	CK_HT_MODE_BYTESTRING
};

/*
 * One table slot. In bytestring mode the upper 16 bits of key hold the
 * key length and the upper 16 bits of value hold memoized hash bits
 * (see the accessors below); pointers are assumed to fit in the low
 * 48 bits (x86_64 canonical addresses).
 */
struct ck_ht_entry {
	uintptr_t key;
	uintptr_t value CK_CC_PACKED;
} CK_CC_ALIGNED;
typedef struct ck_ht_entry ck_ht_entry_t;

/* Opaque bucket-array descriptor; defined in src/ck_ht.c. */
struct ck_ht_map;

struct ck_ht {
	struct ck_ht_map *map;	/* current map; swapped atomically on grow/reset */
	enum ck_ht_mode mode;
	uint64_t seed;		/* hash seed fixed at initialization */
};
typedef struct ck_ht ck_ht_t;

/* Single-threaded iterator state: offset is the next slot to examine. */
struct ck_ht_iterator {
	struct ck_ht_entry *current;
	uint64_t offset;
};
typedef struct ck_ht_iterator ck_ht_iterator_t;

#define CK_HT_ITERATOR_INITIALIZER { NULL, 0 }
CK_CC_INLINE static void
ck_ht_iterator_init(struct ck_ht_iterator *iterator)
{
iterator->current = NULL;
iterator->offset = 0;
return;
}
/*
 * Pack a key pointer and its length into a single word: pointer in the
 * low 48 bits, length in the high 16 bits. Assumes the pointer fits in
 * 48 bits (x86_64 canonical addressing).
 */
CK_CC_INLINE static void
ck_ht_entry_key_set(ck_ht_entry_t *entry, const void *key, uint16_t key_length)
{

	entry->key = (uintptr_t)key | ((uintptr_t)key_length << 48);
	return;
}
/* Recover the key pointer by masking off the length in the top 16 bits. */
CK_CC_INLINE static void *
ck_ht_entry_key(ck_ht_entry_t *entry)
{

	return (void *)(entry->key & (((uintptr_t)1 << 48) - 1));
}
/* Recover the key length stored in the top 16 bits of the key word. */
CK_CC_INLINE static uint16_t
ck_ht_entry_key_length(ck_ht_entry_t *entry)
{

	return entry->key >> 48;
}
/*
 * Recover the value pointer; the top 16 bits hold memoized hash bits in
 * bytestring mode (see ck_ht_entry_set) and are masked off here.
 */
CK_CC_INLINE static void *
ck_ht_entry_value(ck_ht_entry_t *entry)
{

	return (void *)(entry->value & (((uintptr_t)1 << 48) - 1));
}
/*
 * Initialize a bytestring-mode entry. The key word packs the key pointer
 * (low 48 bits) with its length (high 16 bits); the value word packs the
 * value pointer with bits 32-47 of the hash. The memoized hash bits let
 * probes reject mismatched slots without a full key comparison.
 */
CK_CC_INLINE static void
ck_ht_entry_set(struct ck_ht_entry *entry,
    ck_ht_hash_t h,
    const void *key,
    uint16_t key_length,
    const void *value)
{

	entry->key = (uintptr_t)key | ((uintptr_t)key_length << 48);
	entry->value = (uintptr_t)value | ((uintptr_t)(h.value >> 32) << 48);
	return;
}
/* Direct mode stores the raw key and value words; no bit packing. */
CK_CC_INLINE static void
ck_ht_entry_set_direct(struct ck_ht_entry *entry,
    uintptr_t key,
    uintptr_t value)
{

	entry->key = key;
	entry->value = value;
	return;
}
/* Direct-mode accessor: the key word is returned unmodified. */
CK_CC_INLINE static uintptr_t
ck_ht_entry_key_direct(ck_ht_entry_t *entry)
{

	return entry->key;
}
/* Direct-mode accessor: the value word is returned unmodified. */
CK_CC_INLINE static uintptr_t
ck_ht_entry_value_direct(ck_ht_entry_t *entry)
{

	return entry->value;
}
/*
 * Iteration must occur without any concurrent mutations on
 * the hash table.
 */
bool ck_ht_next(ck_ht_t *, ck_ht_iterator_t *, ck_ht_entry_t **entry);

/* Hash helpers: bytestring and direct (uintptr_t key) variants. */
void ck_ht_hash(ck_ht_hash_t *, ck_ht_t *, const void *, uint16_t);
void ck_ht_hash_direct(ck_ht_hash_t *, ck_ht_t *, uintptr_t);

/* Must be called before any table is initialized. */
bool ck_ht_allocator_set(struct ck_malloc *);

bool ck_ht_init(ck_ht_t *, enum ck_ht_mode, uint64_t, uint64_t);
void ck_ht_destroy(ck_ht_t *);

/*
 * SPMC operations: safe for a single writer concurrent with multiple
 * readers. set replaces, put inserts only if absent, get snapshots.
 */
bool ck_ht_set_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
bool ck_ht_put_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
bool ck_ht_get_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
bool ck_ht_grow_spmc(ck_ht_t *, uint64_t);
bool ck_ht_remove_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
bool ck_ht_reset_spmc(ck_ht_t *);
uint64_t ck_ht_count(ck_ht_t *);
#endif /* __x86_64__ */
#endif /* _CK_HT_H */

@ -3,6 +3,8 @@
all:
$(MAKE) -C ./ck_queue/validate all
$(MAKE) -C ./ck_brlock/validate all
$(MAKE) -C ./ck_ht/validate all
$(MAKE) -C ./ck_ht/benchmark all
$(MAKE) -C ./ck_brlock/benchmark all
$(MAKE) -C ./ck_spinlock/validate all
$(MAKE) -C ./ck_spinlock/benchmark all
@ -28,6 +30,8 @@ all:
clean:
$(MAKE) -C ./ck_queue/validate clean
$(MAKE) -C ./ck_brlock/validate clean
$(MAKE) -C ./ck_ht/validate clean
$(MAKE) -C ./ck_ht/benchmark clean
$(MAKE) -C ./ck_brlock/benchmark clean
$(MAKE) -C ./ck_spinlock/validate clean
$(MAKE) -C ./ck_spinlock/benchmark clean

@ -0,0 +1,14 @@
.PHONY: clean distribution
OBJECTS=ck_ht
all: $(OBJECTS)
ck_ht: ck_ht.c ../../../include/ck_ht.h ../../../src/ck_ht.c
$(CC) $(CFLAGS) -o ck_ht ck_ht.c ../../../src/ck_ht.c
clean:
rm -rf *~ *.o $(OBJECTS) *.dSYM
include ../../../build/regressions.build
CFLAGS+=-D_GNU_SOURCE

@ -0,0 +1,265 @@
/*
* Copyright 2012 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifdef __x86_64__
#include <assert.h>
#include <ck_ht.h>
#include <ck_malloc.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "../../common.h"
static ck_ht_t ht;			/* table under benchmark */
static char **keys;			/* keys loaded from the dictionary file */
static size_t keys_length = 0;		/* number of keys loaded */
static size_t keys_capacity = 128;	/* current capacity of the keys array */
/* Allocation callback for ck_ht: defer directly to the C allocator. */
static void *
ht_malloc(size_t r)
{
	void *p;

	p = malloc(r);
	return p;
}
/*
 * Deallocation callback for ck_ht: the size and defer arguments are
 * irrelevant to free(3) and are ignored.
 */
static void
ht_free(void *p, size_t b, bool r)
{

	(void)r;
	(void)b;

	free(p);
	return;
}
/* Allocator vtable handed to ck_ht_allocator_set in table_init. */
static struct ck_malloc my_allocator = {
	.malloc = ht_malloc,
	.free = ht_free
};
/*
 * Seed the PRNG, register the allocator and create an 8-slot bytestring
 * table with a random hash seed; exits the process on failure.
 */
static void
table_init(void)
{

	srand48((long int)time(NULL));
	ck_ht_allocator_set(&my_allocator);
	if (ck_ht_init(&ht, CK_HT_MODE_BYTESTRING, 8, lrand48()) == false) {
		perror("ck_ht_init");
		exit(EXIT_FAILURE);
	}

	return;
}
/*
 * Remove a key from the table; returns false if it was absent.
 * NOTE(review): strlen() (size_t) is narrowed to uint16_t by the ck_ht
 * key-length API — assumes keys are < 64 KB.
 */
static bool
table_remove(const char *value)
{
	ck_ht_entry_t entry;
	ck_ht_hash_t h;
	size_t l = strlen(value);

	ck_ht_hash(&h, &ht, value, l);
	ck_ht_entry_key_set(&entry, value, l);
	return ck_ht_remove_spmc(&ht, h, &entry);
}
/* Replace (or insert) the key's value with the constant "REPLACED". */
static bool
table_replace(const char *value)
{
	ck_ht_entry_t entry;
	ck_ht_hash_t h;
	size_t l = strlen(value);

	ck_ht_hash(&h, &ht, value, l);
	ck_ht_entry_set(&entry, h, value, l, "REPLACED");
	return ck_ht_set_spmc(&ht, h, &entry);
}
/* Look a key up; returns its value pointer or NULL if absent. */
static void *
table_get(const char *value)
{
	ck_ht_entry_t entry;
	ck_ht_hash_t h;
	size_t l = strlen(value);

	ck_ht_hash(&h, &ht, value, l);
	ck_ht_entry_key_set(&entry, value, l);
	if (ck_ht_get_spmc(&ht, h, &entry) == true)
		return ck_ht_entry_value(&entry);

	return NULL;
}
/* Insert a key with the constant value "VALUE"; false if already present. */
static bool
table_insert(const char *value)
{
	ck_ht_entry_t entry;
	ck_ht_hash_t h;
	size_t l = strlen(value);

	ck_ht_hash(&h, &ht, value, l);
	ck_ht_entry_set(&entry, h, value, l, "VALUE");
	return ck_ht_put_spmc(&ht, h, &entry);
}
/* Number of live entries in the table. */
static size_t
table_count(void)
{

	return ck_ht_count(&ht);
}
/* Swap in a fresh empty map of the same capacity. */
static bool
table_reset(void)
{

	return ck_ht_reset_spmc(&ht);
}
/*
 * Serial benchmark driver: loads a dictionary file (one key per line),
 * then times insertion, replacement, positive get, removal and negative
 * get with rdtsc, reporting average ticks per operation.
 */
int
main(int argc, char *argv[])
{
	FILE *fp;
	char buffer[512];
	size_t i, j;
	size_t r = 20;		/* repetitions; overridable via argv[2] */
	unsigned int d = 0;	/* duplicate-insertion counter */
	uint64_t s, e, a;

	if (argc < 2) {
		fprintf(stderr, "Usage: ck_ht <dictionary> [repetitions]\n");
		exit(EXIT_FAILURE);
	}

	if (argc == 3)
		r = atoi(argv[2]);

	keys = malloc(sizeof(char *) * keys_capacity);
	assert(keys != NULL);

	fp = fopen(argv[1], "r");
	assert(fp != NULL);

	while (fgets(buffer, sizeof(buffer), fp) != NULL) {
		/*
		 * Strip the trailing newline.
		 * NOTE(review): an empty line would index buffer[-1] via
		 * strlen() - 1 — assumes dictionary lines are non-empty;
		 * verify input files.
		 */
		buffer[strlen(buffer) - 1] = '\0';
		keys[keys_length++] = strdup(buffer);
		assert(keys[keys_length - 1] != NULL);

		/* Double the key array when full. */
		if (keys_length == keys_capacity) {
			char **t;

			t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
			assert(t != NULL);
			keys = t;
		}
	}

	/* Warm-up pass: populate the table once and report duplicates. */
	table_init();
	for (i = 0; i < keys_length; i++)
		d += table_insert(keys[i]) == false;
	fprintf(stderr, "%zu entries stored and %u duplicates.\n\n",
	    table_count(), d);

	/* Phase 1: insertion into a freshly reset table. */
	a = 0;
	for (j = 0; j < r; j++) {
		if (table_reset() == false) {
			fprintf(stderr, "ERROR: Failed to reset hash table.\n");
			exit(EXIT_FAILURE);
		}

		s = rdtsc();
		for (i = 0; i < keys_length; i++)
			d += table_insert(keys[i]) == false;
		e = rdtsc();
		a += e - s;
	}
	printf("Serial insertion: %" PRIu64 " ticks\n", a / (r * keys_length));

	/* Phase 2: replacement of existing entries. */
	a = 0;
	for (j = 0; j < r; j++) {
		s = rdtsc();
		for (i = 0; i < keys_length; i++)
			table_replace(keys[i]);
		e = rdtsc();
		a += e - s;
	}
	printf(" Serial replace: %" PRIu64 " ticks\n", a / (r * keys_length));

	/* Phase 3: positive lookups; every key must be present. */
	a = 0;
	for (j = 0; j < r; j++) {
		s = rdtsc();
		for (i = 0; i < keys_length; i++) {
			if (table_get(keys[i]) == NULL) {
				fprintf(stderr, "ERROR: Unexpected NULL value.\n");
				exit(EXIT_FAILURE);
			}
		}
		e = rdtsc();
		a += e - s;
	}
	printf("      Serial get: %" PRIu64 " ticks\n", a / (r * keys_length));

	/* Phase 4: removal (re-inserting, untimed, between rounds). */
	a = 0;
	for (j = 0; j < r; j++) {
		s = rdtsc();
		for (i = 0; i < keys_length; i++)
			table_remove(keys[i]);
		e = rdtsc();
		a += e - s;

		for (i = 0; i < keys_length; i++)
			table_insert(keys[i]);
	}
	printf("   Serial remove: %" PRIu64 " ticks\n", a / (r * keys_length));

	/* Phase 5: lookups of a key known to be absent. */
	a = 0;
	for (j = 0; j < r; j++) {
		s = rdtsc();
		for (i = 0; i < keys_length; i++) {
			table_get("\x50\x03\x04\x05\x06\x10");
		}
		e = rdtsc();
		a += e - s;
	}
	printf("    Negative get: %" PRIu64 " ticks\n", a / (r * keys_length));

	return 0;
}
#else
/* Placeholder on unsupported architectures: the benchmark is a no-op. */
int
main(void)
{

	return 0;
}
#endif /* __x86_64__ */

@ -0,0 +1,14 @@
.PHONY: clean distribution
OBJECTS=serial
all: $(OBJECTS)
serial: serial.c ../../../include/ck_ht.h ../../../src/ck_ht.c
$(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_ht.c
clean:
rm -rf *~ *.o $(OBJECTS) *.dSYM
include ../../../build/regressions.build
CFLAGS+=-D_GNU_SOURCE

@ -0,0 +1,293 @@
/*
* Copyright 2012 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifdef __x86_64__
#include <assert.h>
#include <ck_ht.h>
#include <ck_malloc.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Allocation callback for ck_ht: defer directly to the C allocator. */
static void *
ht_malloc(size_t r)
{
	void *p;

	p = malloc(r);
	return p;
}
/*
 * Deallocation callback for ck_ht: the size and defer arguments are
 * irrelevant to free(3) and are ignored.
 */
static void
ht_free(void *p, size_t b, bool r)
{

	(void)r;
	(void)b;

	free(p);
	return;
}
/* Allocator vtable handed to ck_ht_allocator_set in main. */
static struct ck_malloc my_allocator = {
	.malloc = ht_malloc,
	.free = ht_free
};
/*
 * Bytestring-mode fixture: 44 words of which "What" and "down." appear
 * twice, giving 42 unique keys (the iteration test below counts on 42).
 */
const char *test[] = {"Samy", "Al", "Bahra", "dances", "in", "the", "wind.", "Once",
			"upon", "a", "time", "his", "gypsy", "ate", "one", "itsy",
			"bitsy", "spider.", "What", "goes", "up", "must",
			"come", "down.", "What", "is", "down", "stays",
			"down.", "A", "B", "C", "D", "E", "F", "G", "H",
			"I", "J", "K", "L", "M", "N", "O"};

/*
 * Direct-mode fixture: 16 keys of which 7 are duplicates (unique set is
 * 1..9); the put-failure test below counts on exactly 7 rejections.
 */
static uintptr_t direct[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 1, 2, 3, 4, 5, 9 };

/* Key guaranteed absent from the table for negative-lookup checks. */
const char *negative = "negative";
/*
 * Serial functional test of the ck_ht API: exercises put/get/set/remove,
 * reset, iteration and destruction in bytestring mode, then repeats the
 * core put/set/iterate checks in direct mode.
 */
int
main(void)
{
	size_t i, l;
	ck_ht_t ht;
	ck_ht_entry_t entry;
	ck_ht_hash_t h;
	ck_ht_iterator_t iterator = CK_HT_ITERATOR_INITIALIZER;
	ck_ht_entry_t *cursor;

	ck_ht_allocator_set(&my_allocator);

	/* Fixed seed keeps the run deterministic. */
	if (ck_ht_init(&ht, CK_HT_MODE_BYTESTRING, 8, 6602834) == false) {
		perror("ck_ht_init");
		exit(EXIT_FAILURE);
	}

	/* Populate; duplicate words exercise the put-on-existing path. */
	for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
		l = strlen(test[i]);
		ck_ht_hash(&h, &ht, test[i], l);
		ck_ht_entry_set(&entry, h, test[i], l, test[i]);
		ck_ht_put_spmc(&ht, h, &entry);
	}

	/* A second put of an existing key must be a no-op. */
	l = strlen(test[0]);
	ck_ht_hash(&h, &ht, test[0], l);
	ck_ht_entry_set(&entry, h, test[0], l, test[0]);
	ck_ht_put_spmc(&ht, h, &entry);

	/* Every key must be retrievable and map to itself. */
	for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
		l = strlen(test[i]);
		ck_ht_hash(&h, &ht, test[i], l);
		ck_ht_entry_key_set(&entry, test[i], l);

		if (ck_ht_get_spmc(&ht, h, &entry) == false) {
			fprintf(stderr, "ERROR: Failed to find [%s]\n", test[i]);
		} else {
			void *k, *v;

			k = ck_ht_entry_key(&entry);
			v = ck_ht_entry_value(&entry);
			if (strcmp(k, test[i]) || strcmp(v, test[i])) {
				fprintf(stderr, "ERROR: Mismatch: (%s, %s) != (%s, %s)\n",
				    (char *)k, (char *)v, test[i], test[i]);
			}
		}
	}

	/* Negative lookup must fail. */
	ck_ht_hash(&h, &ht, negative, strlen(negative));
	ck_ht_entry_key_set(&entry, negative, strlen(negative));
	if (ck_ht_get_spmc(&ht, h, &entry) == true) {
		fprintf(stderr, "ERROR: Found non-existing entry.\n");
	}

	/* Remove, confirm absence, re-insert and remove again. */
	for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
		l = strlen(test[i]);
		ck_ht_hash(&h, &ht, test[i], l);
		ck_ht_entry_key_set(&entry, test[i], l);

		if (ck_ht_get_spmc(&ht, h, &entry) == false)
			continue;

		if (ck_ht_remove_spmc(&ht, h, &entry) == false) {
			fprintf(stderr, "ERROR: Failed to delete existing entry\n");
			exit(EXIT_FAILURE);
		}

		if (ck_ht_get_spmc(&ht, h, &entry) == true)
			fprintf(stderr, "ERROR: Able to find [%s] after delete\n", test[i]);

		ck_ht_entry_set(&entry, h, test[i], l, test[i]);
		if (ck_ht_put_spmc(&ht, h, &entry) == false)
			fprintf(stderr, "ERROR: Failed to insert [%s]\n", test[i]);

		if (ck_ht_remove_spmc(&ht, h, &entry) == false) {
			fprintf(stderr, "ERROR: Failed to delete existing entry\n");
			exit(EXIT_FAILURE);
		}
	}

	/* Reset must leave an empty table. */
	ck_ht_reset_spmc(&ht);
	if (ck_ht_count(&ht) != 0) {
		fprintf(stderr, "ERROR: Map was not reset.\n");
		exit(EXIT_FAILURE);
	}

	/* Re-populate, then iterate: 42 unique keys are expected. */
	for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
		l = strlen(test[i]);
		ck_ht_hash(&h, &ht, test[i], l);
		ck_ht_entry_set(&entry, h, test[i], l, test[i]);
		ck_ht_put_spmc(&ht, h, &entry);
	}

	for (i = 0; ck_ht_next(&ht, &iterator, &cursor) == true; i++);
	if (i != 42) {
		fprintf(stderr, "ERROR: Incorrect number of entries in table.\n");
		exit(EXIT_FAILURE);
	}

	/* set on existing keys (value == key) and re-verify lookups. */
	for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
		l = strlen(test[i]);
		ck_ht_hash(&h, &ht, test[i], l);
		ck_ht_entry_set(&entry, h, test[i], l, test[i]);
		ck_ht_set_spmc(&ht, h, &entry);
	}

	for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
		l = strlen(test[i]);
		ck_ht_hash(&h, &ht, test[i], l);
		ck_ht_entry_key_set(&entry, test[i], l);

		if (ck_ht_get_spmc(&ht, h, &entry) == false) {
			fprintf(stderr, "ERROR: Failed to find [%s]\n", test[i]);
		} else {
			void *k, *v;

			k = ck_ht_entry_key(&entry);
			v = ck_ht_entry_value(&entry);
			if (strcmp(k, test[i]) || strcmp(v, test[i])) {
				fprintf(stderr, "ERROR: Mismatch: (%s, %s) != (%s, %s)\n",
				    (char *)k, (char *)v, test[i], test[i]);
			}
		}
	}

	/*
	 * set returns the previous entry; duplicated words ("What",
	 * "down.") are skipped since their prior value may already be
	 * "REPLACED" from an earlier iteration.
	 */
	for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
		l = strlen(test[i]);
		ck_ht_hash(&h, &ht, test[i], l);
		ck_ht_entry_set(&entry, h, test[i], l, "REPLACED");
		ck_ht_set_spmc(&ht, h, &entry);

		if (strcmp(test[i], "What") == 0)
			continue;

		if (strcmp(test[i], "down.") == 0)
			continue;

		if (strcmp(ck_ht_entry_value(&entry), test[i]) != 0) {
			fprintf(stderr, "Mismatch detected: %s, expected %s\n",
			    (char *)ck_ht_entry_value(&entry),
			    test[i]);
			exit(EXIT_FAILURE);
		}
	}

	/* Every stored value must now read "REPLACED". */
	ck_ht_iterator_init(&iterator);
	while (ck_ht_next(&ht, &iterator, &cursor) == true) {
		if (strcmp(ck_ht_entry_value(cursor), "REPLACED") != 0) {
			fprintf(stderr, "Mismatch detected: %s, expected REPLACED\n",
			    (char *)ck_ht_entry_value(cursor));
			exit(EXIT_FAILURE);
		}
	}

	/* Second delete/re-insert/delete sweep after the replacements. */
	for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
		l = strlen(test[i]);
		ck_ht_hash(&h, &ht, test[i], l);
		ck_ht_entry_key_set(&entry, test[i], l);

		if (ck_ht_get_spmc(&ht, h, &entry) == false)
			continue;

		if (ck_ht_remove_spmc(&ht, h, &entry) == false) {
			fprintf(stderr, "ERROR: Failed to delete existing entry\n");
			exit(EXIT_FAILURE);
		}

		if (ck_ht_get_spmc(&ht, h, &entry) == true)
			fprintf(stderr, "ERROR: Able to find [%s] after delete\n", test[i]);

		ck_ht_entry_set(&entry, h, test[i], l, test[i]);
		if (ck_ht_put_spmc(&ht, h, &entry) == false)
			fprintf(stderr, "ERROR: Failed to insert [%s]\n", test[i]);

		if (ck_ht_remove_spmc(&ht, h, &entry) == false) {
			fprintf(stderr, "ERROR: Failed to delete existing entry\n");
			exit(EXIT_FAILURE);
		}
	}

	ck_ht_destroy(&ht);

	/* Direct-mode round: 16 puts with 7 duplicate keys must yield 7 rejections. */
	if (ck_ht_init(&ht, CK_HT_MODE_DIRECT, 8, 6602834) == false) {
		perror("ck_ht_init");
		exit(EXIT_FAILURE);
	}

	l = 0;
	for (i = 0; i < sizeof(direct) / sizeof(*direct); i++) {
		ck_ht_hash_direct(&h, &ht, direct[i]);
		ck_ht_entry_set_direct(&entry, direct[i], (uintptr_t)test[i]);
		l += ck_ht_put_spmc(&ht, h, &entry) == false;
	}

	if (l != 7) {
		fprintf(stderr, "ERROR: Got %zu failures rather than 7\n", l);
		exit(EXIT_FAILURE);
	}

	/* Replace every direct-mode value; failures accumulate into l but
	 * are not checked further. */
	for (i = 0; i < sizeof(direct) / sizeof(*direct); i++) {
		ck_ht_hash_direct(&h, &ht, direct[i]);
		ck_ht_entry_set_direct(&entry, direct[i], (uintptr_t)"REPLACED");
		l += ck_ht_set_spmc(&ht, h, &entry) == false;
	}

	ck_ht_iterator_init(&iterator);
	while (ck_ht_next(&ht, &iterator, &cursor) == true) {
		if (strcmp(ck_ht_entry_value(cursor), "REPLACED") != 0) {
			fprintf(stderr, "Mismatch detected: %s, expected REPLACED\n",
			    (char *)ck_ht_entry_value(cursor));
			exit(EXIT_FAILURE);
		}
	}

	ck_ht_destroy(&ht);
	return 0;
}
#else
/* Placeholder on unsupported architectures: the test is a no-op. */
int
main(void)
{

	return 0;
}
#endif

@ -1,12 +1,13 @@
.PHONY: clean distribution
OBJECTS=ck_hp.o \
ck_barrier_centralized.o \
OBJECTS=ck_barrier_centralized.o \
ck_barrier_combining.o \
ck_barrier_dissemination.o \
ck_barrier_tournament.o \
ck_barrier_mcs.o \
ck_epoch.o
ck_epoch.o \
ck_ht.o \
ck_hp.o
all: libck.so libck.a
@ -19,6 +20,9 @@ libck.a: $(OBJECTS)
ck_epoch.o: ../include/ck_epoch.h ck_epoch.c
$(CC) $(CFLAGS) -c -o ck_epoch.o ck_epoch.c
ck_ht.o: ../include/ck_ht.h ck_ht.c
$(CC) $(CFLAGS) -c -o ck_ht.o ck_ht.c
ck_hp.o: ck_hp.c
$(CC) $(CFLAGS) -c -o ck_hp.o ck_hp.c

@ -0,0 +1,647 @@
/*
* Copyright 2012 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This implementation borrows several techniques from Josh Dybnis's
* nbds library which can be found at http://code.google.com/p/nbds
*/
/*
* This release currently only includes support for x86_64 as I haven't
* had the time to put pressure on other platforms. The portability issues
* will be addressed next release.
*/
#ifdef __x86_64__
#include <ck_cc.h>
#include <ck_ht.h>
#include <ck_pr.h>
#include <ck_stdint.h>
#include <stdbool.h>
#include <string.h>
#include "ck_ht_hash.h"
#include "ck_internal.h"
/*
 * Slots are probed in buckets of CK_HT_BUCKET_LENGTH entries intended to
 * match one cache line; CK_HT_PROBE_DEFAULT is the minimum writer probe
 * bound for small maps.
 */
#ifndef CK_HT_BUCKET_LENGTH
#define CK_HT_BUCKET_SHIFT	2ULL
#define CK_HT_BUCKET_LENGTH	(1 << CK_HT_BUCKET_SHIFT)
#define CK_HT_BUCKET_MASK	(CK_HT_BUCKET_LENGTH - 1)
#endif

#ifndef CK_HT_PROBE_DEFAULT
#define CK_HT_PROBE_DEFAULT	64ULL
#endif

/* Reserved key values: never-used slot and deleted slot (tombstone). */
#define CK_HT_KEY_EMPTY		((uintptr_t)0)
#define CK_HT_KEY_TOMBSTONE	(~(uintptr_t)0)
struct ck_ht_map {
	enum ck_ht_mode mode;
	uint64_t deletions;	/* bumped on every key replacement/removal; read-validation token */
	uint64_t probe_maximum;	/* deepest probe any insertion required; reader bound */
	uint64_t probe_length;
	uint64_t probe_limit;	/* hard writer probe bound before a grow is forced */
	uint64_t size;		/* total allocation size in bytes (for free) */
	uint64_t n_entries;	/* live entry count */
	uint64_t mask;		/* capacity - 1; capacity is a power of two */
	uint64_t capacity;	/* number of slots */
	uint64_t step;		/* log2(capacity); used to derive probe strides */
	struct ck_ht_entry *entries;	/* cache-line-aligned slot array following the header */
};

/* Process-global allocator installed via ck_ht_allocator_set. */
static struct ck_malloc allocator;
/* Hash a byte string with MurmurHash64A, seeded per-table. */
void
ck_ht_hash(struct ck_ht_hash *h,
    struct ck_ht *table,
    const void *key,
    uint16_t key_length)
{

	h->value = MurmurHash64A(key, key_length, table->seed);
	return;
}
/* Direct-mode hashing treats the key word's bytes as the byte string. */
void
ck_ht_hash_direct(struct ck_ht_hash *h,
    struct ck_ht *table,
    uintptr_t key)
{

	ck_ht_hash(h, table, &key, sizeof(key));
	return;
}
/*
 * Install the global allocator used for map allocation. Both the malloc
 * and free callbacks must be provided; returns false otherwise.
 */
bool
ck_ht_allocator_set(struct ck_malloc *m)
{

	if (m->malloc != NULL && m->free != NULL) {
		allocator.malloc = m->malloc;
		allocator.free = m->free;
		return true;
	}

	return false;
}
/*
 * Allocate and zero a map with at least `entries` slots (rounded up to a
 * power of two). Header and slot array share a single allocation; the
 * slot array is aligned up to the next cache line within the slack bytes
 * reserved for that purpose.
 */
static struct ck_ht_map *
ck_ht_map_create(enum ck_ht_mode mode, uint64_t entries)
{
	struct ck_ht_map *map;
	uint64_t size, n_entries;

	n_entries = ck_internal_power_2(entries);

	/* Slack of CK_MD_CACHELINE - 1 bytes permits aligning entries below. */
	size = sizeof(struct ck_ht_map) +
		   (sizeof(struct ck_ht_entry) * n_entries + CK_MD_CACHELINE - 1);
	map = allocator.malloc(size);
	if (map == NULL)
		return NULL;

	map->mode = mode;
	map->size = size;

	/* Writer probe bound: scales with capacity, floored at the default. */
	map->probe_limit = ck_internal_max_64(n_entries >>
	    (CK_HT_BUCKET_SHIFT + 2), CK_HT_PROBE_DEFAULT);

	map->deletions = 0;
	map->probe_maximum = 0;
	map->capacity = n_entries;
	map->step = ck_internal_bsf_64(map->capacity);
	map->mask = map->capacity - 1;
	map->n_entries = 0;

	/* Round the address just past the header up to a cache line. */
	map->entries = (struct ck_ht_entry *)(((uintptr_t)(map + 1) +
	    CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));

	/*
	 * NOTE(review): entries is derived from map + 1 and can never be
	 * NULL once map is non-NULL, so this branch appears unreachable.
	 */
	if (map->entries == NULL) {
		allocator.free(map, size, false);
		return NULL;
	}

	memset(map->entries, 0, sizeof(struct ck_ht_entry) * n_entries);
	return map;
}
/*
 * Release a map; defer is forwarded to the allocator's free callback so
 * callers can request safe-memory-reclamation-style deferred freeing.
 */
static void
ck_ht_map_destroy(struct ck_ht_map *map, bool defer)
{

	allocator.free(map, map->size, defer);
	return;
}
/*
 * Compute the next bucket-aligned probe offset. The stride is derived
 * from upper hash bits (h.value >> step) so distinct keys mapping to the
 * same initial bucket follow different probe sequences; OR-ing in
 * CK_HT_BUCKET_LENGTH guarantees a non-zero advance of at least one
 * bucket. Result is wrapped by the capacity mask.
 */
static inline size_t
ck_ht_map_probe_next(struct ck_ht_map *map, size_t offset, ck_ht_hash_t h)
{
	ck_ht_hash_t r;
	size_t stride;

	r.value = h.value >> map->step;
	stride = (r.value & ~CK_HT_BUCKET_MASK) << 1
		     | (r.value & CK_HT_BUCKET_MASK);

	return (offset + (stride | CK_HT_BUCKET_LENGTH)) & map->mask;
}
/*
 * Initialize a table with an initial capacity of at least `entries`
 * slots and the given hash seed. Returns true on success; false if map
 * allocation failed (the allocator must already be installed).
 */
bool
ck_ht_init(ck_ht_t *table, enum ck_ht_mode mode, uint64_t entries, uint64_t seed)
{
	struct ck_ht_map *map = ck_ht_map_create(mode, entries);

	table->mode = mode;
	table->seed = seed;
	table->map = map;

	return map != NULL;
}
/*
 * Locate the slot for `key` in `map`. On a hit the slot's contents are
 * copied into *snapshot and the slot's address is returned; NULL means
 * not found within the probe bound. If `available` is non-NULL it
 * receives the first tombstone seen along the probe sequence (a slot a
 * writer may re-use). `probe_limit` distinguishes the caller: non-NULL
 * marks the single writer (scan up to map->probe_limit, probe count
 * returned through it, plain loads); NULL marks a reader (scan bounded
 * by map->probe_maximum, fenced atomic loads).
 */
static struct ck_ht_entry *
ck_ht_map_probe(struct ck_ht_map *map,
    ck_ht_hash_t h,
    ck_ht_entry_t *snapshot,
    ck_ht_entry_t **available,
    const void *key,
    uint16_t key_length,
    uint64_t *probe_limit)
{
	struct ck_ht_entry *bucket, *cursor;
	struct ck_ht_entry *first = NULL;
	size_t offset, i, j;
	uint64_t probes = 0;
	uint64_t probe_maximum;

	probe_maximum = ck_pr_load_64(&map->probe_maximum);
	offset = h.value & map->mask;

	for (i = 0; i < map->probe_limit; i++) {
		/*
		 * Probe on a complete cache line first. Scan forward and wrap around to
		 * the beginning of the cache line. Only when the complete cache line has
		 * been scanned do we move on to the next row.
		 */
		bucket = (void *)((uintptr_t)(map->entries + offset) &
			     ~(CK_MD_CACHELINE - 1));

		for (j = 0; j < CK_HT_BUCKET_LENGTH; j++) {
			uint16_t k;

			probes++;

			/* Readers stop at the deepest probe any insertion required. */
			if (probe_limit == NULL && probes > probe_maximum)
				return NULL;

			cursor = bucket + ((j + offset) & (CK_HT_BUCKET_LENGTH - 1));

			/*
			 * Technically, we should probably lift this to a separate probe
			 * function. A lot of complexity in here belongs only for the
			 * reader. However, assuming a reasonable BTB we can attempt to
			 * at least avoid fence costs for the writer until we decide
			 * it is worth the code duplication.
			 */
			if (probe_limit == NULL) {
				/*
				 * Key is loaded before value; the fence pairs
				 * with the writer's value-before-key store
				 * order in ck_ht_set_spmc.
				 */
				snapshot->key = (uintptr_t)ck_pr_load_ptr(&cursor->key);
				ck_pr_fence_load();
				snapshot->value = (uintptr_t)ck_pr_load_ptr(&cursor->value);
			} else {
				snapshot->key = cursor->key;
				snapshot->value = cursor->value;
			}

			/*
			 * It is probably worth it to encapsulate probe state
			 * in order to prevent a complete reprobe sequence in
			 * the case of intermittent writers.
			 */
			if (first == NULL && snapshot->key == CK_HT_KEY_TOMBSTONE) {
				first = cursor;
				continue;
			}

			/* An empty slot terminates the probe sequence. */
			if (snapshot->key == CK_HT_KEY_EMPTY)
				goto leave;

			if (snapshot->key == (uintptr_t)key)
				goto leave;

			if (map->mode == CK_HT_MODE_BYTESTRING) {
				void *pointer;

				/*
				 * Check memoized portion of hash value before
				 * expensive full-length comparison.
				 */
				k = ck_ht_entry_key_length(snapshot);
				if (k != key_length)
					continue;

				if (snapshot->value >> 48 != ((h.value >> 32) & 0xFFFF))
					continue;

				pointer = ck_ht_entry_key(snapshot);
				if (memcmp(pointer, key, key_length) == 0)
					goto leave;
			}
		}

		offset = ck_ht_map_probe_next(map, offset, h);
	}

	return NULL;

leave:
	if (probe_limit != NULL)
		*probe_limit = probes;

	if (available != NULL)
		*available = first;

	return cursor;
}
/* Return the live entry count; safe against concurrent map replacement. */
uint64_t
ck_ht_count(ck_ht_t *table)
{
	struct ck_ht_map *map = ck_pr_load_ptr(&table->map);

	return ck_pr_load_64(&map->n_entries);
}
/*
 * Advance the iterator to the next live entry, storing its address in
 * *entry. Returns false once the table is exhausted. Must not run
 * concurrently with mutations (see header).
 */
bool
ck_ht_next(struct ck_ht *table,
    struct ck_ht_iterator *i,
    struct ck_ht_entry **entry)
{
	struct ck_ht_map *map = table->map;

	while (i->offset < map->capacity) {
		uintptr_t key = map->entries[i->offset].key;

		/* Skip never-used slots and tombstones. */
		if (key != CK_HT_KEY_EMPTY && key != CK_HT_KEY_TOMBSTONE) {
			*entry = map->entries + i->offset++;
			return true;
		}

		i->offset++;
	}

	return false;
}
/*
 * Atomically replace the current map with a fresh empty map of the same
 * capacity; the old map is destroyed with deferred reclamation so
 * concurrent readers holding it remain safe.
 *
 * NOTE(review): the new map pointer is published without a preceding
 * ck_pr_fence_store (ck_ht_grow_spmc has one) — this relies on x86_64
 * TSO ordering of the initialization stores; confirm before porting.
 */
bool
ck_ht_reset_spmc(struct ck_ht *table)
{
	struct ck_ht_map *map, *update;

	map = table->map;
	update = ck_ht_map_create(table->mode, map->capacity);
	if (update == NULL)
		return false;

	ck_pr_store_ptr(&table->map, update);
	ck_ht_map_destroy(map, true);
	return true;
}
/*
 * Writer-only resize: build a map of at least `capacity` slots, rehash
 * every live entry into it, then publish it with a fenced pointer store
 * and defer destruction of the old map. Returns false if the current
 * capacity already suffices or allocation fails. If the rehash itself
 * exceeds the new map's probe limit, the attempt is retried recursively
 * with doubled capacity.
 */
bool
ck_ht_grow_spmc(ck_ht_t *table, uint64_t capacity)
{
	struct ck_ht_map *map, *update;
	struct ck_ht_entry *bucket, *previous;
	struct ck_ht_hash h;
	size_t k, i, j, offset;
	uint64_t probes;

	map = table->map;

	if (map->capacity >= capacity)
		return false;

	update = ck_ht_map_create(table->mode, capacity);
	if (update == NULL)
		return false;

	for (k = 0; k < map->capacity; k++) {
		previous = &map->entries[k];

		if (previous->key == CK_HT_KEY_EMPTY || previous->key == CK_HT_KEY_TOMBSTONE)
			continue;

		/* Recompute the hash; it is not stored in full in the slot. */
		if (table->mode == CK_HT_MODE_BYTESTRING) {
			void *key;
			uint16_t key_length;

			key = ck_ht_entry_key(previous);
			key_length = ck_ht_entry_key_length(previous);
			ck_ht_hash(&h, table, key, key_length);
		} else {
			ck_ht_hash(&h, table, &previous->key, sizeof(previous->key));
		}

		/* Insert into the first empty slot along the probe sequence. */
		offset = h.value & update->mask;
		probes = 0;

		for (i = 0; i < update->probe_limit; i++) {
			bucket = (void *)((uintptr_t)(update->entries + offset) & ~(CK_MD_CACHELINE - 1));

			for (j = 0; j < CK_HT_BUCKET_LENGTH; j++) {
				struct ck_ht_entry *cursor = bucket + ((j + offset) & (CK_HT_BUCKET_LENGTH - 1));

				probes++;
				if (cursor->key == CK_HT_KEY_EMPTY) {
					*cursor = *previous;
					update->n_entries++;

					if (probes > update->probe_maximum)
						update->probe_maximum = probes;

					break;
				}
			}

			if (j < CK_HT_BUCKET_LENGTH)
				break;

			offset = ck_ht_map_probe_next(update, offset, h);
		}

		if (i == update->probe_limit) {
			/*
			 * We have hit the probe limit, the map needs to be even
			 * larger.
			 */
			ck_ht_map_destroy(update, false);
			return ck_ht_grow_spmc(table, capacity << 1);
		}
	}

	/* Ensure the fully-built map is visible before publication. */
	ck_pr_fence_store();
	ck_pr_store_ptr(&table->map, update);
	ck_ht_map_destroy(map, true);
	return true;
}
/*
 * Writer-only removal. On success the removed (key, value) pair is
 * copied into *entry, the slot is tombstoned, and the deletions counter
 * is bumped so concurrent readers can detect the transition.
 */
bool
ck_ht_remove_spmc(ck_ht_t *table,
    ck_ht_hash_t h,
    ck_ht_entry_t *entry)
{
	struct ck_ht_map *map;
	struct ck_ht_entry *candidate, snapshot;

	map = table->map;

	if (table->mode == CK_HT_MODE_BYTESTRING) {
		candidate = ck_ht_map_probe(map, h, &snapshot, NULL,
		    ck_ht_entry_key(entry), ck_ht_entry_key_length(entry), NULL);
	} else {
		candidate = ck_ht_map_probe(map, h, &snapshot, NULL,
		    (void *)entry->key, sizeof(entry->key), NULL);
	}

	/* No matching entry was found. */
	if (candidate == NULL || snapshot.key == CK_HT_KEY_EMPTY)
		return false;

	*entry = snapshot;
	ck_pr_store_ptr(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE);

	/*
	 * It is possible that the key is read before transition into
	 * the tombstone state. Assuming the keys do match, a reader
	 * may have already acquired a snapshot of the value at the time.
	 * However, assume the reader is preempted as a deletion occurs
	 * followed by a replacement. In this case, it is possible that
	 * the reader acquires some value V' instead of V. Let us assume
	 * however that any transition from V into V' (essentially, update
	 * of a value without the reader knowing of a K -> K' transition),
	 * is preceded by an update to the deletions counter. This guarantees
	 * any replacement of a T key also implies a D -> D' transition.
	 * If D has not transitioned, the value has yet to be replaced so it
	 * is a valid association with K and is safe to return. If D has
	 * transitioned after a reader has acquired a snapshot then it is
	 * possible that we are in the invalid state of (K, V'). The reader
	 * is then able to attempt a reprobe at which point the only visible
	 * states should be (T, V') or (K', V'). The latter is guaranteed
	 * through memory fencing.
	 */
	ck_pr_store_64(&map->deletions, map->deletions + 1);
	ck_pr_fence_store();
	ck_pr_store_64(&map->n_entries, map->n_entries - 1);
	return true;
}
/*
 * Reader-side lookup, safe against a single concurrent writer. The
 * deletions counter is sampled before and after the probe; if it moved,
 * the snapshot may pair a stale key with a replaced value (K, V'), so
 * the operation is restarted. On success the consistent (key, value)
 * snapshot is copied into *entry.
 *
 * The original implementation restarted by tail-calling itself; under a
 * sustained stream of concurrent deletions (or without tail-call
 * optimization) that recursion can grow the stack without bound, so the
 * restart is expressed as a loop instead. Behavior is otherwise
 * unchanged.
 */
bool
ck_ht_get_spmc(ck_ht_t *table,
    ck_ht_hash_t h,
    ck_ht_entry_t *entry)
{
	struct ck_ht_entry *candidate, snapshot;
	struct ck_ht_map *map;
	uint64_t d, d_prime;

restart:
	map = ck_pr_load_ptr(&table->map);

	/* Sample the deletions counter before probing. */
	d = ck_pr_load_64(&map->deletions);

	if (table->mode == CK_HT_MODE_BYTESTRING) {
		candidate = ck_ht_map_probe(map, h, &snapshot, NULL,
		    ck_ht_entry_key(entry), ck_ht_entry_key_length(entry), NULL);
	} else {
		candidate = ck_ht_map_probe(map, h, &snapshot, NULL,
		    (void *)entry->key, sizeof(entry->key), NULL);
	}

	if (candidate == NULL || snapshot.key == CK_HT_KEY_EMPTY)
		return false;

	d_prime = ck_pr_load_64(&map->deletions);
	if (d != d_prime) {
		/*
		 * It is possible we have read (K, V'). Only valid states are
		 * (K, V), (K', V') and (T, V). Restart load operation in face
		 * of concurrent deletions.
		 */
		goto restart;
	}

	*entry = snapshot;
	return true;
}
/*
 * Inserts or replaces the association for the key in *entry. On success,
 * *entry is overwritten with a snapshot of the previous slot contents
 * (empty if this was a fresh insertion) and true is returned; false is
 * returned only if a required table grow fails.
 * NOTE(review): per the SPMC design this must only be called from the
 * single writer thread — confirm against callers.
 */
bool
ck_ht_set_spmc(ck_ht_t *table,
    ck_ht_hash_t h,
    ck_ht_entry_t *entry)
{
	struct ck_ht_entry snapshot, *candidate, *priority;
	struct ck_ht_map *map;
	uint64_t probes;

	/* Retry the probe until a usable slot is found, growing on failure. */
	for (;;) {
		map = table->map;

		if (table->mode == CK_HT_MODE_BYTESTRING) {
			candidate = ck_ht_map_probe(map, h, &snapshot, &priority,
			    ck_ht_entry_key(entry),
			    ck_ht_entry_key_length(entry),
			    &probes);
		} else {
			candidate = ck_ht_map_probe(map, h, &snapshot, &priority,
			    (void *)entry->key,
			    sizeof(entry->key),
			    &probes);
		}

		if (candidate != NULL)
			break;

		/* No slot available in the probe sequence; double capacity. */
		if (ck_ht_grow_spmc(table, map->capacity << 1) == false)
			return false;
	}

	/* Readers bound their probes by probe_maximum; publish any growth. */
	if (probes > map->probe_maximum)
		ck_pr_store_64(&map->probe_maximum, probes);

	if (candidate->key != CK_HT_KEY_EMPTY && priority != NULL) {
		/*
		 * If we are replacing an existing entry and an earlier
		 * tombstone was found in the probe sequence then replace
		 * the existing entry in a manner that doesn't violate the
		 * linearizability of concurrent get operations. The value
		 * is published before the key (fenced) so a reader that
		 * observes the new key also observes its value; the old
		 * slot is then tombstoned and the deletions counter bumped
		 * so in-flight readers can detect the transition.
		 */
		ck_pr_store_ptr(&priority->value, (void *)entry->value);
		ck_pr_fence_store();
		ck_pr_store_ptr(&priority->key, (void *)entry->key);
		ck_pr_fence_store();
		ck_pr_store_ptr(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE);
		ck_pr_store_64(&map->deletions, map->deletions + 1);
		ck_pr_fence_store();
	} else {
		/*
		 * In this case we are inserting a new entry or replacing
		 * an existing entry in place.
		 */
		bool replace = candidate->key != CK_HT_KEY_EMPTY;

		/* Prefer an earlier tombstone slot in the probe sequence. */
		if (priority != NULL)
			candidate = priority;

		/* Value is published before the key so readers never pair a
		 * fresh key with a stale value. */
		ck_pr_store_ptr(&candidate->value, (void *)entry->value);
		ck_pr_fence_store();
		ck_pr_store_ptr(&candidate->key, (void *)entry->key);

		/*
		 * If we are inserting a new entry then increment the number
		 * of entries associated with the map.
		 */
		if (replace == false)
			ck_pr_store_64(&map->n_entries, map->n_entries + 1);
	}

	/* Enforce a load factor of 0.5. */
	if (map->n_entries * 2 > map->capacity)
		ck_ht_grow_spmc(table, map->capacity << 1);

	/* Return the previous association (if any) to the caller. */
	*entry = snapshot;
	return true;
}
/*
 * Inserts the association in *entry only if the key is not already
 * present. Returns true on successful insertion; false if an identical
 * live key exists or if a required table grow fails. Unlike set, put
 * never replaces an existing association.
 * NOTE(review): per the SPMC design this must only be called from the
 * single writer thread — confirm against callers.
 */
bool
ck_ht_put_spmc(ck_ht_t *table,
    ck_ht_hash_t h,
    ck_ht_entry_t *entry)
{
	struct ck_ht_entry snapshot, *candidate, *priority;
	struct ck_ht_map *map;
	uint64_t probes;

	/* Retry the probe until a usable slot is found, growing on failure. */
	for (;;) {
		map = table->map;

		if (table->mode == CK_HT_MODE_BYTESTRING) {
			candidate = ck_ht_map_probe(map, h, &snapshot, &priority,
			    ck_ht_entry_key(entry),
			    ck_ht_entry_key_length(entry),
			    &probes);
		} else {
			candidate = ck_ht_map_probe(map, h, &snapshot, &priority,
			    (void *)entry->key,
			    sizeof(entry->key),
			    &probes);
		}

		if (candidate != NULL)
			break;

		/* No slot available in the probe sequence; double capacity. */
		if (ck_ht_grow_spmc(table, map->capacity << 1) == false)
			return false;
	}

	/*
	 * If the candidate slot's key is neither empty nor a tombstone then
	 * an identical key is already present. As put does not implement
	 * replacement, we will fail.
	 */
	if (candidate->key != CK_HT_KEY_EMPTY && candidate->key != CK_HT_KEY_TOMBSTONE)
		return false;

	/* Readers bound their probes by probe_maximum; publish any growth. */
	if (probes > map->probe_maximum)
		ck_pr_store_64(&map->probe_maximum, probes);

	/*
	 * If an earlier tombstone value was found, then store into that slot instead.
	 * It is earlier in the probe sequence to begin with.
	 */
	if (priority != NULL)
		candidate = priority;

	/* Value is published before the key so readers never pair a fresh
	 * key with a stale value. */
	ck_pr_store_ptr(&candidate->value, (void *)entry->value);
	ck_pr_fence_store();
	ck_pr_store_ptr(&candidate->key, (void *)entry->key);
	ck_pr_store_64(&map->n_entries, map->n_entries + 1);

	/* Enforce a load factor of 0.5. */
	if (map->n_entries * 2 > map->capacity)
		ck_ht_grow_spmc(table, map->capacity << 1);

	return true;
}
/*
 * Releases the storage associated with the table's current map. The
 * table object itself remains owned by the caller. The false argument
 * is forwarded to ck_ht_map_destroy's second parameter — presumably it
 * selects immediate (non-deferred) destruction; confirm against that
 * function's definition.
 */
void
ck_ht_destroy(struct ck_ht *table)
{

	ck_ht_map_destroy(table->map, false);
}
#endif /* __x86_64__ */

@ -0,0 +1,246 @@
/*
* Copyright 2012 Samy Al Bahra
* Copyright 2011 AppNexus, Inc.
*
* This is unpublished proprietary source code of AppNexus, Inc.
* The copyright notice above does not evidence any actual or
* intended publication of such source code.
*
* Redistribution of this material is strictly prohibited.
*/
#ifndef _CK_HT_HASH_H
#define _CK_HT_HASH_H
/*
* This is the Murmur hash written by Austin Appleby.
*/
#include <ck_stdint.h>
//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER)
#define FORCE_INLINE __forceinline
#include <stdlib.h>
#define ROTL32(x,y) _rotl(x,y)
#define ROTL64(x,y) _rotl64(x,y)
#define BIG_CONSTANT(x) (x)
// Other compilers
#else // defined(_MSC_VER)
#define FORCE_INLINE __attribute__((always_inline))
/* Rotate a 32-bit value left by r bits. Assumes r is in [1, 31]: a
 * rotation by 0 would shift right by 32, which is undefined — TODO
 * confirm all call sites use non-zero constants. */
static inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
	const uint32_t upper = x << r;
	const uint32_t wrapped = x >> (32 - r);

	return upper | wrapped;
}
/* Rotate a 64-bit value left by r bits. Assumes r is in [1, 63]: a
 * rotation by 0 would shift right by 64, which is undefined — TODO
 * confirm all call sites use non-zero constants. */
static inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
	const uint64_t upper = x << r;
	const uint64_t wrapped = x >> (64 - r);

	return upper | wrapped;
}
#define ROTL32(x,y) rotl32(x,y)
#define ROTL64(x,y) rotl64(x,y)
#define BIG_CONSTANT(x) (x##LLU)
#endif // !defined(_MSC_VER)
//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
/*
 * Block read — if your platform needs to do endian-swapping or can only
 * handle aligned reads, do the conversion here. The index may be
 * negative: the hash body indexes backwards from a pointer one past the
 * last whole block.
 */
static inline uint32_t getblock ( const uint32_t * p, int i )
{

	return *(p + i);
}
//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche
/*
 * Finalization mix — forces all bits of a hash block to avalanche.
 * Alternating xor-shift and multiply steps from the MurmurHash3
 * reference implementation.
 */
static inline uint32_t fmix ( uint32_t h )
{

	h = (h ^ (h >> 16)) * 0x85ebca6b;
	h = (h ^ (h >> 13)) * 0xc2b2ae35;
	return h ^ (h >> 16);
}
//-----------------------------------------------------------------------------
/*
 * MurmurHash3 (x86, 32-bit variant) by Austin Appleby, public domain.
 * Hashes len bytes of key with the given seed and writes the 32-bit
 * result to *out. The rotation, block-read and finalization helpers are
 * inlined here so the function is self-contained.
 */
static inline void MurmurHash3_x86_32 ( const void * key, int len,
    uint32_t seed, uint32_t * out )
{
	const uint8_t *bytes = (const uint8_t *)key;
	const int nblocks = len / 4;
	const uint32_t c1 = 0xcc9e2d51;
	const uint32_t c2 = 0x1b873593;
	uint32_t h1 = seed;
	int i;

	/* Body: mix in the input four bytes at a time, indexing backwards
	 * from one past the last whole block. */
	const uint32_t *blocks = (const uint32_t *)(const void *)(bytes + nblocks * 4);

	for (i = -nblocks; i != 0; i++) {
		uint32_t k1 = blocks[i];

		k1 *= c1;
		k1 = (k1 << 15) | (k1 >> 17);
		k1 *= c2;

		h1 ^= k1;
		h1 = (h1 << 13) | (h1 >> 19);
		h1 = h1 * 5 + 0xe6546b64;
	}

	/* Tail: fold in the remaining one to three bytes. */
	const uint8_t *tail = bytes + nblocks * 4;
	uint32_t k1 = 0;

	switch (len & 3) {
	case 3:
		k1 ^= tail[2] << 16;
		/* fallthrough */
	case 2:
		k1 ^= tail[1] << 8;
		/* fallthrough */
	case 1:
		k1 ^= tail[0];
		k1 *= c1;
		k1 = (k1 << 15) | (k1 >> 17);
		k1 *= c2;
		h1 ^= k1;
	}

	/* Finalization: force all bits to avalanche. */
	h1 ^= len;
	h1 ^= h1 >> 16;
	h1 *= 0x85ebca6b;
	h1 ^= h1 >> 13;
	h1 *= 0xc2b2ae35;
	h1 ^= h1 >> 16;

	*out = h1;
}
/*
 * MurmurHash2, 64-bit variant for 64-bit platforms (MurmurHash64A) by
 * Austin Appleby. Hashes len bytes of key with the given seed and
 * returns a 64-bit result. Note: for len == 0 the key bytes are never
 * read, so the result depends only on the seed.
 */
static inline uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed )
{
	const uint64_t m = 0xc6a4a7935bd1e995ULL;
	const int r = 47;
	const uint64_t *cursor = (const uint64_t *)key;
	const uint64_t *last = cursor + (len / 8);
	uint64_t h = seed ^ (len * m);

	/* Body: consume the input eight bytes at a time. */
	while (cursor != last) {
		uint64_t k = *cursor++;

		k *= m;
		k ^= k >> r;
		k *= m;

		h ^= k;
		h *= m;
	}

	/* Tail: fold in the remaining one to seven bytes. */
	const unsigned char *tail = (const unsigned char *)cursor;

	switch (len & 7) {
	case 7: h ^= (uint64_t)(tail[6]) << 48; /* fallthrough */
	case 6: h ^= (uint64_t)(tail[5]) << 40; /* fallthrough */
	case 5: h ^= (uint64_t)(tail[4]) << 32; /* fallthrough */
	case 4: h ^= (uint64_t)(tail[3]) << 24; /* fallthrough */
	case 3: h ^= (uint64_t)(tail[2]) << 16; /* fallthrough */
	case 2: h ^= (uint64_t)(tail[1]) << 8; /* fallthrough */
	case 1: h ^= (uint64_t)(tail[0]);
		h *= m;
	}

	/* Finalization. */
	h ^= h >> r;
	h *= m;
	h ^= h >> r;

	return h;
}
/*
 * MurmurHash2, 64-bit result computed with 32-bit arithmetic
 * (MurmurHash64B) by Austin Appleby — intended for 32-bit platforms.
 * Maintains two 32-bit accumulators seeded from the low and high halves
 * of seed, then cross-mixes them into a single 64-bit result.
 */
static inline uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed )
{
	const uint32_t m = 0x5bd1e995;
	const int r = 24;
	const uint32_t *cursor = (const uint32_t *)key;
	uint32_t h1 = (uint32_t)(seed) ^ len;
	uint32_t h2 = (uint32_t)(seed >> 32);

	/* Body: eight bytes per iteration, one word into each half. */
	while (len >= 8) {
		uint32_t k1 = *cursor++;
		uint32_t k2;

		k1 *= m; k1 ^= k1 >> r; k1 *= m;
		h1 *= m; h1 ^= k1;

		k2 = *cursor++;
		k2 *= m; k2 ^= k2 >> r; k2 *= m;
		h2 *= m; h2 ^= k2;

		len -= 8;
	}

	/* One remaining whole word goes into the first half. */
	if (len >= 4) {
		uint32_t k1 = *cursor++;

		k1 *= m; k1 ^= k1 >> r; k1 *= m;
		h1 *= m; h1 ^= k1;
		len -= 4;
	}

	/* Trailing one to three bytes go into the second half. */
	switch (len) {
	case 3: h2 ^= ((const unsigned char *)cursor)[2] << 16; /* fallthrough */
	case 2: h2 ^= ((const unsigned char *)cursor)[1] << 8; /* fallthrough */
	case 1: h2 ^= ((const unsigned char *)cursor)[0];
		h2 *= m;
	}

	/* Final cross-avalanche of the two halves. */
	h1 ^= h2 >> 18; h1 *= m;
	h2 ^= h1 >> 22; h2 *= m;
	h1 ^= h2 >> 17; h1 *= m;
	h2 ^= h1 >> 19; h2 *= m;

	return ((uint64_t)h1 << 32) | h2;
}
#endif /* _CK_HT_HASH_H */

@ -26,7 +26,7 @@
*/
/*
* Log and power_2 algorithms from: http://graphics.stanford.edu/~seander/bithacks.html
* Several of these are from: http://graphics.stanford.edu/~seander/bithacks.html
*/
#define CK_INTERNAL_LOG_0 (0xAAAAAAAA)
@ -60,3 +60,30 @@ ck_internal_power_2(uint32_t v)
return (++v);
}
/*
 * Branch-free maximum of two 64-bit values. The mask is all ones when
 * x < y (selecting y) and zero otherwise (selecting x); kept branchless
 * deliberately, in the style of the bithacks collection cited above.
 */
CK_CC_INLINE static uint64_t
ck_internal_max_64(uint64_t x, uint64_t y)
{
	const uint64_t take_y = -(uint64_t)(x < y);

	return (x & ~take_y) | (y & take_y);
}
/*
 * Branch-free maximum of two 32-bit values. The mask is all ones when
 * x < y (selecting y) and zero otherwise (selecting x); kept branchless
 * deliberately, in the style of the bithacks collection cited above.
 */
CK_CC_INLINE static uint32_t
ck_internal_max_32(uint32_t x, uint32_t y)
{
	const uint32_t take_y = -(uint32_t)(x < y);

	return (x & ~take_y) | (y & take_y);
}
/*
 * Returns the number of leading zero bits of v (the distance of the
 * most significant set bit from bit 63), or 64 when v is zero.
 * NOTE(review): the name suggests a forward (LSB-first) scan, but the
 * scan order here — preserved from the original — is MSB-first; confirm
 * against callers which orientation they expect.
 *
 * The original shifted the int constant 1 by 56..63 bits — undefined
 * behavior for a 32-bit int — and only examined the top 8 bits (i < 8).
 * Fixed to use a 64-bit constant and scan all 64 bit positions.
 */
CK_CC_INLINE static uint64_t
ck_internal_bsf_64(uint64_t v)
{
	unsigned int i;

	for (i = 0; i < 64; i++) {
		if (v & (1ULL << (63 - i)))
			return i;
	}

	return i;
}

Loading…
Cancel
Save