Samy Al Bahra 10 years ago
commit 0029650be1

8
configure vendored

@ -170,13 +170,15 @@ for option; do
echo " --mandir=N Manual pages directory (default is ${PREFIX}/man)"
echo " --prefix=N Installs library files in N (default is $PREFIX)"
echo
echo "The following options will modify code generation."
echo " --cores=N Specify number of cores available on target machine"
echo "The following options will affect generated code."
echo " --enable-pointer-packing Assumes address encoding is subset of pointer range"
echo " --enable-rtm Enable restricted transactional memory (x86_64 only)"
echo " --enable-rtm Enable restricted transactional memory (power, x86_64)"
echo " --memory-model=N Specify memory model (currently tso, pso or rmo)"
echo " --vma-bits=N Specify valid number of VMA bits"
echo
echo "The following options affect regression testing."
echo " --cores=N Specify number of cores available on target machine"
echo
echo "The following environment variables may be used:"
echo " CC C compiler command"
echo " CFLAGS C compiler flags"

@ -99,6 +99,7 @@ OBJECTS=CK_ARRAY_FOREACH \
ck_rhs_next \
ck_rhs_get \
ck_rhs_put \
ck_rhs_put_unique \
ck_rhs_set \
ck_rhs_fas \
ck_rhs_remove \
@ -190,6 +191,18 @@ html:
$(BUILD_DIR)/doc/$$target$(HTML_SUFFIX); \
done
# check for entries that are missing in OBJECTS
objcheck: all
for file in `ls * | egrep '(ck|CK)_' | egrep -v "($(GZIP_SUFFIX)|$(HTML_SUFFIX))$$"`; do \
if [ ! -f $${file}$(GZIP_SUFFIX) ]; then \
echo "$$file is missing from OBJECTS" >&2; \
fi; \
done
# check for stale references
refcheck:
@./refcheck.pl $(OBJECTS)
install:
mkdir -p $(DESTDIR)/$(MANDIR)/man3 || exit
cp *$(GZIP_SUFFIX) $(DESTDIR)/$(MANDIR)/man3 || exit
@ -200,5 +213,5 @@ uninstall:
done
clean:
rm -f $(BUILD_DIR)/doc/*~ $(BUILD_DIR)/doc/*.3.gz $(BUILD_DIR)/doc/*.html
rm -f $(BUILD_DIR)/doc/*~ $(BUILD_DIR)/doc/*$(GZIP_SUFFIX) $(BUILD_DIR)/doc/*$(HTML_SUFFIX)

@ -76,7 +76,7 @@ with using the
.Xr ck_ht_entry_key_direct 3 ,
.Xr ck_ht_entry_value_direct 3
and
.Xr ck_entry_set_direct 3
.Xr ck_ht_entry_set_direct 3
functions. Attempting a hash table operation with a key or value of 0 or
UINTPTR_MAX will result in undefined behavior.
.El

@ -0,0 +1,27 @@
#!/usr/bin/perl
#
# Cross-reference checker for the manual pages: every mdoc .Xr reference
# to a ck_*/CK_* page must name a page that was passed on the command line.
# Missing targets are reported on STDERR, one line per stale reference.
use warnings;
use strict;

# Index the page names we actually have.
my %known = map { $_ => 1 } @ARGV;

for my $file (@ARGV) {
	open(my $fh, "<", $file) or die "cannot open < $file: $!";
	while (my $line = <$fh>) {
		chomp $line;
		# Match cross references of the form ".Xr ck_foo 3".
		next unless $line =~ /\.Xr (?<name>(?:ck|CK)_[a-zA-Z_]+) (?<section>[0-9])/;
		my ($name, $section) = @+{qw(name section)};
		print STDERR "$file: ref to missing ${name}($section)\n"
		    unless $known{$name};
	}
	close($fh) or die("cannot close $file: $!");
}

@ -27,6 +27,10 @@
#ifndef _CK_HT_H
#define _CK_HT_H
#ifndef _CK_HT_IM
#warning ck_ht is deprecated, see ck_hm, ck_hs or ck_rhs.
#endif /* !_CK_HT_IM */
#include <ck_pr.h>
#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_STORE_64)

@ -138,6 +138,17 @@ CK_PR_FENCE_NOOP(release)
#undef CK_PR_FENCE_EMIT
#undef CK_PR_FENCE_NOOP
#ifndef CK_F_PR_RFO
#define CK_F_PR_RFO
CK_CC_INLINE static void
ck_pr_rfo(const void *m)
{
(void)m;
return;
}
#endif /* CK_F_PR_RFO */
#define CK_PR_BIN(K, S, M, T, P, C) \
CK_CC_INLINE static void \
ck_pr_##K##_##S(M *target, T value) \

@ -91,6 +91,26 @@ CK_PR_FENCE(acquire, "mfence")
#undef CK_PR_FENCE
/*
* Read for ownership. Older compilers will generate the 32-bit
* 3DNow! variant which is binary compatible with x86-64 variant
* of prefetchw.
*/
#ifndef CK_F_PR_RFO
#define CK_F_PR_RFO
CK_CC_INLINE static void
ck_pr_rfo(const void *m)
{
__asm__ __volatile__("prefetchw (%0)"
:
: "r" (m)
: "memory");
return;
}
#endif /* CK_F_PR_RFO */
/*
* Atomic fetch-and-store operations.
*/

@ -162,4 +162,3 @@ ck_spinlock_anderson_unlock(struct ck_spinlock_anderson *lock,
}
#endif /* CK_F_SPINLOCK_ANDERSON */
#endif /* _CK_SPINLOCK_ANDERSON_H */

@ -118,4 +118,3 @@ CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_cas, ck_spinlock_cas_t,
#endif /* CK_F_SPINLOCK_CAS */
#endif /* _CK_SPINLOCK_CAS_H */

@ -114,4 +114,3 @@ ck_spinlock_clh_unlock(struct ck_spinlock_clh **thread)
}
#endif /* CK_F_SPINLOCK_CLH */
#endif /* _CK_SPINLOCK_CLH_H */

@ -140,4 +140,3 @@ CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_dec, ck_spinlock_dec_t,
#endif /* CK_F_SPINLOCK_DEC */
#endif /* _CK_SPINLOCK_DEC_H */

@ -115,4 +115,3 @@ CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_fas, ck_spinlock_fas_t,
#endif /* CK_F_SPINLOCK_FAS */
#endif /* _CK_SPINLOCK_FAS_H */

@ -142,4 +142,3 @@ ck_spinlock_hclh_unlock(struct ck_spinlock_hclh **thread)
}
#endif /* CK_F_SPINLOCK_HCLH */
#endif /* _CK_SPINLOCK_HCLH_H */

@ -146,4 +146,3 @@ ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *n
}
#endif /* CK_F_SPINLOCK_MCS */
#endif /* _CK_SPINLOCK_MCS_H */

@ -295,4 +295,3 @@ CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_ticket, ck_spinlock_ticket_t,
#endif /* CK_F_SPINLOCK_TICKET */
#endif /* _CK_SPINLOCK_TICKET_H */

@ -24,6 +24,7 @@
* SUCH DAMAGE.
*/
#define _CK_HT_IM
#include <ck_ht.h>
#ifdef CK_F_HT
@ -451,6 +452,8 @@ ck_ht_gc(struct ck_ht *ht, unsigned long cycles, unsigned long seed)
offset = h.value & map->mask;
if (priority != NULL) {
ck_pr_store_64(&map->deletions, map->deletions + 1);
ck_pr_fence_store();
#ifndef CK_HT_PP
ck_pr_store_64(&priority->key_length, entry->key_length);
ck_pr_store_64(&priority->hash, entry->hash);
@ -463,8 +466,6 @@ ck_ht_gc(struct ck_ht *ht, unsigned long cycles, unsigned long seed)
ck_pr_fence_store();
ck_pr_store_ptr(&entry->key, (void *)CK_HT_KEY_TOMBSTONE);
ck_pr_fence_store();
ck_pr_store_64(&map->deletions, map->deletions + 1);
ck_pr_fence_store();
}
if (cycles == 0) {
@ -768,29 +769,8 @@ ck_ht_remove_spmc(struct ck_ht *table,
return false;
*entry = snapshot;
ck_pr_store_ptr(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE);
/*
* It is possible that the key is read before transition into
* the tombstone state. Assuming the keys do match, a reader
* may have already acquired a snapshot of the value at the time.
* However, assume the reader is preempted as a deletion occurs
* followed by a replacement. In this case, it is possible that
* the reader acquires some value V' instead of V. Let us assume
* however that any transition from V into V' (essentially, update
* of a value without the reader knowing of a K -> K' transition),
* is preceded by an update to the deletions counter. This guarantees
* any replacement of a T key also implies a D -> D' transition.
* If D has not transitioned, the value has yet to be replaced so it
* is a valid association with K and is safe to return. If D has
* transitioned after a reader has acquired a snapshot then it is
* possible that we are in the invalid state of (K, V'). The reader
* is then able to attempt a reprobe at which point the only visible
* states should be (T, V') or (K', V'). The latter is guaranteed
* through memory fencing.
*/
ck_pr_fence_store();
ck_pr_store_64(&map->deletions, map->deletions + 1);
ck_pr_store_ptr(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE);
ck_pr_fence_store();
ck_pr_store_64(&map->n_entries, map->n_entries - 1);
return true;
@ -884,13 +864,11 @@ ck_ht_set_spmc(struct ck_ht *table,
if (candidate->key != CK_HT_KEY_EMPTY &&
priority != NULL && candidate != priority) {
/*
* If we are replacing an existing entry and an earlier
* tombstone was found in the probe sequence then replace
* the existing entry in a manner that doesn't affect linearizability
* of concurrent get operations. We avoid a state of (K, B)
* (where [K, B] -> [K', B]) by guaranteeing a forced reprobe
* before transitioning from K to T. (K, B) implies (K, B, D')
* so we will reprobe successfully from this transient state.
* Entry is moved into another position in probe sequence.
* We avoid a state of (K, B) (where [K, B] -> [K', B]) by
* guaranteeing a forced reprobe before transitioning from K to
* T. (K, B) implies (K, B, D') so we will reprobe successfully
* from this transient state.
*/
probes = probes_wr;
@ -898,28 +876,47 @@ ck_ht_set_spmc(struct ck_ht *table,
ck_pr_store_64(&priority->key_length, entry->key_length);
ck_pr_store_64(&priority->hash, entry->hash);
#endif
/*
* Readers must observe version counter change before they
* observe re-use. If they observe re-use, it is at most
* a tombstone.
*/
if (priority->value == CK_HT_KEY_TOMBSTONE) {
ck_pr_store_64(&map->deletions, map->deletions + 1);
ck_pr_fence_store();
}
ck_pr_store_ptr(&priority->value, (void *)entry->value);
ck_pr_fence_store();
ck_pr_store_ptr(&priority->key, (void *)entry->key);
ck_pr_fence_store();
/*
* Make sure that readers who observe the tombstone would
* also observe counter change.
*/
ck_pr_store_64(&map->deletions, map->deletions + 1);
ck_pr_fence_store();
ck_pr_store_ptr(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE);
ck_pr_fence_store();
ck_pr_store_64(&map->deletions, map->deletions + 1);
ck_pr_fence_store();
} else {
/*
* In this case we are inserting a new entry or replacing
* an existing entry. There is no need to force a re-probe
* on tombstone replacement due to the fact that previous
* deletion counter update would have been published with
* respect to any concurrent probes.
* an existing entry. Yes, this can be combined into above branch,
* but isn't because you are actually looking at dying code
* (ck_ht is effectively deprecated and is being replaced soon).
*/
bool replace = candidate->key != CK_HT_KEY_EMPTY &&
candidate->key != CK_HT_KEY_TOMBSTONE;
if (priority != NULL) {
if (priority->key == CK_HT_KEY_TOMBSTONE) {
ck_pr_store_64(&map->deletions, map->deletions + 1);
ck_pr_fence_store();
}
candidate = priority;
probes = probes_wr;
}
@ -991,6 +988,10 @@ ck_ht_put_spmc(struct ck_ht *table,
}
if (priority != NULL) {
/* Version counter is updated before re-use. */
ck_pr_store_64(&map->deletions, map->deletions + 1);
ck_pr_fence_store();
/* Re-use tombstone if one was found. */
candidate = priority;
probes = probes_wr;

Loading…
Cancel
Save