ck_epoch: Collect from previous instance of epoch. Add torture test.

This typo led to incorrect results. Added a read-mostly torture
test (follow-on work is a volatile interface for the stack).
Samy Al Bahra 14 years ago
parent 13dd1a4f82
commit 3f87223d21
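
For readers outside the ck tree, the bug class is simple: ck_epoch bins
deferred frees into per-epoch slots, and a slot may only be drained once
the global epoch has cycled back around to it, i.e. everything in it was
retired during the *previous* instance of that epoch value. The fixed
reclaim path therefore indexes pending[] with the freshly observed
g_epoch rather than the record's stale epoch. Below is a minimal,
single-threaded sketch of that slot discipline; it is illustrative only,
and the toy_* names are hypothetical, not the ck_epoch API.

#include <stdio.h>
#include <stdlib.h>

#define TOY_EPOCHS 2	/* toy global epoch wraps over two slots */
#define TOY_SLOT_MAX 8	/* fixed slot capacity; plenty for the demo */

/* Hypothetical stand-ins for ck_epoch's global epoch and per-thread record. */
static unsigned int toy_g_epoch;

struct toy_record {
	unsigned int epoch;				/* last global epoch observed */
	void *pending[TOY_EPOCHS][TOY_SLOT_MAX];	/* deferred frees, binned by slot */
	unsigned int n_pending[TOY_EPOCHS];
};

/*
 * Defer a free into the slot of the epoch this record currently sees,
 * mirroring ck_epoch_free's push onto record->pending[epoch].
 */
static void
toy_free(struct toy_record *record, void *p)
{
	unsigned int epoch = record->epoch;

	record->pending[epoch][record->n_pending[epoch]++] = p;
}

/*
 * Run after a grace period: everything in the slot indexed by the
 * current global epoch was deferred during the previous instance of
 * that epoch value, so no reader can still hold it. Draining
 * record->pending[record->epoch] instead (the typo fixed above) would
 * free objects deferred in the still-live epoch.
 */
static void
toy_reclaim(struct toy_record *record)
{
	unsigned int i, epoch = toy_g_epoch;	/* g_epoch, not the stale record->epoch */

	for (i = 0; i < record->n_pending[epoch]; i++)
		free(record->pending[epoch][i]);

	record->n_pending[epoch] = 0;
	record->epoch = toy_g_epoch;
}

int
main(void)
{
	struct toy_record record = { 0 };

	toy_free(&record, malloc(16));	/* binned in slot 0 during epoch 0 */
	toy_g_epoch = 1;		/* epoch advances; epoch-0 readers drain away */
	toy_g_epoch = 0;		/* wraps: slot 0 holds only old objects now */
	toy_reclaim(&record);		/* safe: drains pending[g_epoch], i.e. slot 0 */

	printf("slot 0 pending after reclaim: %u\n", record.n_pending[0]);
	return (0);
}

The torture test added below exercises exactly this window: readers walk
the stack while the writer retires nodes, so draining the wrong slot
frees memory a reader is still traversing.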

@@ -203,13 +203,13 @@ ck_epoch_reclaim(struct ck_epoch_record *record)
 	 * the calling thread. No active reference should exist to
 	 * any object in the record's pending list.
 	 */
-	CK_STACK_FOREACH_SAFE(&record->pending[epoch], cursor, next) {
+	CK_STACK_FOREACH_SAFE(&record->pending[g_epoch], cursor, next) {
 		global->destroy(cursor);
 		record->n_pending--;
 		record->n_reclamations++;
 	}
-	ck_stack_init(&record->pending[epoch]);
+	ck_stack_init(&record->pending[g_epoch]);
 	record->epoch = g_epoch;
 	record->delta = 0;
@@ -273,9 +273,10 @@ ck_epoch_retire(struct ck_epoch_record *record, ck_stack_entry_t *entry)
 CK_CC_INLINE static void
 ck_epoch_free(struct ck_epoch_record *record, ck_stack_entry_t *entry)
 {
+	unsigned int epoch = ck_pr_load_uint(&record->epoch);
 	struct ck_epoch *global = record->global;
-	ck_stack_push_spnc(&record->pending[record->epoch], entry);
+	ck_stack_push_spnc(&record->pending[epoch], entry);
 	record->n_pending += 1;
 	if (record->n_pending > record->n_peak)

@@ -1,9 +1,12 @@
 .PHONY: clean distribution
 
-OBJECTS=ck_stack
+OBJECTS=ck_stack ck_stack_read
 
 all: $(OBJECTS)
 
+ck_stack_read: ck_stack_read.c ../../../include/ck_stack.h ../../../include/ck_epoch.h
+	$(CC) $(CFLAGS) -o ck_stack_read ck_stack_read.c
+
 ck_stack: ck_stack.c ../../../include/ck_stack.h ../../../include/ck_epoch.h
 	$(CC) $(CFLAGS) -o ck_stack ck_stack.c

@@ -110,13 +110,13 @@ thread(void *unused CK_CC_UNUSED)
 	ck_pr_inc_uint(&e_barrier);
 	while (ck_pr_load_uint(&e_barrier) < n_threads);
-	fprintf(stderr, "Peak: %u (%2.2f%%)\nReclamations: %" PRIu64 "\n\n",
+	fprintf(stderr, "Peak: %u (%2.2f%%), %u pending\nReclamations: %" PRIu64 "\n\n",
 		record.n_peak,
 		(double)record.n_peak / PAIRS * 100,
+		record.n_pending,
 		record.n_reclamations);
 	ck_epoch_purge(&record);
 	ck_pr_inc_uint(&e_barrier);
 	while (ck_pr_load_uint(&e_barrier) < (n_threads << 1));

@@ -0,0 +1,220 @@
/*
* Copyright 2010-2011 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <sys/time.h>
#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <stdbool.h>
#include <stddef.h>
#include <ck_epoch.h>
#include <ck_stack.h>
#include "../../common.h"
static unsigned int threshold;
static unsigned int n_threads;
static unsigned int barrier;
static unsigned int e_barrier;
static unsigned int readers;

#ifndef PAIRS
#define PAIRS 5000000
#endif

#ifndef ITERATE
#define ITERATE 20
#endif

struct node {
	unsigned int value;
	ck_stack_entry_t stack_entry;
};

static ck_stack_t stack = CK_STACK_INITIALIZER;
static ck_epoch_t stack_epoch;
CK_STACK_CONTAINER(struct node, stack_entry, stack_container)
static struct affinity a;
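
/*
 * Reader threads spin traversing the stack under epoch protection while
 * the single writer pushes and pops; a node reclaimed too early shows up
 * as a crash or a torn traversal.
 */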
static void *
read_thread(void *unused CK_CC_UNUSED)
{
	unsigned int j;
	ck_epoch_record_t record;
	ck_stack_entry_t *cursor;
	volatile ck_stack_entry_t *n;

	ck_epoch_register(&stack_epoch, &record);

	if (aff_iterate(&a)) {
		perror("ERROR: failed to affine thread");
		exit(EXIT_FAILURE);
	}

	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < n_threads);

	/* Wait until the writer has pushed at least one entry. */
	while (CK_STACK_ISEMPTY(&stack) == true) {
		if (ck_pr_load_uint(&readers) != 0)
			break;

		ck_pr_stall();
	}

	j = 0;
	for (;;) {
		/*
		 * Traverse the stack inside a read-side section; the
		 * volatile n keeps the traversal from being optimized away.
		 */
		ck_epoch_read_begin(&record);
		CK_STACK_FOREACH(&stack, cursor) {
			n = cursor;
			j++;
		}
		ck_epoch_end(&record);

		/* Signal the writer that a reader has observed entries. */
		if (j != 0 && ck_pr_load_uint(&readers) == 0)
			ck_pr_store_uint(&readers, 1);

		if (CK_STACK_ISEMPTY(&stack) == true &&
		    ck_pr_load_uint(&e_barrier) != 0)
			break;
	}

	ck_pr_inc_uint(&e_barrier);
	while (ck_pr_load_uint(&e_barrier) < n_threads);

	fprintf(stderr, "[R] Observed entries: %u\n", j);
	return (NULL);
}
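
/*
 * Writer: each iteration pushes PAIRS nodes, waits for a reader to have
 * observed the stack, then pops every node and defers its free through
 * ck_epoch_free, so reclamation races the readers above.
 */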
static void *
thread(void *unused CK_CC_UNUSED)
{
	struct node **entry, *e;
	unsigned int i, j;
	ck_epoch_record_t record;
	ck_stack_entry_t *s;

	ck_epoch_register(&stack_epoch, &record);

	if (aff_iterate(&a)) {
		perror("ERROR: failed to affine thread");
		exit(EXIT_FAILURE);
	}

	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < n_threads);

	entry = malloc(sizeof(struct node *) * PAIRS);
	if (entry == NULL) {
		fprintf(stderr, "Failed allocation.\n");
		exit(EXIT_FAILURE);
	}

	for (j = 0; j < ITERATE; j++) {
		for (i = 0; i < PAIRS; i++) {
			entry[i] = malloc(sizeof(struct node));
			if (entry[i] == NULL) {
				fprintf(stderr, "Failed individual allocation\n");
				exit(EXIT_FAILURE);
			}
		}

		for (i = 0; i < PAIRS; i++) {
			ck_epoch_write_begin(&record);
			ck_stack_push_upmc(&stack, &entry[i]->stack_entry);
			ck_epoch_end(&record);
		}

		/* Do not start popping until a reader has seen the stack. */
		while (ck_pr_load_uint(&readers) == 0)
			ck_pr_stall();

		for (i = 0; i < PAIRS; i++) {
			ck_epoch_write_begin(&record);
			s = ck_stack_pop_upmc(&stack);
			ck_epoch_end(&record);

			e = stack_container(s);
			ck_epoch_free(&record, &e->stack_entry);
		}
	}

	ck_pr_inc_uint(&e_barrier);
	while (ck_pr_load_uint(&e_barrier) < n_threads);

	fprintf(stderr, "[W] Peak: %u (%2.2f%%)\n Reclamations: %" PRIu64 "\n\n",
		record.n_peak,
		(double)record.n_peak / ((double)PAIRS * ITERATE) * 100,
		record.n_reclamations);

	return (NULL);
}
static void
destructor(ck_stack_entry_t *p)
{
	struct node *e = stack_container(p);

	free(e);
	return;
}
int
main(int argc, char *argv[])
{
	unsigned int i;
	pthread_t *threads;

	if (argc != 4) {
		fprintf(stderr, "Usage: stack <threads> <threshold> <delta>\n");
		exit(EXIT_FAILURE);
	}

	n_threads = atoi(argv[1]);
	threshold = atoi(argv[2]);
	a.delta = atoi(argv[3]);
	a.request = 0;

	threads = malloc(sizeof(pthread_t) * n_threads);
	ck_epoch_init(&stack_epoch, threshold, destructor);

	for (i = 0; i < n_threads - 1; i++)
		pthread_create(threads + i, NULL, read_thread, NULL);
	pthread_create(threads + i, NULL, thread, NULL);

	for (i = 0; i < n_threads; i++)
		pthread_join(threads[i], NULL);

	return (0);
}