ck_epoch: Add ck_epoch_reclaim function.

This function allows for explicit execution of all
deferred callbacks in an epoch_record. The primary
motivation is currently performance profiling,
but there are other use-cases where best-effort
semantics could be applied.
Branch: ck_pring
Samy Al Bahra, 12 years ago
parent 281b5ad045
commit 4132ec4998
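
As a rough illustration of the profiling use-case mentioned in the commit message, the sketch below times the grace-period wait separately from callback dispatch. This is only a sketch, not part of the commit: epoch and record are hypothetical handles assumed to have been initialized with ck_epoch_init and registered with ck_epoch_register elsewhere, with callbacks already deferred via ck_epoch_call.

#include <ck_epoch.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical handles, set up elsewhere. */
extern ck_epoch_t *epoch;
extern ck_epoch_record_t *record;

static double
elapsed_ms(const struct timespec *a, const struct timespec *b)
{

	return (b->tv_sec - a->tv_sec) * 1e3 + (b->tv_nsec - a->tv_nsec) / 1e6;
}

void
profile_reclamation(void)
{
	struct timespec t0, t1, t2;

	clock_gettime(CLOCK_MONOTONIC, &t0);

	/* Block until a grace period; with this change, no callbacks run here. */
	ck_epoch_synchronize(epoch, record);
	clock_gettime(CLOCK_MONOTONIC, &t1);

	/* Explicitly dispatch every callback deferred on this record. */
	ck_epoch_reclaim(record);
	clock_gettime(CLOCK_MONOTONIC, &t2);

	printf("grace period: %.3f ms, reclamation: %.3f ms\n",
	    elapsed_ms(&t0, &t1), elapsed_ms(&t1, &t2));
	return;
}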

@@ -50,6 +50,7 @@ OBJECTS=ck_ht_count \
 ck_epoch_poll \
 ck_epoch_recycle \
 ck_epoch_register \
+ck_epoch_reclaim \
 ck_epoch_synchronize \
 ck_epoch_unregister \
 ck_bag_allocator_set \

@@ -112,6 +112,7 @@ must have been previously registered via
 .Xr ck_epoch_recycle 3 ,
 .Xr ck_epoch_poll 3 ,
 .Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
 .Xr ck_epoch_call 3 ,
 .Xr ck_epoch_begin 3 ,
 .Xr ck_epoch_end 3

@@ -63,6 +63,7 @@ must have been previously registered via
 .Xr ck_epoch_recycle 3 ,
 .Xr ck_epoch_poll 3 ,
 .Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
 .Xr ck_epoch_barrier 3 ,
 .Xr ck_epoch_call 3 ,
 .Xr ck_epoch_end 3

@@ -48,7 +48,7 @@ The function will be provided
 the pointer specified by
 .Fa entry .
 The function will execute at some time in the future via calls to
-.Fn ck_epoch_synchronize 3 ,
+.Fn ck_epoch_reclaim 3 ,
 .Fn ck_epoch_barrier 3
 or
 .Fn ck_epoch_poll 3 .

@@ -127,6 +127,7 @@ must have been previously registered via
 .Xr ck_epoch_recycle 3 ,
 .Xr ck_epoch_poll 3 ,
 .Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
 .Xr ck_epoch_barrier 3 ,
 .Xr ck_epoch_begin 3 ,
 .Xr ck_epoch_end 3

@@ -57,6 +57,7 @@ must have been previously registered via
 .Xr ck_epoch_recycle 3 ,
 .Xr ck_epoch_poll 3 ,
 .Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
 .Xr ck_epoch_barrier 3 ,
 .Xr ck_epoch_call 3 ,
 .Xr ck_epoch_begin 3

@@ -60,6 +60,7 @@ object.
 .Xr ck_epoch_recycle 3 ,
 .Xr ck_epoch_poll 3 ,
 .Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
 .Xr ck_epoch_barrier 3 ,
 .Xr ck_epoch_call 3 ,
 .Xr ck_epoch_begin 3 ,

@@ -64,6 +64,7 @@ must have been previously registered via
 .Xr ck_epoch_unregister 3 ,
 .Xr ck_epoch_recycle 3 ,
 .Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
 .Xr ck_epoch_barrier 3 ,
 .Xr ck_epoch_call 3 ,
 .Xr ck_epoch_begin 3 ,

@@ -0,0 +1,92 @@
.\"
.\" Copyright 2013 Samy Al Bahra.
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\"
.Dd May 2, 2013
.Dt CK_EPOCH_RECLAIM 3
.Sh NAME
.Nm ck_epoch_reclaim
.Nd immediately execute all deferred callbacks
.Sh LIBRARY
Concurrency Kit (libck, \-lck)
.Sh SYNOPSIS
.In ck_epoch.h
.Ft void
.Fn ck_epoch_reclaim "ck_epoch_record_t *record"
.Sh DESCRIPTION
The
.Fn ck_epoch_reclaim 3
function will unconditionally execute all callbacks
that have been deferred with
.Fn ck_epoch_call 3 .
.Sh EXAMPLE
.Bd -literal -offset indent
#include <ck_epoch.h>
#include <ck_stack.h>
#include <stdlib.h>

/*
 * epoch was previously initialized with ck_epoch_init.
 * object points to a node that is about to be removed from an
 * epoch-protected data structure.
 */
ck_epoch_t *epoch;

void
function(void)
{
	ck_epoch_record_t *record;

	record = malloc(sizeof *record);
	ck_epoch_register(epoch, record);

	logically_delete(object);
	ck_epoch_call(epoch, record, &object->epoch_entry, destructor);

	/*
	 * Wait until no threads could possibly have a reference to the
	 * object we just deleted.
	 */
	ck_epoch_synchronize(epoch, record);

	/*
	 * Execute all deferred callbacks.
	 */
	ck_epoch_reclaim(record);
	return;
}
.Ed
.Sh RETURN VALUES
This function has no return value.
.Sh SEE ALSO
.Xr ck_epoch_init 3 ,
.Xr ck_epoch_register 3 ,
.Xr ck_epoch_unregister 3 ,
.Xr ck_epoch_recycle 3 ,
.Xr ck_epoch_poll 3 ,
.Xr ck_epoch_reclaim 3 ,
.Xr ck_epoch_barrier 3 ,
.Xr ck_epoch_call 3 ,
.Xr ck_epoch_begin 3 ,
.Xr ck_epoch_end 3
.Pp
Additional information available at http://concurrencykit.org/

@@ -93,6 +93,7 @@ is not a valid epoch object.
 .Xr ck_epoch_unregister 3 ,
 .Xr ck_epoch_poll 3 ,
 .Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
 .Xr ck_epoch_barrier 3 ,
 .Xr ck_epoch_call 3 ,
 .Xr ck_epoch_begin 3 ,

@@ -58,6 +58,7 @@ This function has no return value.
 .Xr ck_epoch_recycle 3 ,
 .Xr ck_epoch_poll 3 ,
 .Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
 .Xr ck_epoch_barrier 3 ,
 .Xr ck_epoch_call 3 ,
 .Xr ck_epoch_begin 3 ,

@@ -42,15 +42,14 @@ function will block the caller until a grace period has been
 detected, according to the semantics of epoch reclamation.
 Any objects requiring safe memory reclamation which are logically
 deleted are safe for physical deletion following a call to
-.Fn ck_epoch_synchronize 3 . This function may also dispatch callbacks
-associated with
-.Fa epoch
-that were previously scheduled via
-.Fn ck_epoch_call 3 .
+.Fn ck_epoch_synchronize 3 .
 If you require that all callbacks be dispatched, then it is suggested
 that you use
 .Fn ck_epoch_barrier 3
-instead.
+instead or follow a call of
+.Fn ck_epoch_synchronize 3
+with
+.Fn ck_epoch_reclaim 3 .
 .Sh EXAMPLE
 .Bd -literal -offset indent
@@ -115,6 +114,7 @@ must have been previously registered via
 .Xr ck_epoch_unregister 3 ,
 .Xr ck_epoch_recycle 3 ,
 .Xr ck_epoch_poll 3 ,
+.Xr ck_epoch_reclaim 3 ,
 .Xr ck_epoch_barrier 3 ,
 .Xr ck_epoch_call 3 ,
 .Xr ck_epoch_begin 3 ,
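
To make the revised ck_epoch_synchronize wording above concrete, the following sketch (not part of the commit; epoch and record are hypothetical, previously registered handles) contrasts the two ways of forcing all deferred callbacks to run:

#include <ck_epoch.h>

/* Hypothetical handles, set up elsewhere. */
extern ck_epoch_t *epoch;
extern ck_epoch_record_t *record;

/* One call: wait for a grace period and dispatch all deferred callbacks. */
void
flush_with_barrier(void)
{

	ck_epoch_barrier(epoch, record);
	return;
}

/* Two steps: wait for the grace period, then dispatch explicitly. */
void
flush_with_synchronize(void)
{

	ck_epoch_synchronize(epoch, record);
	ck_epoch_reclaim(record);
	return;
}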

@@ -64,6 +64,7 @@ function.
 .Xr ck_epoch_recycle 3 ,
 .Xr ck_epoch_poll 3 ,
 .Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
 .Xr ck_epoch_barrier 3 ,
 .Xr ck_epoch_call 3 ,
 .Xr ck_epoch_begin 3 ,

@@ -151,5 +151,6 @@ void ck_epoch_unregister(ck_epoch_t *, ck_epoch_record_t *);
 bool ck_epoch_poll(ck_epoch_t *, ck_epoch_record_t *);
 void ck_epoch_synchronize(ck_epoch_t *, ck_epoch_record_t *);
 void ck_epoch_barrier(ck_epoch_t *, ck_epoch_record_t *);
+void ck_epoch_reclaim(ck_epoch_record_t *);

 #endif /* _CK_EPOCH_H */

@@ -197,7 +197,7 @@ write_thread(void *unused CK_CC_UNUSED)
 }
 }

-ck_epoch_synchronize(&stack_epoch, &record);
+ck_epoch_barrier(&stack_epoch, &record);

 if (tid == 0) {
 fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] Peak: %u (%2.2f%%)\n Reclamations: %lu\n\n",

@@ -192,7 +192,12 @@ write_thread(void *unused CK_CC_UNUSED)
 e = stack_container(s);
 ck_epoch_end(&stack_epoch, &record);

-ck_epoch_synchronize(&stack_epoch, &record);
+if (i & 1) {
+ck_epoch_synchronize(&stack_epoch, &record);
+ck_epoch_reclaim(&record);
+} else {
+ck_epoch_barrier(&stack_epoch, &record);
+}

 if (i & 1) {
 ck_epoch_call(&stack_epoch, &record, &e->epoch_entry, destructor);

@@ -124,7 +124,7 @@ thread(void *unused CK_CC_UNUSED)
 record.n_pending,
 record.n_dispatch);

-ck_epoch_synchronize(&stack_epoch, &record);
+ck_epoch_barrier(&stack_epoch, &record);

 ck_pr_inc_uint(&e_barrier);
 while (ck_pr_load_uint(&e_barrier) < (n_threads << 1));

@@ -275,11 +275,25 @@ ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
 return;
 }

+/*
+* Reclaim all objects associated with a record.
+*/
+void
+ck_epoch_reclaim(struct ck_epoch_record *record)
+{
+unsigned int epoch;
+
+for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
+ck_epoch_dispatch(record, epoch);
+
+return;
+}
+
 /*
 * This function must not be called with-in read section.
 */
 void
-ck_epoch_barrier(struct ck_epoch *global, struct ck_epoch_record *record)
+ck_epoch_synchronize(struct ck_epoch *global, struct ck_epoch_record *record)
 {
 struct ck_epoch_record *cr;
 unsigned int delta, epoch, goal, i;

@@ -322,7 +336,7 @@ ck_epoch_barrier(struct ck_epoch *global, struct ck_epoch_record *record)
 * we are at a grace period.
 */
 if (active == false)
-goto dispatch;
+goto leave;

 /*
 * Increment current epoch. CAS semantics are used to eliminate

@@ -349,7 +363,7 @@ reload:
 * generation. We can actually avoid an addtional scan step
 * at this point.
 */
-goto dispatch;
+goto leave;
 }
 }

@@ -376,26 +390,17 @@ reload:
 break;
 }

-/*
-* As the synchronize operation is non-blocking, it is possible other
-* writers have already observed three or more epoch generations
-* relative to the generation the caller has observed. In this case,
-* it is safe to assume we are also in a grace period and are able to
-* dispatch all calls across all lists.
-*/
-dispatch:
-for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
-ck_epoch_dispatch(record, epoch);
+leave:
 record->epoch = delta;
 return;
 }

 void
-ck_epoch_synchronize(struct ck_epoch *global, struct ck_epoch_record *record)
+ck_epoch_barrier(struct ck_epoch *global, struct ck_epoch_record *record)
 {
-ck_epoch_barrier(global, record);
+ck_epoch_synchronize(global, record);
+ck_epoch_reclaim(record);
 return;
 }
