diff --git a/doc/Makefile.in b/doc/Makefile.in index 2fe19ea..32b34cc 100644 --- a/doc/Makefile.in +++ b/doc/Makefile.in @@ -50,6 +50,7 @@ OBJECTS=ck_ht_count \ ck_epoch_poll \ ck_epoch_recycle \ ck_epoch_register \ + ck_epoch_reclaim \ ck_epoch_synchronize \ ck_epoch_unregister \ ck_bag_allocator_set \ diff --git a/doc/ck_epoch_barrier b/doc/ck_epoch_barrier index e4b63ac..80cc70e 100644 --- a/doc/ck_epoch_barrier +++ b/doc/ck_epoch_barrier @@ -112,6 +112,7 @@ must have been previously registered via .Xr ck_epoch_recycle 3 , .Xr ck_epoch_poll 3 , .Xr ck_epoch_synchronize 3 , +.Xr ck_epoch_reclaim 3 , .Xr ck_epoch_call 3 , .Xr ck_epoch_begin 3 , .Xr ck_epoch_end 3 diff --git a/doc/ck_epoch_begin b/doc/ck_epoch_begin index 44ed804..6a71860 100644 --- a/doc/ck_epoch_begin +++ b/doc/ck_epoch_begin @@ -63,6 +63,7 @@ must have been previously registered via .Xr ck_epoch_recycle 3 , .Xr ck_epoch_poll 3 , .Xr ck_epoch_synchronize 3 , +.Xr ck_epoch_reclaim 3 , .Xr ck_epoch_barrier 3 , .Xr ck_epoch_call 3 , .Xr ck_epoch_end 3 diff --git a/doc/ck_epoch_call b/doc/ck_epoch_call index 2736488..e3aeefb 100644 --- a/doc/ck_epoch_call +++ b/doc/ck_epoch_call @@ -48,7 +48,7 @@ The function will be provided the pointer specified by .Fa entry . The function will execute at some time in the future via calls to -.Fn ck_epoch_synchronize 3 , +.Fn ck_epoch_reclaim 3 , .Fn ck_epoch_barrier 3 or .Fn ck_epoch_poll 3 . 
@@ -127,6 +127,7 @@ must have been previously registered via .Xr ck_epoch_recycle 3 , .Xr ck_epoch_poll 3 , .Xr ck_epoch_synchronize 3 , +.Xr ck_epoch_reclaim 3 , .Xr ck_epoch_barrier 3 , .Xr ck_epoch_begin 3 , .Xr ck_epoch_end 3 diff --git a/doc/ck_epoch_end b/doc/ck_epoch_end index 69cb1eb..eea2dad 100644 --- a/doc/ck_epoch_end +++ b/doc/ck_epoch_end @@ -57,6 +57,7 @@ must have been previously registered via .Xr ck_epoch_recycle 3 , .Xr ck_epoch_poll 3 , .Xr ck_epoch_synchronize 3 , +.Xr ck_epoch_reclaim 3 , .Xr ck_epoch_barrier 3 , .Xr ck_epoch_call 3 , .Xr ck_epoch_begin 3 diff --git a/doc/ck_epoch_init b/doc/ck_epoch_init index 1d39292..e22294d 100644 --- a/doc/ck_epoch_init +++ b/doc/ck_epoch_init @@ -60,6 +60,7 @@ object. .Xr ck_epoch_recycle 3 , .Xr ck_epoch_poll 3 , .Xr ck_epoch_synchronize 3 , +.Xr ck_epoch_reclaim 3 , .Xr ck_epoch_barrier 3 , .Xr ck_epoch_call 3 , .Xr ck_epoch_begin 3 , diff --git a/doc/ck_epoch_poll b/doc/ck_epoch_poll index 4dd57c3..b80c42e 100644 --- a/doc/ck_epoch_poll +++ b/doc/ck_epoch_poll @@ -64,6 +64,7 @@ must have been previously registered via .Xr ck_epoch_unregister 3 , .Xr ck_epoch_recycle 3 , .Xr ck_epoch_synchronize 3 , +.Xr ck_epoch_reclaim 3 , .Xr ck_epoch_barrier 3 , .Xr ck_epoch_call 3 , .Xr ck_epoch_begin 3 , diff --git a/doc/ck_epoch_reclaim b/doc/ck_epoch_reclaim new file mode 100644 index 0000000..ffe3bac --- /dev/null +++ b/doc/ck_epoch_reclaim @@ -0,0 +1,92 @@ +.\" +.\" Copyright 2013 Samy Al Bahra. +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. 
Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.\" +.Dd May 2, 2013 +.Dt CK_EPOCH_RECLAIM 3 +.Sh NAME +.Nm ck_epoch_reclaim +.Nd immediately execute all deferred callbacks +.Sh LIBRARY +Concurrency Kit (libck, \-lck) +.Sh SYNOPSIS +.In ck_epoch.h +.Ft void +.Fn ck_epoch_reclaim "ck_epoch_record_t *record" +.Sh DESCRIPTION +The +.Fn ck_epoch_reclaim 3 +function will unconditionally execute all callbacks +that have been deferred with +.Fn ck_epoch_call 3 . +.Sh EXAMPLE +.Bd -literal -offset indent + +#include <ck_epoch.h> +#include <ck_stack.h> +#include <stdbool.h> + +/* + * epoch was previously initialized with ck_epoch_init. + */ +ck_epoch_t *epoch; + +void +function(void) +{ + ck_epoch_record_t *record; + + logically_delete(object); + ck_epoch_call(epoch, record, &object->epoch_entry, destructor); + + /* + * Wait until no threads could possibly have a reference to the + * object we just deleted. + */ + ck_epoch_synchronize(epoch, record); + + /* + * Execute all deferred callbacks. 
+ */ + ck_epoch_reclaim(record); + + return; +} +.Ed +.Sh RETURN VALUES +This function has no return value. +.Sh SEE ALSO +.Xr ck_epoch_init 3 , +.Xr ck_epoch_register 3 , +.Xr ck_epoch_unregister 3 , +.Xr ck_epoch_recycle 3 , +.Xr ck_epoch_poll 3 , +.Xr ck_epoch_synchronize 3 , +.Xr ck_epoch_barrier 3 , +.Xr ck_epoch_call 3 , +.Xr ck_epoch_begin 3 , +.Xr ck_epoch_end 3 +.Pp +Additional information available at http://concurrencykit.org/ diff --git a/doc/ck_epoch_recycle b/doc/ck_epoch_recycle index b7fce30..530079c 100644 --- a/doc/ck_epoch_recycle +++ b/doc/ck_epoch_recycle @@ -93,6 +93,7 @@ is not a valid epoch object. .Xr ck_epoch_unregister 3 , .Xr ck_epoch_poll 3 , .Xr ck_epoch_synchronize 3 , +.Xr ck_epoch_reclaim 3 , .Xr ck_epoch_barrier 3 , .Xr ck_epoch_call 3 , .Xr ck_epoch_begin 3 , diff --git a/doc/ck_epoch_register b/doc/ck_epoch_register index 268e874..85ea461 100644 --- a/doc/ck_epoch_register +++ b/doc/ck_epoch_register @@ -58,6 +58,7 @@ This function has no return value. .Xr ck_epoch_recycle 3 , .Xr ck_epoch_poll 3 , .Xr ck_epoch_synchronize 3 , +.Xr ck_epoch_reclaim 3 , .Xr ck_epoch_barrier 3 , .Xr ck_epoch_call 3 , .Xr ck_epoch_begin 3 , diff --git a/doc/ck_epoch_synchronize b/doc/ck_epoch_synchronize index e1b8ab3..33df67c 100644 --- a/doc/ck_epoch_synchronize +++ b/doc/ck_epoch_synchronize @@ -42,15 +42,14 @@ function will block the caller until a grace period has been detected, according to the semantics of epoch reclamation. Any objects requiring safe memory reclamation which are logically deleted are safe for physical deletion following a call to -.Fn ck_epoch_synchronize 3 . This function may also dispatch callbacks -associated with -.Fa epoch -that were previously scheduled via -.Fn ck_epoch_call 3 . +.Fn ck_epoch_synchronize 3 . If you require that all callbacks be dispatched, then it is suggested that you use .Fn ck_epoch_barrier 3 -instead. +instead or follow a call of +.Fn ck_epoch_synchronize 3 +with +.Fn ck_epoch_reclaim 3 . 
.Sh EXAMPLE .Bd -literal -offset indent @@ -115,6 +114,7 @@ must have been previously registered via .Xr ck_epoch_unregister 3 , .Xr ck_epoch_recycle 3 , .Xr ck_epoch_poll 3 , +.Xr ck_epoch_reclaim 3 , .Xr ck_epoch_barrier 3 , .Xr ck_epoch_call 3 , .Xr ck_epoch_begin 3 , diff --git a/doc/ck_epoch_unregister b/doc/ck_epoch_unregister index 7976571..9f297ee 100644 --- a/doc/ck_epoch_unregister +++ b/doc/ck_epoch_unregister @@ -64,6 +64,7 @@ function. .Xr ck_epoch_recycle 3 , .Xr ck_epoch_poll 3 , .Xr ck_epoch_synchronize 3 , +.Xr ck_epoch_reclaim 3 , .Xr ck_epoch_barrier 3 , .Xr ck_epoch_call 3 , .Xr ck_epoch_begin 3 , diff --git a/include/ck_epoch.h b/include/ck_epoch.h index 82d35cb..4624bdf 100644 --- a/include/ck_epoch.h +++ b/include/ck_epoch.h @@ -151,5 +151,6 @@ void ck_epoch_unregister(ck_epoch_t *, ck_epoch_record_t *); bool ck_epoch_poll(ck_epoch_t *, ck_epoch_record_t *); void ck_epoch_synchronize(ck_epoch_t *, ck_epoch_record_t *); void ck_epoch_barrier(ck_epoch_t *, ck_epoch_record_t *); +void ck_epoch_reclaim(ck_epoch_record_t *); #endif /* _CK_EPOCH_H */ diff --git a/regressions/ck_epoch/validate/ck_epoch_poll.c b/regressions/ck_epoch/validate/ck_epoch_poll.c index 8be273a..a9fb202 100644 --- a/regressions/ck_epoch/validate/ck_epoch_poll.c +++ b/regressions/ck_epoch/validate/ck_epoch_poll.c @@ -197,7 +197,7 @@ write_thread(void *unused CK_CC_UNUSED) } } - ck_epoch_synchronize(&stack_epoch, &record); + ck_epoch_barrier(&stack_epoch, &record); if (tid == 0) { fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] Peak: %u (%2.2f%%)\n Reclamations: %lu\n\n", diff --git a/regressions/ck_epoch/validate/ck_epoch_synchronize.c b/regressions/ck_epoch/validate/ck_epoch_synchronize.c index 960c99e..f5d68ab 100644 --- a/regressions/ck_epoch/validate/ck_epoch_synchronize.c +++ b/regressions/ck_epoch/validate/ck_epoch_synchronize.c @@ -192,7 +192,12 @@ write_thread(void *unused CK_CC_UNUSED) e = stack_container(s); ck_epoch_end(&stack_epoch, &record); - 
ck_epoch_synchronize(&stack_epoch, &record); + if (i & 1) { + ck_epoch_synchronize(&stack_epoch, &record); + ck_epoch_reclaim(&record); + } else { + ck_epoch_barrier(&stack_epoch, &record); + } if (i & 1) { ck_epoch_call(&stack_epoch, &record, &e->epoch_entry, destructor); diff --git a/regressions/ck_epoch/validate/ck_stack.c b/regressions/ck_epoch/validate/ck_stack.c index 55f74a1..204b3fc 100644 --- a/regressions/ck_epoch/validate/ck_stack.c +++ b/regressions/ck_epoch/validate/ck_stack.c @@ -124,7 +124,7 @@ thread(void *unused CK_CC_UNUSED) record.n_pending, record.n_dispatch); - ck_epoch_synchronize(&stack_epoch, &record); + ck_epoch_barrier(&stack_epoch, &record); ck_pr_inc_uint(&e_barrier); while (ck_pr_load_uint(&e_barrier) < (n_threads << 1)); diff --git a/src/ck_epoch.c b/src/ck_epoch.c index af23c93..1904748 100644 --- a/src/ck_epoch.c +++ b/src/ck_epoch.c @@ -275,11 +275,25 @@ ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e) return; } +/* + * Reclaim all objects associated with a record. + */ +void +ck_epoch_reclaim(struct ck_epoch_record *record) +{ + unsigned int epoch; + + for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++) + ck_epoch_dispatch(record, epoch); + + return; +} + /* * This function must not be called with-in read section. */ void -ck_epoch_barrier(struct ck_epoch *global, struct ck_epoch_record *record) +ck_epoch_synchronize(struct ck_epoch *global, struct ck_epoch_record *record) { struct ck_epoch_record *cr; unsigned int delta, epoch, goal, i; @@ -322,7 +336,7 @@ ck_epoch_barrier(struct ck_epoch *global, struct ck_epoch_record *record) * we are at a grace period. */ if (active == false) - goto dispatch; + goto leave; /* * Increment current epoch. CAS semantics are used to eliminate @@ -349,7 +363,7 @@ reload: * generation. We can actually avoid an addtional scan step * at this point. 
*/ - goto dispatch; + goto leave; } } @@ -376,26 +390,17 @@ reload: break; } - /* - * As the synchronize operation is non-blocking, it is possible other - * writers have already observed three or more epoch generations - * relative to the generation the caller has observed. In this case, - * it is safe to assume we are also in a grace period and are able to - * dispatch all calls across all lists. - */ -dispatch: - for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++) - ck_epoch_dispatch(record, epoch); - +leave: record->epoch = delta; return; } void -ck_epoch_synchronize(struct ck_epoch *global, struct ck_epoch_record *record) +ck_epoch_barrier(struct ck_epoch *global, struct ck_epoch_record *record) { - ck_epoch_barrier(global, record); + ck_epoch_synchronize(global, record); + ck_epoch_reclaim(record); return; }