@@ -55,12 +55,16 @@ enum {
 struct ck_epoch;
 struct ck_epoch_record {
 	unsigned int active;
-	unsigned int status;
 	unsigned int epoch;
 	ck_stack_t pending[CK_EPOCH_LENGTH];
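+	/* Number of deferred objects awaiting reclamation on this record. */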
+	unsigned int n_pending;
+	unsigned int status;
 	unsigned int delta;
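+	/*
+	 * Peak observed value of n_pending and the total number of
+	 * objects reclaimed through this record.
+	 */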
+	unsigned int n_peak;
+	uint64_t n_reclamations;
 	struct ck_epoch *global;
 	ck_stack_entry_t record_next;
 } CK_CC_CACHELINE;
 typedef struct ck_epoch_record ck_epoch_record_t;
@@ -125,6 +129,9 @@ ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
 	record->active = 0;
 	record->epoch = 0;
 	record->delta = 0;
+	record->n_pending = 0;
+	record->n_peak = 0;
+	record->n_reclamations = 0;
 	record->global = global;
 
 	for (i = 0; i < CK_EPOCH_LENGTH; i++)
@@ -146,7 +153,7 @@ ck_epoch_unregister(struct ck_epoch_record *record)
 }
 
 CK_CC_INLINE static void
-ck_epoch_update(struct ck_epoch *global, struct ck_epoch_record *record)
+ck_epoch_tick(struct ck_epoch *global, struct ck_epoch_record *record)
 {
 	struct ck_epoch_record *c_record;
 	ck_stack_entry_t *cursor;
@@ -167,35 +174,16 @@ ck_epoch_update(struct ck_epoch *global, struct ck_epoch_record *record)
 	return;
 }
 
-CK_CC_INLINE static void
-ck_epoch_activate(struct ck_epoch_record *record)
-{
-	ck_pr_store_uint(&record->active, 1);
-	ck_pr_fence_store();
-	return;
-}
-
-CK_CC_INLINE static void
-ck_epoch_deactivate(struct ck_epoch_record *record)
-{
-	ck_pr_fence_store();
-	ck_pr_store_uint(&record->active, 0);
-	return;
-}
-
-CK_CC_INLINE static void
-ck_epoch_start(struct ck_epoch_record *record)
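+/*
+ * Attempts to reclaim the objects pending on this record; returns
+ * false if the global epoch has not advanced past the local epoch.
+ */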
+CK_CC_INLINE static bool
+ck_epoch_reclaim(struct ck_epoch_record *record)
 {
 	struct ck_epoch *global = record->global;
-	unsigned int g_epoch;
-
-	for (;;) {
-		g_epoch = ck_pr_load_uint(&global->epoch);
-		if (record->epoch != g_epoch) {
-			ck_stack_entry_t *next, *cursor;
-			unsigned int epoch = record->epoch & (CK_EPOCH_LENGTH - 1);
+	unsigned int g_epoch = ck_pr_load_uint(&global->epoch);
+	unsigned int epoch = record->epoch;
+	ck_stack_entry_t *next, *cursor;
+
+	if (epoch == g_epoch)
+		return false;
 
 	/*
 	 * This means all threads with a potential reference to a
 	 * hazard pointer will have a view as new as or newer than
@@ -203,19 +191,34 @@ ck_epoch_start(struct ck_epoch_record *record)
 	 * the calling thread. No active reference should exist to
 	 * any object in the record's pending list.
 	 */
-	CK_STACK_FOREACH_SAFE(&record->pending[epoch], cursor, next)
+	CK_STACK_FOREACH_SAFE(&record->pending[epoch], cursor, next) {
 		global->destroy(cursor);
+		record->n_pending--;
+		record->n_reclamations++;
+	}
 
 	ck_stack_init(&record->pending[epoch]);
-	ck_pr_store_uint(&record->epoch, g_epoch);
+	record->epoch = g_epoch & (CK_EPOCH_LENGTH - 1);
 	record->delta = 0;
+	return true;
 }
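+
+/*
+ * Marks the record active and reclaims any deferred objects whose
+ * epoch has passed before the writer proceeds.
+ */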
+CK_CC_INLINE static void
+ck_epoch_write_begin(struct ck_epoch_record *record)
+{
+	struct ck_epoch *global = record->global;
+
+	ck_pr_store_uint(&record->active, 1);
+	ck_pr_fence_store();
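+
+	/*
+	 * If reclamation cannot make progress, force an epoch tick once
+	 * the delta threshold is reached, then retry.
+	 */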
+	for (;;) {
+		if (ck_epoch_reclaim(record) == true)
+			break;
-		}
 
 		if (++record->delta >= global->threshold) {
 			record->delta = 0;
-			ck_epoch_update(global, record);
+			ck_epoch_tick(global, record);
 			continue;
 		}
@@ -226,45 +229,53 @@ ck_epoch_start(struct ck_epoch_record *record)
 }
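 
+/*
+ * Marks the record as active: reclamation of objects this thread may
+ * still reference is held off until ck_epoch_end.
+ */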
 CK_CC_INLINE static void
-ck_epoch_stop(struct ck_epoch_record *record CK_CC_UNUSED)
+ck_epoch_read_begin(struct ck_epoch_record *record)
 {
+	ck_pr_store_uint(&record->active, 1);
+	ck_pr_fence_store();
 	return;
 }
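 
+/*
+ * Ends a read or write section: prior stores are ordered before the
+ * record is marked inactive.
+ */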
 CK_CC_INLINE static void
-ck_epoch_begin(struct ck_epoch_record *record)
+ck_epoch_end(struct ck_epoch_record *record)
 {
-	ck_epoch_activate(record);
-	ck_epoch_start(record);
+	ck_pr_fence_store();
+	ck_pr_store_uint(&record->active, 0);
 	return;
 }
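 
+/*
+ * Defers destruction of an object by pushing it onto the current
+ * epoch's pending stack; reclamation may be attempted inline once
+ * enough deferrals accumulate.
+ */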
 CK_CC_INLINE static void
-ck_epoch_end(struct ck_epoch_record *record)
+ck_epoch_free(struct ck_epoch_record *record, ck_stack_entry_t *entry)
 {
-	ck_epoch_deactivate(record);
-	return;
-}
-
-CK_CC_INLINE static void
-ck_epoch_flush(struct ck_epoch_record *record)
-{
-	ck_epoch_update(record->global, record);
-	ck_epoch_start(record);
+	struct ck_epoch *global = record->global;
+	unsigned int epoch = record->epoch;
+
+	ck_stack_push_spnc(&record->pending[epoch], entry);
+	record->n_pending += 1;
+
+	if (record->n_pending > record->n_peak)
+		record->n_peak = record->n_pending;
+
+	if (record->n_pending >= global->threshold && ck_epoch_reclaim(record) == false)
+		ck_epoch_tick(global, record);
 	return;
 }
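 
+/*
+ * Blocks until every object deferred through this record has been
+ * reclaimed.
+ */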
 CK_CC_INLINE static void
-ck_epoch_free(struct ck_epoch_record *record, ck_stack_entry_t *entry)
+ck_epoch_purge(struct ck_epoch_record *record)
 {
-	unsigned int epoch = ck_pr_load_uint(&record->epoch) & (CK_EPOCH_LENGTH - 1);
-
-	ck_stack_push_spnc(&record->pending[epoch], entry);
-	record->delta++;
+	ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
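+
+	/*
+	 * Force an epoch tick whenever reclamation makes no progress and
+	 * back off while deferred objects remain.
+	 */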
+	while (record->n_pending > 0) {
+		if (ck_epoch_reclaim(record) == false)
+			ck_epoch_tick(record->global, record);
+		else if (record->n_pending > 0)
+			ck_backoff_gb(&backoff);
+	}
+
 	return;
 }