diff --git a/doc/ck_elide b/doc/ck_elide index e332d25..57c6552 100644 --- a/doc/ck_elide +++ b/doc/ck_elide @@ -65,7 +65,7 @@ concurrent execution of critical sections that do not issue conflicting memory operations. If any threads have successfully elided a lock acquisition, conflicting memory operations will roll-back any side-effects of the critical section and force every thread to retry the lock acquisition regularly. - +.Pp .Fn CK_ELIDE_LOCK , .Fn CK_ELIDE_UNLOCK , .Fn CK_ELIDE_LOCK_ADAPTIVE , @@ -103,7 +103,7 @@ and .Fn CK_ELIDE_UNLOCK_ADAPTIVE will immediately call .Fn UNLOCK_FUNCTION . - +.Pp .Fn CK_ELIDE_TRYLOCK requires a previous .Fn CK_ELIDE_TRYLOCK_PROTOTYPE @@ -118,7 +118,7 @@ operation is aborted. If RTM is unsupported .Fn CK_ELIDE_TRYLOCK will immediately call .Fn TRYLOCK_FUNCTION . - +.Pp .Fn CK_ELIDE_LOCK_ADAPTIVE and .Fn CK_ELIDE_UNLOCK_ADAPTIVE @@ -132,10 +132,105 @@ and .Fn CK_ELIDE_UNLOCK_ADAPTIVE . This structure is expected to be unique for different workloads, may not be re-used in recursive acquisitions and must match the -lifetime of the lock it is associated with. - +lifetime of the lock it is associated with. It is safe to mix +adaptive calls with best-effort calls. +.Pp Both ck_spinlock.h and ck_rwlock.h define ck_elide wrappers under the ck_spinlock and ck_rwlock namespace, respectively. +.Sh EXAMPLES +This example utilizes built-in lock elision facilities in ck_rwlock and ck_spinlock. 
+.Bd -literal -offset indent +#include <ck_rwlock.h> +#include <ck_spinlock.h> + +static ck_rwlock_t rw = CK_RWLOCK_INITIALIZER; +static struct ck_elide_config rw_config = + CK_ELIDE_CONFIG_DEFAULT_INITIALIZER; +static __thread ck_elide_stat_t rw_stat = + CK_ELIDE_STAT_INITIALIZER; + +static ck_spinlock_t spinlock = CK_SPINLOCK_INITIALIZER; +static struct ck_elide_config spinlock_config = + CK_ELIDE_CONFIG_DEFAULT_INITIALIZER; +static __thread ck_elide_stat_t spinlock_stat = + CK_ELIDE_STAT_INITIALIZER; + +void +function(void) +{ + + /* Lock-unlock write-side lock in weak best-effort manner. */ + CK_ELIDE_LOCK(ck_rwlock_write, &rw); + CK_ELIDE_UNLOCK(ck_rwlock_write, &rw); + + /* Lock-unlock read-side lock in weak best-effort manner. */ + CK_ELIDE_LOCK(ck_rwlock_read, &rw); + CK_ELIDE_UNLOCK(ck_rwlock_read, &rw); + + /* Lock-unlock write-side lock in an adaptive manner. */ + CK_ELIDE_LOCK_ADAPTIVE(ck_rwlock_write, &rw_stat, + &rw_config, &rw); + CK_ELIDE_UNLOCK_ADAPTIVE(ck_rwlock_write, &rw_stat, + &rw_config, &rw); + + /* Lock-unlock read-side lock in an adaptive manner. */ + CK_ELIDE_LOCK_ADAPTIVE(ck_rwlock_read, &rw_stat, + &rw_config, &rw); + CK_ELIDE_UNLOCK_ADAPTIVE(ck_rwlock_read, &rw_stat, + &rw_config, &rw); + + /* Lock-unlock spinlock in weak best-effort manner. */ + CK_ELIDE_LOCK(ck_spinlock, &spinlock); + CK_ELIDE_UNLOCK(ck_spinlock, &spinlock); + + /* Attempt to acquire the lock. */ + if (CK_ELIDE_TRYLOCK(ck_spinlock, &spinlock) == true) + CK_ELIDE_UNLOCK(ck_spinlock, &spinlock); + + /* Lock-unlock spinlock in an adaptive manner. */ + CK_ELIDE_LOCK_ADAPTIVE(ck_spinlock, &spinlock_stat, + &spinlock_config, &spinlock); + CK_ELIDE_UNLOCK_ADAPTIVE(ck_spinlock, &spinlock_stat, + &spinlock_config, &spinlock); +} +.Ed +.Pp +In this example, user-defined locking functions are provided with an elision +implementation. +.Bd -literal -offset indent +/* Assume lock_t has been previously defined. 
*/ +#include <ck_elide.h> + +/* + * This function returns true if the lock is unavailable at the time + * it was called or false if the lock is available. + */ +bool is_locked(lock_t *); + +/* + * This function acquires the supplied lock. + */ +void lock(lock_t *); + +/* + * This function releases the lock. + */ +void unlock(lock_t *); + +CK_ELIDE_PROTOTYPE(my_lock, lock_t, is_locked, lock, is_locked, unlock) + +static lock_t lock; + +void +function(void) +{ + + CK_ELIDE_LOCK(my_lock, &lock); + CK_ELIDE_UNLOCK(my_lock, &lock); +} +.Ed .Sh SEE ALSO +.Xr ck_rwlock 3 , +.Xr ck_spinlock 3 +.Pp Additional information available at http://en.wikipedia.org/wiki/Transactional_Synchronization_Extensions and http://concurrencykit.org/ -