From 5e1b6141084624ce882152ad31944672018c2e9a Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Sat, 9 Mar 2013 19:24:22 -0600 Subject: [PATCH 01/20] ck_cohort_rw: Initial implementation with validation test. I still need to implement benchmark tests and write documentation. The reader-writer cohort locks also required that I add a method to the existing ck_cohort framework to determine whether or not a cohort lock is currently in a locked state. --- include/ck_cohort.h | 17 +- include/ck_cohort_rw.h | 142 +++++++++++++ regressions/ck_cohort_rw/ck_cohort.h | 25 +++ regressions/ck_cohort_rw/validate/Makefile | 17 ++ regressions/ck_cohort_rw/validate/validate | Bin 0 -> 30672 bytes regressions/ck_cohort_rw/validate/validate.c | 208 +++++++++++++++++++ 6 files changed, 405 insertions(+), 4 deletions(-) create mode 100644 include/ck_cohort_rw.h create mode 100644 regressions/ck_cohort_rw/ck_cohort.h create mode 100644 regressions/ck_cohort_rw/validate/Makefile create mode 100755 regressions/ck_cohort_rw/validate/validate create mode 100644 regressions/ck_cohort_rw/validate/validate.c diff --git a/include/ck_cohort.h b/include/ck_cohort.h index f82a251..9b14fb4 100644 --- a/include/ck_cohort.h +++ b/include/ck_cohort.h @@ -51,8 +51,9 @@ enum ck_cohort_state { #define CK_COHORT_LOCK(N, C, GC, LC) ck_cohort_##N##_lock(C, GC, LC) #define CK_COHORT_UNLOCK(N, C, GC, LC) ck_cohort_##N##_unlock(C, GC, LC) #define CK_COHORT_TRYLOCK(N, C, GLC, LLC, LUC) ck_cohort_##N##_trylock(C, GLC, LLC, LUC) +#define CK_COHORT_LOCKED(N, C, GC, LC) ck_cohort_##N##_locked(C, GC, LC) -#define CK_COHORT_PROTOTYPE(N, GL, GU, LL, LU) \ +#define CK_COHORT_PROTOTYPE(N, GL, GU, GI, LL, LU, LI) \ CK_COHORT_INSTANCE(N) { \ void *global_lock; \ void *local_lock; \ @@ -111,10 +112,18 @@ enum ck_cohort_state { LU(cohort->local_lock, local_context); \ \ return; \ + } \ + \ + CK_CC_INLINE static bool \ + ck_cohort_##N##_locked(CK_COHORT_INSTANCE(N) *cohort, \ + void *global_context, void *local_context) \ + { \ + return GI(cohort->local_lock, local_context) || \ + LI(cohort->global_lock, global_context); \ } -#define CK_COHORT_TRYLOCK_PROTOTYPE(N, GL, GU, GTL, LL, LU, LTL) \ - CK_COHORT_PROTOTYPE(N, GL, GU, LL, LU) \ +#define CK_COHORT_TRYLOCK_PROTOTYPE(N, GL, GU, GI, GTL, LL, LU, LI, LTL) \ + CK_COHORT_PROTOTYPE(N, GL, GU, GI, LL, LU, LI) \ CK_CC_INLINE static bool \ ck_cohort_##N##_trylock(CK_COHORT_INSTANCE(N) *cohort, \ void *global_context, void *local_context, \ @@ -132,7 +141,7 @@ enum ck_cohort_state { \ if (cohort->release_state == CK_COHORT_STATE_GLOBAL && \ GTL(cohort->global_lock, global_context) == false) { \ - LU(cohort->local_lock, local_unlock_context); \ + LU(cohort->local_lock, local_unlock_context); \ return false; \ } \ \ diff --git a/include/ck_cohort_rw.h b/include/ck_cohort_rw.h new file mode 100644 index 0000000..471377c --- /dev/null +++ b/include/ck_cohort_rw.h @@ -0,0 +1,142 @@ +/* + * Copyright 2013 Samy Al Bahra. + * Copyright 2013 Brendon Scheinman. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _CK_COHORT_RW_H +#define _CK_COHORT_RW_H + +/* + * This is an implementation of NUMA-aware reader-writer locks as described in: + * Calciu, I.; Dice, D.; Lev, Y.; Luchangco, V.; Marathe, V.; and Shavit, N. 2013. + * NUMA-Aware Reader-Writer Locks + */ + +#include +#include +#include +#include + +#define CK_COHORT_RW_NAME(N) ck_cohort_rw_##N +#define CK_COHORT_RW_INSTANCE(N) struct CK_COHORT_RW_NAME(N) +#define CK_COHORT_RW_INIT(N, RW, WL) ck_cohort_rw_##N##_init(RW, WL) +#define CK_COHORT_RW_READ_LOCK(N, RW, C, GC, LC) ck_cohort_rw_##N##_read_lock(RW, C, GC, LC) +#define CK_COHORT_RW_READ_UNLOCK(N, RW) ck_cohort_rw_##N##_read_unlock(RW) +#define CK_COHORT_RW_WRITE_LOCK(N, RW, C, GC, LC) ck_cohort_rw_##N##_write_lock(RW, C, GC, LC) +#define CK_COHORT_RW_WRITE_UNLOCK(N, RW, C, GC, LC) ck_cohort_rw_##N##_write_unlock(RW, C, GC, LC) +#define CK_COHORT_RW_DEFAULT_WAIT_LIMIT 1000 + +#define CK_COHORT_RW_PROTOTYPE(N) \ + CK_COHORT_RW_INSTANCE(N) { \ + CK_COHORT_INSTANCE(N) *cohort; \ + unsigned int read_counter; \ + unsigned int write_barrier; \ + unsigned int wait_limit; \ + }; \ + \ + CK_CC_INLINE static void \ + ck_cohort_rw_##N##_init(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ + unsigned int wait_limit) \ + { \ + rw_cohort->read_counter = 0; \ + rw_cohort->write_barrier = 0; \ + rw_cohort->wait_limit = wait_limit; \ + ck_pr_barrier(); \ + return; \ + } \ + \ + CK_CC_INLINE static void \ + ck_cohort_rw_##N##_write_lock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ + CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ + void *local_context) \ + { \ + while (ck_pr_load_uint(&rw_cohort->write_barrier) > 0) { \ + ck_pr_stall(); \ + } \ + \ + CK_COHORT_LOCK(N, cohort, global_context, local_context); \ + \ + while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \ + ck_pr_stall(); \ + } \ + \ + return; \ + } \ + \ + CK_CC_INLINE static void \ + ck_cohort_rw_##N##_write_unlock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ + CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ + void *local_context) \ + { \ + (void)rw_cohort; \ + CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ + } \ + \ + CK_CC_INLINE static void \ + ck_cohort_rw_##N##_read_lock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ + CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ + void *local_context) \ + { \ + unsigned int wait_count = 0; \ + bool raised = false; \ + start: \ + ck_pr_inc_uint(&rw_cohort->read_counter); \ + if (CK_COHORT_LOCKED(N, cohort, global_context, local_context) \ + == true) { \ + ck_pr_dec_uint(&rw_cohort->read_counter); \ + while (CK_COHORT_LOCKED(N, cohort, global_context, \ + local_context) == true) { \ + ck_pr_stall(); \ + if (++wait_count > rw_cohort->wait_limit \ + && raised == false) { \ + ck_pr_inc_uint( \ + 
&rw_cohort->write_barrier); \ + raised = true; \ + } \ + } \ + goto start; \ + } \ + \ + if (raised == true) { \ + ck_pr_dec_uint(&rw_cohort->write_barrier); \ + } \ + \ + return; \ + } \ + \ + CK_CC_INLINE static void \ + ck_cohort_rw_##N##_read_unlock(CK_COHORT_RW_INSTANCE(N) *cohort) \ + { \ + ck_pr_dec_uint(&cohort->read_counter); \ + } + +#define CK_COHORT_RW_INITIALIZER { \ + .cohort = NULL, \ + .read_counter = 0, \ + .write_barrier = 0, \ + .wait_limit = 0 \ +} + +#endif /* _CK_COHORT_RW_H */ diff --git a/regressions/ck_cohort_rw/ck_cohort.h b/regressions/ck_cohort_rw/ck_cohort.h new file mode 100644 index 0000000..847544c --- /dev/null +++ b/regressions/ck_cohort_rw/ck_cohort.h @@ -0,0 +1,25 @@ +#define LOCK_NAME "ck_cohort" +#define LOCK_DEFINE\ + static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;\ + static ck_spinlock_fas_t local_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;\ + static void\ + ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)\ + {\ + (void)context;\ + ck_spinlock_fas_lock(lock);\ + }\ +\ + static void\ + ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)\ + {\ + (void)context;\ + ck_spinlock_fas_unlock(lock);\ + }\ + CK_COHORT_PROTOTYPE(fas_fas,\ + ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context,\ + ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context)\ + static CK_COHORT_INSTANCE(fas_fas) CK_CC_CACHELINE cohort = CK_COHORT_INITIALIZER +#define LOCK_INIT CK_COHORT_INIT(fas_fas, &cohort, &global_fas_lock, &local_fas_lock,\ + CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT) +#define LOCK CK_COHORT_LOCK(fas_fas, &cohort, NULL, NULL) +#define UNLOCK CK_COHORT_UNLOCK(fas_fas, &cohort, NULL, NULL) diff --git a/regressions/ck_cohort_rw/validate/Makefile b/regressions/ck_cohort_rw/validate/Makefile new file mode 100644 index 0000000..2582598 --- /dev/null +++ b/regressions/ck_cohort_rw/validate/Makefile @@ -0,0 +1,17 @@ +.PHONY: check clean distribution + +OBJECTS=validate + +all: $(OBJECTS) + +validate: validate.c ../../../include/ck_cohort_rw.h + $(CC) $(CFLAGS) -o validate validate.c -g + +check: all + ./validate $(CORES) 1 + +clean: + rm -rf *.dSYM *~ *.o $(OBJECTS) + +include ../../../build/regressions.build +CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE diff --git a/regressions/ck_cohort_rw/validate/validate b/regressions/ck_cohort_rw/validate/validate new file mode 100755 index 0000000000000000000000000000000000000000..c8f46cc62658105cb8fa2762df7aa17aae1abb03 GIT binary patch literal 30672 zcmeHwdwf*Yz3z#{0NOsxEwK62mV*xOVc@+Ms4-o&-7aDH3 zX-o89-gv2Y>^ryo+wkA@{^m`R&&EI5ko7Vh%WM*T?#UkR8o<)Ga*$M47{L7M5cKvT z=-(WI{%=FjDNG;!om(>qgT=FT2zqD;`r;wzly4vYo!b>44CZ(D5cHcsA1TI(PTzV! zLU2D3q0ol*Xh$fX2*(nkkO(c`6j~kG&>BxfVvF0t@pvRI&?Pz(EwM705;*mtSxw*BYHL*pshuhks zjo>#yP&8i`YlXp3Xj5}%thFQ29BORYDB@e8CCtT5dL;Ep`gMcC5b-tOwJ0gSrT9;dZyiDWClxI*j&NN(2b?wye}yPXo;Fk7 zCFKb}HgP=_vQOLhSI#$$ZPdT5O|`>t5X@b`$rlh#sXKmH;^PUYlpXJtcpl-fDULrZ z@eIN#MaTC^+(kI0=J+0oe^>yVn(p{EiJv5#Qgpmi;%^a7sX4x0;;$1q9sTZ{UGorIu&2v-oMg&Q^<59CUA^5u?DiakX3nkIlK+L= zvQtu`?+VG!x%Dm7G3VQT-P50hSg`Tk;O+(Q!h-0lhCpyvZ*}*Go^^v9AUKOM0e909QAD9DMr!dip;2yR;YV_Jlyq*?Bp1 z%HAz|W$CVWyOtlI%=H~1k?yyG`sf7^2_ACoN3#aI7a4~Z$sOa+A{zh2p+yB|aW2($ z$1_@R#}9pTC!8MZ6I&laSU&^sIHleUs|jKC{eVpOc)v%iRK(X1YaduW-lxFX^<0;~ zru)sZSN4=WhpKjF2H^cqeZTvdQkk>sF3X!ScfzgY?I=u!2k~}^>8*-bbF4rP=fc|z z@;17byuH|ULD?(KITM!%QQfnkuY_>_r*ey|eHbmE9i( zdoEf`nb}=aXvnrZw4~6WeLM&^xxw8_3Vontl`2X>->#4P`wyU*kY7Ny`mI60Rz~opW6~M=MLkRgdq8_? 
zq~}@3nSu0t95>e6NfOrac04{V*qJ)svd827K>8T#`$hxlW3Bh^4Wt)fHphB9Ny57A z`%`wNF2>tY3)Tglji+yV+F7_Mql*dY!l39v9sC?zU8sXf8r|ohn5Zt!PovZR<3hU| z8G!*j8VB6s&;0|4K+eU#CequBjtd%AQ99d;hFxut_F4)$`Sm&c(n>xJdETt@&-?c^ zioQ+JdED5n?Cf#a87lsr(oUZ6Ir`P@l8*kF7XXyn_5vI%{%?X#@#i}F|I?rkmbVv_ ze6Nc00pPUJ^#s}B>2dNM_Mn;PA>+iIj{5!`5qk8$H)fC6QdmYG>@a{`(x=4@{6VY^!<@uc9@6!2$g3( zw$MxTcvVr{#IpwnvveYpB??$_S}Js17F#j(GCSK3J*_vou7gZVuQ`bby4ppOvQ zw>O87ccUV?gTylu^ug>;0o`XetYEsH7)-tz^pUOtvEEUy&l4T*&*O2M+c6^TpdYFS zPjnqdTXG2d-Jnz64m<4Ms_Z!L8+}d5JM(<6lIPNR!rWx~*s z0od}zHda$Kq~`p~8&aWgax*O_>}-o9B2E6;v#^+%7@^jt&4kSq<~^;ZWP3X;dy#&Y zu3RGBRIRSOWNC;oa>H=Si@f_Kz3|c}#ES{(<^mzL-XLsW6=Z(&MWogysR{LRv5kt| zTB8xdA_T1Qz#5|P7IPKFlFL_CUQ)dnvD!H16#0-jVLr5~s%p*Bx=>x^qMD_YF8Q^@ z#&{B|jc`M6;71z+0sFTT2Yzrd#iMU2(%n1rn~Ne8k+O&M zM{7q)mOkAW3fY{fuRS^?27IKFZj?sboFYx}Xb2y_X~M!MS}-72^Mq*Gg*_LPJ@wT| zhrV2=p)wl6-AYwyxd6fZjO9QDPAGl$av*20Z)nojFb7JRAG;h#lZ~qui45fY7kC2- AJOBUy literal 0 HcmV?d00001 diff --git a/regressions/ck_cohort_rw/validate/validate.c b/regressions/ck_cohort_rw/validate/validate.c new file mode 100644 index 0000000..4854f65 --- /dev/null +++ b/regressions/ck_cohort_rw/validate/validate.c @@ -0,0 +1,208 @@ +/* + * Copyright 2013 Samy Al Bahra. + * Copything 2013 Brendon Scheinman. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "../../common.h" + +#ifndef ITERATE +#define ITERATE 1000000 +#endif + +static struct affinity a; +static unsigned int locked; +static int nthr; +static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER; + +static void +ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context) +{ + (void)context; + ck_spinlock_fas_lock(lock); +} + +static void +ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context) +{ + (void)context; + ck_spinlock_fas_unlock(lock); +} + +static bool +ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) +{ + (void)context; + return ck_spinlock_fas_locked(lock); +} + +CK_COHORT_PROTOTYPE(fas_fas, + ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context, + ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) +CK_COHORT_RW_PROTOTYPE(fas_fas) + +static CK_COHORT_INSTANCE(fas_fas) *cohorts; +static CK_COHORT_RW_INSTANCE(fas_fas) rw_cohort = CK_COHORT_RW_INITIALIZER; +static int n_cohorts; + +static void * +thread(void *null CK_CC_UNUSED) +{ + int i = ITERATE; + unsigned int l; + unsigned int core; + CK_COHORT_INSTANCE(fas_fas) *cohort; + + if (aff_iterate_core(&a, &core)) { + perror("ERROR: Could not affine thread"); + exit(EXIT_FAILURE); + } + + cohort = cohorts + (core / (int)(a.delta)) % n_cohorts; + + while (i--) { + CK_COHORT_RW_WRITE_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + { + l = ck_pr_load_uint(&locked); + if (l != 0) { + ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l); + } + + ck_pr_inc_uint(&locked); + ck_pr_inc_uint(&locked); + ck_pr_inc_uint(&locked); + ck_pr_inc_uint(&locked); + ck_pr_inc_uint(&locked); + ck_pr_inc_uint(&locked); + ck_pr_inc_uint(&locked); + ck_pr_inc_uint(&locked); + + l = ck_pr_load_uint(&locked); + if (l != 8) { + ck_error("ERROR [WR:%d]: %u != 2\n", __LINE__, l); + } + + ck_pr_dec_uint(&locked); + ck_pr_dec_uint(&locked); + ck_pr_dec_uint(&locked); + ck_pr_dec_uint(&locked); + ck_pr_dec_uint(&locked); + ck_pr_dec_uint(&locked); + ck_pr_dec_uint(&locked); + ck_pr_dec_uint(&locked); + + l = ck_pr_load_uint(&locked); + if (l != 0) { + ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l); + } + } + CK_COHORT_RW_WRITE_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + { + l = ck_pr_load_uint(&locked); + if (l != 0) { + ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l); + } + } + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + } + + return (NULL); +} + +int +main(int argc, char *argv[]) +{ + pthread_t *threads; + int threads_per_cohort; + ck_spinlock_fas_t *local_lock; + int i; + + if (argc != 4) { + ck_error("Usage: validate \n"); + } + + n_cohorts = atoi(argv[1]); + if (n_cohorts <= 0) { + ck_error("ERROR: Number of cohorts must be greater than 0\n"); + } + + threads_per_cohort = atoi(argv[2]); + if (threads_per_cohort <= 0) { + ck_error("ERROR: Threads per cohort must be greater than 0\n"); + } + + nthr = n_cohorts * threads_per_cohort; + + threads = malloc(sizeof(pthread_t) * nthr); + if (threads == NULL) { + ck_error("ERROR: Could not allocate thread structures\n"); + } + + a.delta = atoi(argv[3]); + + fprintf(stderr, "Creating cohorts..."); + cohorts = malloc(sizeof(CK_COHORT_INSTANCE(fas_fas)) * n_cohorts); + if (cohorts == NULL) { + 
ck_error("ERROR: Could not allocate base cohort structures\n"); + } + for (i = 0 ; i < n_cohorts ; i++) { + local_lock = malloc(sizeof(ck_spinlock_fas_t)); + CK_COHORT_INIT(fas_fas, cohorts + i, &global_fas_lock, local_lock, + CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT); + } + fprintf(stderr, "done\n"); + + fprintf(stderr, "Creating threads..."); + for (i = 0; i < nthr; i++) { + if (pthread_create(&threads[i], NULL, thread, NULL)) { + ck_error("ERROR: Could not create thread %d\n", i); + } + } + fprintf(stderr, "done\n"); + + fprintf(stderr, "Waiting for threads to finish correctness regression..."); + for (i = 0; i < nthr; i++) + pthread_join(threads[i], NULL); + fprintf(stderr, "done (passed)\n"); + + return (0); +} + From 114c916feab68ca95e9f22925f4114ba76078c9f Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Sat, 9 Mar 2013 19:28:02 -0600 Subject: [PATCH 02/20] ck_cohort_rw: Cleaned up regression files from last commit --- .gitignore | 1 + regressions/ck_cohort_rw/ck_cohort.h | 25 --------------------- regressions/ck_cohort_rw/validate/validate | Bin 30672 -> 0 bytes 3 files changed, 1 insertion(+), 25 deletions(-) delete mode 100644 regressions/ck_cohort_rw/ck_cohort.h delete mode 100755 regressions/ck_cohort_rw/validate/validate diff --git a/.gitignore b/.gitignore index 8ce486d..0193686 100644 --- a/.gitignore +++ b/.gitignore @@ -142,3 +142,4 @@ regressions/ck_queue/validate/ck_slist regressions/ck_cohort/validate/validate regressions/ck_cohort/benchmark/ck_cohort.LATENCY regressions/ck_cohort/benchmark/ck_cohort.THROUGHPUT +regressions/ck_cohort_rw/validate/validate diff --git a/regressions/ck_cohort_rw/ck_cohort.h b/regressions/ck_cohort_rw/ck_cohort.h deleted file mode 100644 index 847544c..0000000 --- a/regressions/ck_cohort_rw/ck_cohort.h +++ /dev/null @@ -1,25 +0,0 @@ -#define LOCK_NAME "ck_cohort" -#define LOCK_DEFINE\ - static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;\ - static ck_spinlock_fas_t local_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;\ - static void\ - ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)\ - {\ - (void)context;\ - ck_spinlock_fas_lock(lock);\ - }\ -\ - static void\ - ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)\ - {\ - (void)context;\ - ck_spinlock_fas_unlock(lock);\ - }\ - CK_COHORT_PROTOTYPE(fas_fas,\ - ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context,\ - ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context)\ - static CK_COHORT_INSTANCE(fas_fas) CK_CC_CACHELINE cohort = CK_COHORT_INITIALIZER -#define LOCK_INIT CK_COHORT_INIT(fas_fas, &cohort, &global_fas_lock, &local_fas_lock,\ - CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT) -#define LOCK CK_COHORT_LOCK(fas_fas, &cohort, NULL, NULL) -#define UNLOCK CK_COHORT_UNLOCK(fas_fas, &cohort, NULL, NULL) diff --git a/regressions/ck_cohort_rw/validate/validate b/regressions/ck_cohort_rw/validate/validate deleted file mode 100755 index c8f46cc62658105cb8fa2762df7aa17aae1abb03..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 30672 zcmeHwdwf*Yz3z#{0NOsxEwK62mV*xOVc@+Ms4-o&-7aDH3 zX-o89-gv2Y>^ryo+wkA@{^m`R&&EI5ko7Vh%WM*T?#UkR8o<)Ga*$M47{L7M5cKvT z=-(WI{%=FjDNG;!om(>qgT=FT2zqD;`r;wzly4vYo!b>44CZ(D5cHcsA1TI(PTzV! 
From 77ff96b15aaebd03f97d5166dd9c550e486c7d08 Mon Sep 17 00:00:00 2001
From: Brendon Scheinman
Date: Sat, 9 Mar 2013 21:55:07 -0600
Subject: [PATCH 03/20] ck_cohort_rw: Added latency and throughput benchmark regression tests

---
 .gitignore                                   |   2 +
 regressions/ck_cohort_rw/benchmark/Makefile  |  17 ++
 regressions/ck_cohort_rw/benchmark/latency.c | 106 ++++++++
 .../ck_cohort_rw/benchmark/throughput.c      | 245 ++++++++++++++++++
 4 files changed, 370 insertions(+)
 create mode 100644 regressions/ck_cohort_rw/benchmark/Makefile
 create mode 100644 regressions/ck_cohort_rw/benchmark/latency.c
 create mode 100644 regressions/ck_cohort_rw/benchmark/throughput.c

diff --git a/.gitignore b/.gitignore
index 0193686..7d8c142 100644
--- a/.gitignore
+++
b/.gitignore @@ -143,3 +143,5 @@ regressions/ck_cohort/validate/validate regressions/ck_cohort/benchmark/ck_cohort.LATENCY regressions/ck_cohort/benchmark/ck_cohort.THROUGHPUT regressions/ck_cohort_rw/validate/validate +regressions/ck_cohort_rw/benchmark/latency +regressions/ck_cohort_rw/benchmark/throughput diff --git a/regressions/ck_cohort_rw/benchmark/Makefile b/regressions/ck_cohort_rw/benchmark/Makefile new file mode 100644 index 0000000..8531fb9 --- /dev/null +++ b/regressions/ck_cohort_rw/benchmark/Makefile @@ -0,0 +1,17 @@ +.PHONY: clean distribution + +OBJECTS=latency throughput + +all: $(OBJECTS) + +latency: latency.c ../../../include/ck_cohort_rw.h + $(CC) $(CFLAGS) -o latency latency.c + +throughput: throughput.c ../../../include/ck_cohort_rw.h + $(CC) $(CFLAGS) -o throughput throughput.c + +clean: + rm -rf *.dSYM *~ *.o $(OBJECTS) + +include ../../../build/regressions.build +CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE diff --git a/regressions/ck_cohort_rw/benchmark/latency.c b/regressions/ck_cohort_rw/benchmark/latency.c new file mode 100644 index 0000000..67e2803 --- /dev/null +++ b/regressions/ck_cohort_rw/benchmark/latency.c @@ -0,0 +1,106 @@ +/* + * Copyright 2013 Samy Al Bahra. + * Copyright 2013 Brendon Scheinman. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include + +#include "../../common.h" + +#ifndef STEPS +#define STEPS 1000000 +#endif + +static void +ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context) +{ + (void)context; + ck_spinlock_fas_lock(lock); +} + +static void +ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context) +{ + (void)context; + ck_spinlock_fas_unlock(lock); +} + +static bool +ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) +{ + (void)context; + return ck_spinlock_fas_locked(lock); +} + +CK_COHORT_PROTOTYPE(fas_fas, + ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context, + ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) +CK_COHORT_RW_PROTOTYPE(fas_fas) + +int +main(void) +{ + uint64_t s_b, e_b, i; + ck_spinlock_fas_t global_lock = CK_SPINLOCK_FAS_INITIALIZER; + ck_spinlock_fas_t local_lock = CK_SPINLOCK_FAS_INITIALIZER; + CK_COHORT_INSTANCE(fas_fas) cohort = CK_COHORT_INITIALIZER; + CK_COHORT_RW_INSTANCE(fas_fas) rw_cohort = CK_COHORT_RW_INITIALIZER; + + CK_COHORT_INIT(fas_fas, &cohort, &global_lock, &local_lock, + CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT); + CK_COHORT_RW_INIT(fas_fas, &rw_cohort, CK_COHORT_RW_DEFAULT_WAIT_LIMIT); + + for (i = 0; i < STEPS; i++) { + CK_COHORT_RW_WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_COHORT_RW_WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + } + + s_b = rdtsc(); + for (i = 0; i < STEPS; i++) { + CK_COHORT_RW_WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_COHORT_RW_WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + } + e_b = rdtsc(); + printf("WRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS); + + for (i = 0; i < STEPS; i++) { + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + } + + s_b = rdtsc(); + for (i = 0; i < STEPS; i++) { + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + } + e_b = rdtsc(); + printf("READ: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS); + + return (0); +} + diff --git a/regressions/ck_cohort_rw/benchmark/throughput.c b/regressions/ck_cohort_rw/benchmark/throughput.c new file mode 100644 index 0000000..591351e --- /dev/null +++ b/regressions/ck_cohort_rw/benchmark/throughput.c @@ -0,0 +1,245 @@ +/* + * Copyright 2013 Samy Al Bahra. + * Copyright 2013 Brendon Scheinman. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../../common.h" + +#define max(x, y) (((x) > (y)) ? (x) : (y)) + +#ifndef STEPS +#define STEPS 1000000 +#endif + +static unsigned int barrier; +static unsigned int flag CK_CC_CACHELINE; +static struct affinity affinity; +static unsigned int nthr; + +static void +ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context) +{ + + (void)context; + ck_spinlock_fas_lock(lock); + return; +} + +static void +ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context) +{ + + (void)context; + ck_spinlock_fas_unlock(lock); + return; +} + +static bool +ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) +{ + + (void)context; + return ck_spinlock_fas_locked(lock); +} + +CK_COHORT_PROTOTYPE(fas_fas, + ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context, + ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) +CK_COHORT_RW_PROTOTYPE(fas_fas) + +struct cohort_record { + CK_COHORT_INSTANCE(fas_fas) cohort; +} CK_CC_CACHELINE; +static struct cohort_record *cohorts; + +static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER; +static CK_COHORT_RW_INSTANCE(fas_fas) rw_cohort = CK_COHORT_RW_INITIALIZER; +static unsigned int n_cohorts; + +struct block { + unsigned int tid; +}; + +static void * +thread_rwlock(void *pun) +{ + uint64_t s_b, e_b, a, i; + uint64_t *value = pun; + CK_COHORT_INSTANCE(fas_fas) *cohort; + unsigned int core; + + if (aff_iterate_core(&affinity, &core) != 0) { + perror("ERROR: Could not affine thread"); + exit(EXIT_FAILURE); + } + + cohort = &((cohorts + (core / (int)(affinity.delta)) % n_cohorts)->cohort); + + ck_pr_inc_uint(&barrier); + while (ck_pr_load_uint(&barrier) != nthr) + ck_pr_stall(); + + for (i = 1, a = 0;; i++) { + s_b = rdtsc(); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + 
CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + e_b = rdtsc(); + + a += (e_b - s_b) >> 4; + + if (ck_pr_load_uint(&flag) == 1) + break; + } + + ck_pr_inc_uint(&barrier); + while (ck_pr_load_uint(&barrier) != nthr * 2) + ck_pr_stall(); + + *value = (a / i); + return NULL; +} + +int +main(int argc, char *argv[]) +{ + unsigned int i; + pthread_t *threads; + uint64_t *latency; + struct block *context; + ck_spinlock_fas_t *local_lock; + + if (argc != 4) { + ck_error("Usage: throughput \n"); + } + + n_cohorts = atoi(argv[1]); + if (n_cohorts <= 0) { + ck_error("ERROR: Number of cohorts must be greater than 0\n"); + } + + nthr = n_cohorts * atoi(argv[2]); + if (nthr <= 0) { + ck_error("ERROR: Number of threads must be greater than 0\n"); + } + + threads = malloc(sizeof(pthread_t) * nthr); + if (threads == NULL) { + ck_error("ERROR: Could not allocate thread structures\n"); + } + + cohorts = malloc(sizeof(struct cohort_record) * n_cohorts); + if (cohorts == NULL) { + ck_error("ERROR: Could not allocate cohort structures\n"); + } + + context = malloc(sizeof(struct block) * nthr); + if (context == NULL) { + ck_error("ERROR: Could not allocate thread contexts\n"); + } + + affinity.delta = atoi(argv[3]); + affinity.request = 0; + + latency = malloc(sizeof(*latency) * nthr); + if (latency == NULL) { + ck_error("ERROR: Could not create latency buffer\n"); + } + memset(latency, 0, sizeof(*latency) * nthr); + + fprintf(stderr, "Creating cohorts..."); + for (i = 0 ; i < n_cohorts ; i++) { + local_lock = malloc(max(CK_MD_CACHELINE, sizeof(ck_spinlock_fas_t))); + if (local_lock == NULL) { + ck_error("ERROR: Could not allocate local lock\n"); + } + CK_COHORT_INIT(fas_fas, &((cohorts + i)->cohort), &global_lock, local_lock, + CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT); + local_lock = NULL; + } + fprintf(stderr, "done\n"); + + fprintf(stderr, "Creating threads (rwlock)..."); + for (i = 0; i < nthr; i++) { + if (pthread_create(&threads[i], NULL, thread_rwlock, latency + i) != 0) { + ck_error("ERROR: Could not create thread %d\n", i); + } + } + fprintf(stderr, "done\n"); + + common_sleep(10); + ck_pr_store_uint(&flag, 1); + + fprintf(stderr, "Waiting for threads to finish acquisition regression..."); + for (i = 0; i < nthr; i++) + pthread_join(threads[i], NULL); + fprintf(stderr, "done\n\n"); + + for (i = 1; i <= nthr; i++) + printf("%10u %20" PRIu64 "\n", i, latency[i - 1]); + + return (0); +} + From 3fefa3b5a3c5d727ce38e1fcb973b67fc8dd8b9b Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Sat, 9 Mar 2013 22:10:52 -0600 Subject: [PATCH 04/20] ck_cohort: Fixed regression tests based on ck_cohort_rw changes --- regressions/ck_cohort/benchmark/throughput.c | 20 ++++++--- regressions/ck_cohort/ck_cohort.h | 45 ++++++++++++-------- 
regressions/ck_cohort/validate/validate.c | 13 +++++- 3 files changed, 52 insertions(+), 26 deletions(-) diff --git a/regressions/ck_cohort/benchmark/throughput.c b/regressions/ck_cohort/benchmark/throughput.c index 9242408..f94be74 100644 --- a/regressions/ck_cohort/benchmark/throughput.c +++ b/regressions/ck_cohort/benchmark/throughput.c @@ -60,26 +60,34 @@ static unsigned int barrier; static int critical CK_CC_CACHELINE; static void -ck_spinlock_lock_with_context(ck_spinlock_t *lock, void *context) +ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context) { (void)context; - ck_spinlock_lock(lock); + ck_spinlock_fas_lock(lock); return; } static void -ck_spinlock_unlock_with_context(ck_spinlock_t *lock, void *context) +ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context) { (void)context; - ck_spinlock_unlock(lock); + ck_spinlock_fas_unlock(lock); return; } +static bool +ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) +{ + + (void)context; + return ck_spinlock_fas_locked(lock); +} + CK_COHORT_PROTOTYPE(basic, - ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context, - ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context) + ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context, + ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) struct cohort_record { CK_COHORT_INSTANCE(basic) cohort; diff --git a/regressions/ck_cohort/ck_cohort.h b/regressions/ck_cohort/ck_cohort.h index 847544c..8427d6b 100644 --- a/regressions/ck_cohort/ck_cohort.h +++ b/regressions/ck_cohort/ck_cohort.h @@ -1,24 +1,33 @@ #define LOCK_NAME "ck_cohort" #define LOCK_DEFINE\ - static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;\ - static ck_spinlock_fas_t local_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;\ - static void\ - ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)\ - {\ - (void)context;\ - ck_spinlock_fas_lock(lock);\ - }\ -\ - static void\ - ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)\ - {\ - (void)context;\ - ck_spinlock_fas_unlock(lock);\ - }\ - CK_COHORT_PROTOTYPE(fas_fas,\ - ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context,\ - ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context)\ + static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER; \ + static ck_spinlock_fas_t local_fas_lock = CK_SPINLOCK_FAS_INITIALIZER; \ + static void \ + ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context) \ + { \ + (void)context; \ + ck_spinlock_fas_lock(lock); \ + } \ + \ + static void \ + ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context) \ + { \ + (void)context; \ + ck_spinlock_fas_unlock(lock); \ + } \ + \ + static bool \ + ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) \ + { \ + (void)context; \ + return ck_spinlock_fas_locked(lock); \ + } \ + CK_COHORT_PROTOTYPE(fas_fas, \ + ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, \ + ck_spinlock_fas_locked_with_context, ck_spinlock_fas_lock_with_context, \ + ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) \ static CK_COHORT_INSTANCE(fas_fas) CK_CC_CACHELINE cohort = CK_COHORT_INITIALIZER + #define LOCK_INIT CK_COHORT_INIT(fas_fas, &cohort, &global_fas_lock, &local_fas_lock,\ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT) #define LOCK 
CK_COHORT_LOCK(fas_fas, &cohort, NULL, NULL) diff --git a/regressions/ck_cohort/validate/validate.c b/regressions/ck_cohort/validate/validate.c index de6d914..df1ac6c 100644 --- a/regressions/ck_cohort/validate/validate.c +++ b/regressions/ck_cohort/validate/validate.c @@ -59,6 +59,13 @@ ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context) ck_spinlock_fas_unlock(lock); } +static bool +ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) +{ + (void)context; + return ck_spinlock_fas_locked(lock); +} + static bool ck_spinlock_fas_trylock_with_context(ck_spinlock_fas_t *lock, void *context) { @@ -67,8 +74,10 @@ ck_spinlock_fas_trylock_with_context(ck_spinlock_fas_t *lock, void *context) } CK_COHORT_TRYLOCK_PROTOTYPE(fas_fas, - ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_trylock_with_context, - ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_trylock_with_context) + ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, + ck_spinlock_fas_locked_with_context, ck_spinlock_fas_trylock_with_context, + ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, + ck_spinlock_fas_locked_with_context, ck_spinlock_fas_trylock_with_context) static CK_COHORT_INSTANCE(fas_fas) *cohorts; static int n_cohorts; From 695e29fd7bb09a1b32915d280e544a024895b000 Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Sat, 9 Mar 2013 22:27:47 -0600 Subject: [PATCH 05/20] ck_cohort: Updated documentation to reflect ck_cohort_rw changes --- doc/CK_COHORT_INIT | 1 + doc/CK_COHORT_INSTANCE | 1 + doc/CK_COHORT_LOCK | 1 + doc/CK_COHORT_PROTOTYPE | 8 ++++++++ doc/CK_COHORT_TRYLOCK | 2 ++ doc/CK_COHORT_TRYLOCK_PROTOTYPE | 16 ++++++++++++---- doc/CK_COHORT_UNLOCK | 1 + doc/ck_cohort | 28 +++++++++++++++++++--------- 8 files changed, 45 insertions(+), 13 deletions(-) diff --git a/doc/CK_COHORT_INIT b/doc/CK_COHORT_INIT index 5f367de..ed97487 100644 --- a/doc/CK_COHORT_INIT +++ b/doc/CK_COHORT_INIT @@ -60,6 +60,7 @@ argument, you should use CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT. .Xr CK_COHORT_INITIALIZER 3 , .Xr CK_COHORT_LOCK 3 , .Xr CK_COHORT_UNLOCK 3 , +.Xr CK_COHORT_LOCKED 3 , .Xr CK_COHORT_TRYLOCK 3 , .Pp Additional information available at http://concurrencykit.org/ diff --git a/doc/CK_COHORT_INSTANCE b/doc/CK_COHORT_INSTANCE index ca622a0..ee75465 100644 --- a/doc/CK_COHORT_INSTANCE +++ b/doc/CK_COHORT_INSTANCE @@ -53,6 +53,7 @@ CK_COHORT_INSTANCE(foo) *cohorts = malloc(4 * sizeof(CK_COHORT_INSTANCE(foo))); .Xr CK_COHORT_INITIALIZER 3 , .Xr CK_COHORT_LOCK 3 , .Xr CK_COHORT_UNLOCK 3 , +.Xr CK_COHORT_LOCKED 3 , .Xr CK_COHORT_TRYLOCK 3 , .Pp Additional information available at http://concurrencykit.org/ diff --git a/doc/CK_COHORT_LOCK b/doc/CK_COHORT_LOCK index a1e42e5..7f5e558 100644 --- a/doc/CK_COHORT_LOCK +++ b/doc/CK_COHORT_LOCK @@ -55,6 +55,7 @@ will be passed to the function specified by .Xr CK_COHORT_INITIALIZER 3 , .Xr CK_COHORT_INIT 3 , .Xr CK_COHORT_UNLOCK 3 , +.Xr CK_COHORT_LOCKED 3 , .Xr CK_COHORT_TRYLOCK 3 , .Pp Additional information available at http://concurrencykit.org/ diff --git a/doc/CK_COHORT_PROTOTYPE b/doc/CK_COHORT_PROTOTYPE index e8766af..f0977e0 100644 --- a/doc/CK_COHORT_PROTOTYPE +++ b/doc/CK_COHORT_PROTOTYPE @@ -53,11 +53,17 @@ of the other CK_COHORT macros. 
.Fa global_unlock_method : The method that should be called to relinquish the global lock .br +.Fa global_locked_method +: This method should return true iff the global lock is acquired by a thread. +.br .Fa local_lock_method : The method that should be called to acquire the local lock .br .Fa local_unlock_method : The method that should be called to relinquish the local lock +.br +.Fa global_locked_method +: This method should return true iff the local lock is acquired by a thread. .Pp Instances of the defined cohort type can be declared as: .br @@ -70,5 +76,7 @@ Instances of the defined cohort type can be declared as: .Xr CK_COHORT_INIT 3 , .Xr CK_COHORT_LOCK 3 , .Xr CK_COHORT_UNLOCK 3 , +.Xr CK_COHORT_LOCKED 3 , +.Xr CK_COHORT_TRYLOCK 3 , .Pp Additional information available at http://concurrencykit.org/ diff --git a/doc/CK_COHORT_TRYLOCK b/doc/CK_COHORT_TRYLOCK index 55959df..6330642 100644 --- a/doc/CK_COHORT_TRYLOCK +++ b/doc/CK_COHORT_TRYLOCK @@ -62,6 +62,8 @@ when this call is made. .Xr CK_COHORT_INSTANCE 3 , .Xr CK_COHORT_INITIALIZER 3 , .Xr CK_COHORT_INIT 3 , +.Xr CK_COHORT_LOCK 3 , .Xr CK_COHORT_UNLOCK 3 , +.Xr CK_COHORT_LOCKED 3 , .Pp Additional information available at http://concurrencykit.org/ diff --git a/doc/CK_COHORT_TRYLOCK_PROTOTYPE b/doc/CK_COHORT_TRYLOCK_PROTOTYPE index 62f3e68..6a038e5 100644 --- a/doc/CK_COHORT_TRYLOCK_PROTOTYPE +++ b/doc/CK_COHORT_TRYLOCK_PROTOTYPE @@ -34,13 +34,13 @@ Concurrency Kit (libck, \-lck) .Sh SYNOPSIS .In ck_cohort.h .Fn CK_COHORT_TRYLOCK_PROTOTYPE "COHORT_NAME cohort_name" "LOCK_FXN global_lock_method" \ -"LOCK_FXN global_unlock_method" "TRYLOCK_FXN global_trylock_method" \ -"LOCK_FXN local_lock_method" "LOCK_FXN local_unlock_method" "TRYLOCK_FXN local_trylock_method" +"LOCK_FXN global_unlock_method" "BOOL_LOCK_FXN global_locked_method" \ +"BOOL_LOCK_FXN global_trylock_method" "LOCK_FXN local_lock_method" \ +"LOCK_FXN local_unlock_method" "BOOL_LOCK_FXN local_locked_method" "BOOL_LOCK_FXN local_trylock_method" .Sh DESCRIPTION The ck_cohort.h header file does not define any cohort types. Instead, the user must use the CK_COHORT_PROTOTYPE or CK_COHORT_TRYLOCK_PROTOTYPE macros to define any types -they want to use. -They must use CK_COHORT_TRYLOCK_PROTOTYPE if they want their cohort type to have support +they want to use. They must use CK_COHORT_TRYLOCK_PROTOTYPE if they want their cohort type to have support for trylock operations. The CK_COHORT_TRYLOCK_PROTOTYPE macro takes the following arguments: .Pp .Fa cohort_name @@ -53,6 +53,9 @@ of the other CK_COHORT macros. .Fa global_unlock_method : The method that should be called to relinquish the global lock .br +.Fa global_locked_method +: This method should return true iff the global lock is acquired by a thread. +.br .Fa global_trylock_method : The method that should be called to try to acquire the global lock. It should not block and return true iff the lock was successfully acquired. @@ -63,6 +66,9 @@ It should not block and return true iff the lock was successfully acquired. .Fa local_unlock_method : The method that should be called to relinquish the local lock .br +.Fa global_locked_method +: This method should return true iff the global lock is acquired by a thread. +.br .Fa local_trylock_method : The method that should be called to try to acquire the local lock. It should not block and return true iff the lock was successfully acquired. 
@@ -78,5 +84,7 @@ Instances of the defined cohort type can be declared as: .Xr CK_COHORT_INIT 3 , .Xr CK_COHORT_LOCK 3 , .Xr CK_COHORT_UNLOCK 3 , +.Xr CK_COHORT_LOCKED 3 , +.Xr CK_COHORT_TRYLOCK 3 , .Pp Additional information available at http://concurrencykit.org/ diff --git a/doc/CK_COHORT_UNLOCK b/doc/CK_COHORT_UNLOCK index 7dca173..ffecaa4 100644 --- a/doc/CK_COHORT_UNLOCK +++ b/doc/CK_COHORT_UNLOCK @@ -55,6 +55,7 @@ will be passed to the function specified by .Xr CK_COHORT_INITIALIZER 3 , .Xr CK_COHORT_INIT 3 , .Xr CK_COHORT_LOCK 3 , +.Xr CK_COHORT_LOCKED 3 , .Xr CK_COHORT_TRYLOCK 3 , .Pp Additional information available at http://concurrencykit.org/ diff --git a/doc/ck_cohort b/doc/ck_cohort index 91f6c1e..a053385 100644 --- a/doc/ck_cohort +++ b/doc/ck_cohort @@ -36,8 +36,10 @@ Concurrency Kit (libck, \-lck) .Fn CK_COHORT_PROTOTYPE "COHORT_NAME cohort_name" "LOCK_FXN global_lock_method" \ "LOCK_FXN global_unlock_method" "LOCK_FXN local_lock_method" "LOCK_FXN local_unlock_method" .Fn CK_COHORT_TRYLOCK_PROTOTYPE "COHORT_NAME cohort_name" \ -"LOCK_FXN global_lock_method" "LOCK_FXN global_unlock_method" "TRYLOCK_FXN global_trylock_method" \ -"LOCK_FXN local_lock_method" "LOCK_FXN local_unlock_method" "TRYLOCK_FXN local_trylock_method" +"LOCK_FXN global_lock_method" "LOCK_FXN global_unlock_method" \ +"BOOL_LOCK_FXN global_locked_method" "BOOL_LOCK_FXN global_trylock_method" \ +"LOCK_FXN local_lock_method" "LOCK_FXN local_unlock_method" \ +"BOOL_LOCK_FXN local_locked_method" "BOOL_LOCK_FXN local_trylock_method" .Fn CK_COHORT_INSTANCE "COHORT_NAME cohort_name" .Fn CK_COHORT_INIT "COHORT_NAME cohort_name" "ck_cohort *cohort" \ "void *global_lock" "void *local_lock" "unsigned int pass_limit" @@ -50,7 +52,7 @@ Where LOCK_FXN refers to a method with the signature .br void(void *lock, void *context) .br -and TRYLOCK_FXN refers to a method with the signature +and BOOL_LOCK_FXN refers to a method with the signature .br bool(void *lock, void *context) .Pp @@ -59,9 +61,9 @@ The argument in each signature is used to pass along any additional information that the lock might need for its lock, unlock and trylock methods. The values for this argument are provided to each call to -.Xr CK_COHORT_LOCK 3 -, -.Xr CK_COHORT_UNLOCK 3 +.Xr CK_COHORT_LOCK 3 , +.Xr CK_COHORT_UNLOCK 3 , +.Xr CK_COHORT_LOCKED 3 , and .Xr CK_COHORT_TRYLOCK 3 . @@ -94,7 +96,7 @@ man pages for more details.
#include /* - * Create lock/unlock methods with signatures that match + * Create cohort methods with signatures that match * the required signature */ static void @@ -113,13 +115,20 @@ ck_spinlock_unlock_with_context(ck_spinlock_t *lock, void *context) return; } +static bool +ck_spinlock_locked_with_context(ck_spinlock_t *lock, void *context) +{ + (void)context; + return ck_spinlock_locked(lock); +} + /* * define a cohort type named "test_cohort" that will use * the above methods for both its global and local locks */ CK_COHORT_PROTOTYPE(test_cohort, - ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context, - ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context) + ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context, ck_spinlock_locked_with_context, + ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context, ck_spinlock_locked_with_context) static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER; static unsigned int ready; @@ -197,6 +206,7 @@ main(void) .Xr CK_COHORT_INIT 3 , .Xr CK_COHORT_LOCK 3 , .Xr CK_COHORT_UNLOCK 3 , +.Xr CK_COHORT_LOCKED 3 , .Xr CK_COHORT_TRYLOCK 3 , .Pp Additional information available at http://concurrencykit.org/ From 63bfa0d3537a9cee43a586fa038de0ddd8f3b7f5 Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Sun, 10 Mar 2013 17:48:46 -0500 Subject: [PATCH 06/20] ck_cohort_rw: Reduced line wrapping --- include/ck_cohort_rw.h | 170 ++++++++++++++++++++--------------------- 1 file changed, 83 insertions(+), 87 deletions(-) diff --git a/include/ck_cohort_rw.h b/include/ck_cohort_rw.h index 471377c..7d1bbf4 100644 --- a/include/ck_cohort_rw.h +++ b/include/ck_cohort_rw.h @@ -48,95 +48,91 @@ #define CK_COHORT_RW_WRITE_UNLOCK(N, RW, C, GC, LC) ck_cohort_rw_##N##_write_unlock(RW, C, GC, LC) #define CK_COHORT_RW_DEFAULT_WAIT_LIMIT 1000 -#define CK_COHORT_RW_PROTOTYPE(N) \ - CK_COHORT_RW_INSTANCE(N) { \ - CK_COHORT_INSTANCE(N) *cohort; \ - unsigned int read_counter; \ - unsigned int write_barrier; \ - unsigned int wait_limit; \ - }; \ - \ - CK_CC_INLINE static void \ - ck_cohort_rw_##N##_init(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ - unsigned int wait_limit) \ - { \ - rw_cohort->read_counter = 0; \ - rw_cohort->write_barrier = 0; \ - rw_cohort->wait_limit = wait_limit; \ - ck_pr_barrier(); \ - return; \ - } \ - \ - CK_CC_INLINE static void \ - ck_cohort_rw_##N##_write_lock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ - CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ - void *local_context) \ - { \ - while (ck_pr_load_uint(&rw_cohort->write_barrier) > 0) { \ - ck_pr_stall(); \ - } \ - \ - CK_COHORT_LOCK(N, cohort, global_context, local_context); \ - \ - while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \ - ck_pr_stall(); \ - } \ - \ - return; \ - } \ - \ - CK_CC_INLINE static void \ - ck_cohort_rw_##N##_write_unlock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ - CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ - void *local_context) \ - { \ - (void)rw_cohort; \ - CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ - } \ - \ - CK_CC_INLINE static void \ - ck_cohort_rw_##N##_read_lock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ - CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ - void *local_context) \ - { \ - unsigned int wait_count = 0; \ - bool raised = false; \ - start: \ - ck_pr_inc_uint(&rw_cohort->read_counter); \ - if (CK_COHORT_LOCKED(N, cohort, global_context, local_context) \ - == true) { \ - ck_pr_dec_uint(&rw_cohort->read_counter); \ - while (CK_COHORT_LOCKED(N, cohort, global_context, \ - local_context) == 
true) { \ - ck_pr_stall(); \ - if (++wait_count > rw_cohort->wait_limit \ - && raised == false) { \ - ck_pr_inc_uint( \ - &rw_cohort->write_barrier); \ - raised = true; \ - } \ - } \ - goto start; \ - } \ - \ - if (raised == true) { \ - ck_pr_dec_uint(&rw_cohort->write_barrier); \ - } \ - \ - return; \ - } \ - \ - CK_CC_INLINE static void \ - ck_cohort_rw_##N##_read_unlock(CK_COHORT_RW_INSTANCE(N) *cohort) \ - { \ - ck_pr_dec_uint(&cohort->read_counter); \ +#define CK_COHORT_RW_PROTOTYPE(N) \ + CK_COHORT_RW_INSTANCE(N) { \ + CK_COHORT_INSTANCE(N) *cohort; \ + unsigned int read_counter; \ + unsigned int write_barrier; \ + unsigned int wait_limit; \ + }; \ + \ + CK_CC_INLINE static void \ + ck_cohort_rw_##N##_init(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ + unsigned int wait_limit) \ + { \ + rw_cohort->read_counter = 0; \ + rw_cohort->write_barrier = 0; \ + rw_cohort->wait_limit = wait_limit; \ + ck_pr_barrier(); \ + return; \ + } \ + \ + CK_CC_INLINE static void \ + ck_cohort_rw_##N##_write_lock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ + CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ + void *local_context) \ + { \ + while (ck_pr_load_uint(&rw_cohort->write_barrier) > 0) { \ + ck_pr_stall(); \ + } \ + \ + CK_COHORT_LOCK(N, cohort, global_context, local_context); \ + \ + while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \ + ck_pr_stall(); \ + } \ + \ + return; \ + } \ + \ + CK_CC_INLINE static void \ + ck_cohort_rw_##N##_write_unlock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ + CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ + void *local_context) \ + { \ + (void)rw_cohort; \ + CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ + } \ + \ + CK_CC_INLINE static void \ + ck_cohort_rw_##N##_read_lock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ + CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ + void *local_context) \ + { \ + unsigned int wait_count = 0; \ + bool raised = false; \ + start: \ + ck_pr_inc_uint(&rw_cohort->read_counter); \ + if (CK_COHORT_LOCKED(N, cohort, global_context, local_context) == true) { \ + ck_pr_dec_uint(&rw_cohort->read_counter); \ + while (CK_COHORT_LOCKED(N, cohort, global_context, local_context) == true) {\ + ck_pr_stall(); \ + if (++wait_count > rw_cohort->wait_limit && raised == false) { \ + ck_pr_inc_uint(&rw_cohort->write_barrier); \ + raised = true; \ + } \ + } \ + goto start; \ + } \ + \ + if (raised == true) { \ + ck_pr_dec_uint(&rw_cohort->write_barrier); \ + } \ + \ + return; \ + } \ + \ + CK_CC_INLINE static void \ + ck_cohort_rw_##N##_read_unlock(CK_COHORT_RW_INSTANCE(N) *cohort) \ + { \ + ck_pr_dec_uint(&cohort->read_counter); \ } -#define CK_COHORT_RW_INITIALIZER { \ - .cohort = NULL, \ - .read_counter = 0, \ - .write_barrier = 0, \ - .wait_limit = 0 \ +#define CK_COHORT_RW_INITIALIZER { \ + .cohort = NULL, \ + .read_counter = 0, \ + .write_barrier = 0, \ + .wait_limit = 0 \ } #endif /* _CK_COHORT_RW_H */ From 646cb2cb069e09fa27af27055e954bdabcd68ab3 Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Tue, 12 Mar 2013 18:48:27 -0500 Subject: [PATCH 07/20] ck_cohort_rw: Removed unused member variable --- include/ck_cohort_rw.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/ck_cohort_rw.h b/include/ck_cohort_rw.h index 7d1bbf4..e0ac1d1 100644 --- a/include/ck_cohort_rw.h +++ b/include/ck_cohort_rw.h @@ -50,7 +50,6 @@ #define CK_COHORT_RW_PROTOTYPE(N) \ CK_COHORT_RW_INSTANCE(N) { \ - CK_COHORT_INSTANCE(N) *cohort; \ unsigned int read_counter; \ unsigned int write_barrier; \ unsigned int wait_limit; \ @@ -129,7 +128,6 
@@ } #define CK_COHORT_RW_INITIALIZER { \ - .cohort = NULL, \ .read_counter = 0, \ .write_barrier = 0, \ .wait_limit = 0 \ From a352b46d0b0682ac5771263f3277db6f7a63c11c Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Mon, 15 Apr 2013 22:27:45 -0500 Subject: [PATCH 08/20] ck_rw_cohort: renamed ck_rw_cohort namespace --- .gitignore | 6 +- include/{ck_cohort_rw.h => ck_rw_cohort.h} | 38 +++++----- .../benchmark/Makefile | 4 +- .../benchmark/latency.c | 24 +++---- .../benchmark/throughput.c | 70 +++++++++---------- .../validate/Makefile | 2 +- .../validate/validate.c | 14 ++-- 7 files changed, 79 insertions(+), 79 deletions(-) rename include/{ck_cohort_rw.h => ck_rw_cohort.h} (80%) rename regressions/{ck_cohort_rw => ck_rw_cohort}/benchmark/Makefile (72%) rename regressions/{ck_cohort_rw => ck_rw_cohort}/benchmark/latency.c (81%) rename regressions/{ck_cohort_rw => ck_rw_cohort}/benchmark/throughput.c (74%) rename regressions/{ck_cohort_rw => ck_rw_cohort}/validate/Makefile (83%) rename regressions/{ck_cohort_rw => ck_rw_cohort}/validate/validate.c (93%) diff --git a/.gitignore b/.gitignore index 7d8c142..3d53783 100644 --- a/.gitignore +++ b/.gitignore @@ -142,6 +142,6 @@ regressions/ck_queue/validate/ck_slist regressions/ck_cohort/validate/validate regressions/ck_cohort/benchmark/ck_cohort.LATENCY regressions/ck_cohort/benchmark/ck_cohort.THROUGHPUT -regressions/ck_cohort_rw/validate/validate -regressions/ck_cohort_rw/benchmark/latency -regressions/ck_cohort_rw/benchmark/throughput +regressions/ck_rw_cohort/validate/validate +regressions/ck_rw_cohort/benchmark/latency +regressions/ck_rw_cohort/benchmark/throughput diff --git a/include/ck_cohort_rw.h b/include/ck_rw_cohort.h similarity index 80% rename from include/ck_cohort_rw.h rename to include/ck_rw_cohort.h index e0ac1d1..30ee106 100644 --- a/include/ck_cohort_rw.h +++ b/include/ck_rw_cohort.h @@ -25,8 +25,8 @@ * SUCH DAMAGE. 
*/ -#ifndef _CK_COHORT_RW_H -#define _CK_COHORT_RW_H +#ifndef _CK_RW_COHORT_H +#define _CK_RW_COHORT_H /* * This is an implementation of NUMA-aware reader-writer locks as described in: @@ -39,24 +39,24 @@ #include #include -#define CK_COHORT_RW_NAME(N) ck_cohort_rw_##N -#define CK_COHORT_RW_INSTANCE(N) struct CK_COHORT_RW_NAME(N) -#define CK_COHORT_RW_INIT(N, RW, WL) ck_cohort_rw_##N##_init(RW, WL) -#define CK_COHORT_RW_READ_LOCK(N, RW, C, GC, LC) ck_cohort_rw_##N##_read_lock(RW, C, GC, LC) -#define CK_COHORT_RW_READ_UNLOCK(N, RW) ck_cohort_rw_##N##_read_unlock(RW) -#define CK_COHORT_RW_WRITE_LOCK(N, RW, C, GC, LC) ck_cohort_rw_##N##_write_lock(RW, C, GC, LC) -#define CK_COHORT_RW_WRITE_UNLOCK(N, RW, C, GC, LC) ck_cohort_rw_##N##_write_unlock(RW, C, GC, LC) -#define CK_COHORT_RW_DEFAULT_WAIT_LIMIT 1000 +#define CK_RW_COHORT_NAME(N) ck_rw_cohort_##N +#define CK_RW_COHORT_INSTANCE(N) struct CK_RW_COHORT_NAME(N) +#define CK_RW_COHORT_INIT(N, RW, WL) ck_rw_cohort_##N##_init(RW, WL) +#define CK_RW_COHORT_READ_LOCK(N, RW, C, GC, LC) ck_rw_cohort_##N##_read_lock(RW, C, GC, LC) +#define CK_RW_COHORT_READ_UNLOCK(N, RW) ck_rw_cohort_##N##_read_unlock(RW) +#define CK_RW_COHORT_WRITE_LOCK(N, RW, C, GC, LC) ck_rw_cohort_##N##_write_lock(RW, C, GC, LC) +#define CK_RW_COHORT_WRITE_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_##N##_write_unlock(RW, C, GC, LC) +#define CK_RW_COHORT_DEFAULT_WAIT_LIMIT 1000 -#define CK_COHORT_RW_PROTOTYPE(N) \ - CK_COHORT_RW_INSTANCE(N) { \ +#define CK_RW_COHORT_PROTOTYPE(N) \ + CK_RW_COHORT_INSTANCE(N) { \ unsigned int read_counter; \ unsigned int write_barrier; \ unsigned int wait_limit; \ }; \ \ CK_CC_INLINE static void \ - ck_cohort_rw_##N##_init(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ + ck_rw_cohort_##N##_init(CK_RW_COHORT_INSTANCE(N) *rw_cohort, \ unsigned int wait_limit) \ { \ rw_cohort->read_counter = 0; \ @@ -67,7 +67,7 @@ } \ \ CK_CC_INLINE static void \ - ck_cohort_rw_##N##_write_lock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ + ck_rw_cohort_##N##_write_lock(CK_RW_COHORT_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -85,7 +85,7 @@ } \ \ CK_CC_INLINE static void \ - ck_cohort_rw_##N##_write_unlock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ + ck_rw_cohort_##N##_write_unlock(CK_RW_COHORT_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -94,7 +94,7 @@ } \ \ CK_CC_INLINE static void \ - ck_cohort_rw_##N##_read_lock(CK_COHORT_RW_INSTANCE(N) *rw_cohort, \ + ck_rw_cohort_##N##_read_lock(CK_RW_COHORT_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -122,15 +122,15 @@ } \ \ CK_CC_INLINE static void \ - ck_cohort_rw_##N##_read_unlock(CK_COHORT_RW_INSTANCE(N) *cohort) \ + ck_rw_cohort_##N##_read_unlock(CK_RW_COHORT_INSTANCE(N) *cohort) \ { \ ck_pr_dec_uint(&cohort->read_counter); \ } -#define CK_COHORT_RW_INITIALIZER { \ +#define CK_RW_COHORT_INITIALIZER { \ .read_counter = 0, \ .write_barrier = 0, \ .wait_limit = 0 \ } -#endif /* _CK_COHORT_RW_H */ +#endif /* _CK_RW_COHORT_H */ diff --git a/regressions/ck_cohort_rw/benchmark/Makefile b/regressions/ck_rw_cohort/benchmark/Makefile similarity index 72% rename from regressions/ck_cohort_rw/benchmark/Makefile rename to regressions/ck_rw_cohort/benchmark/Makefile index 8531fb9..6a751b5 100644 --- a/regressions/ck_cohort_rw/benchmark/Makefile +++ b/regressions/ck_rw_cohort/benchmark/Makefile @@ -4,10 +4,10 @@ OBJECTS=latency throughput all: $(OBJECTS) -latency: 
latency.c ../../../include/ck_cohort_rw.h +latency: latency.c ../../../include/ck_rw_cohort.h $(CC) $(CFLAGS) -o latency latency.c -throughput: throughput.c ../../../include/ck_cohort_rw.h +throughput: throughput.c ../../../include/ck_rw_cohort.h $(CC) $(CFLAGS) -o throughput throughput.c clean: diff --git a/regressions/ck_cohort_rw/benchmark/latency.c b/regressions/ck_rw_cohort/benchmark/latency.c similarity index 81% rename from regressions/ck_cohort_rw/benchmark/latency.c rename to regressions/ck_rw_cohort/benchmark/latency.c index 67e2803..d27fd41 100644 --- a/regressions/ck_cohort_rw/benchmark/latency.c +++ b/regressions/ck_rw_cohort/benchmark/latency.c @@ -25,7 +25,7 @@ * SUCH DAMAGE. */ -#include +#include #include #include #include @@ -60,7 +60,7 @@ ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) CK_COHORT_PROTOTYPE(fas_fas, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) -CK_COHORT_RW_PROTOTYPE(fas_fas) +CK_RW_COHORT_PROTOTYPE(fas_fas) int main(void) @@ -69,34 +69,34 @@ main(void) ck_spinlock_fas_t global_lock = CK_SPINLOCK_FAS_INITIALIZER; ck_spinlock_fas_t local_lock = CK_SPINLOCK_FAS_INITIALIZER; CK_COHORT_INSTANCE(fas_fas) cohort = CK_COHORT_INITIALIZER; - CK_COHORT_RW_INSTANCE(fas_fas) rw_cohort = CK_COHORT_RW_INITIALIZER; + CK_RW_COHORT_INSTANCE(fas_fas) rw_cohort = CK_RW_COHORT_INITIALIZER; CK_COHORT_INIT(fas_fas, &cohort, &global_lock, &local_lock, CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT); - CK_COHORT_RW_INIT(fas_fas, &rw_cohort, CK_COHORT_RW_DEFAULT_WAIT_LIMIT); + CK_RW_COHORT_INIT(fas_fas, &rw_cohort, CK_RW_COHORT_DEFAULT_WAIT_LIMIT); for (i = 0; i < STEPS; i++) { - CK_COHORT_RW_WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); - CK_COHORT_RW_WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_RW_COHORT_WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_RW_COHORT_WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); } s_b = rdtsc(); for (i = 0; i < STEPS; i++) { - CK_COHORT_RW_WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); - CK_COHORT_RW_WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_RW_COHORT_WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_RW_COHORT_WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); } e_b = rdtsc(); printf("WRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS); for (i = 0; i < STEPS; i++) { - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); } s_b = rdtsc(); for (i = 0; i < STEPS; i++) { - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); } e_b = rdtsc(); printf("READ: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS); diff --git a/regressions/ck_cohort_rw/benchmark/throughput.c b/regressions/ck_rw_cohort/benchmark/throughput.c similarity index 74% rename from regressions/ck_cohort_rw/benchmark/throughput.c rename to regressions/ck_rw_cohort/benchmark/throughput.c index 591351e..8a0c519 100644 --- a/regressions/ck_cohort_rw/benchmark/throughput.c +++ b/regressions/ck_rw_cohort/benchmark/throughput.c @@ -26,7 +26,7 @@ */ #include -#include +#include 
#include #include #include @@ -78,7 +78,7 @@ ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) CK_COHORT_PROTOTYPE(fas_fas, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) -CK_COHORT_RW_PROTOTYPE(fas_fas) +CK_RW_COHORT_PROTOTYPE(fas_fas) struct cohort_record { CK_COHORT_INSTANCE(fas_fas) cohort; @@ -86,7 +86,7 @@ struct cohort_record { static struct cohort_record *cohorts; static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER; -static CK_COHORT_RW_INSTANCE(fas_fas) rw_cohort = CK_COHORT_RW_INITIALIZER; +static CK_RW_COHORT_INSTANCE(fas_fas) rw_cohort = CK_RW_COHORT_INITIALIZER; static unsigned int n_cohorts; struct block { @@ -114,38 +114,38 @@ thread_rwlock(void *pun) for (i = 1, a = 0;; i++) { s_b = rdtsc(); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + 
CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); e_b = rdtsc(); a += (e_b - s_b) >> 4; diff --git a/regressions/ck_cohort_rw/validate/Makefile b/regressions/ck_rw_cohort/validate/Makefile similarity index 83% rename from regressions/ck_cohort_rw/validate/Makefile rename to regressions/ck_rw_cohort/validate/Makefile index 2582598..e2552be 100644 --- a/regressions/ck_cohort_rw/validate/Makefile +++ b/regressions/ck_rw_cohort/validate/Makefile @@ -4,7 +4,7 @@ OBJECTS=validate all: $(OBJECTS) -validate: validate.c ../../../include/ck_cohort_rw.h +validate: validate.c ../../../include/ck_rw_cohort.h $(CC) $(CFLAGS) -o validate validate.c -g check: all diff --git a/regressions/ck_cohort_rw/validate/validate.c b/regressions/ck_rw_cohort/validate/validate.c similarity index 93% rename from regressions/ck_cohort_rw/validate/validate.c rename to regressions/ck_rw_cohort/validate/validate.c index 4854f65..871dcd3 100644 --- a/regressions/ck_cohort_rw/validate/validate.c +++ b/regressions/ck_rw_cohort/validate/validate.c @@ -37,7 +37,7 @@ #include #include -#include +#include #include #include "../../common.h" @@ -75,10 +75,10 @@ ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) CK_COHORT_PROTOTYPE(fas_fas, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) -CK_COHORT_RW_PROTOTYPE(fas_fas) +CK_RW_COHORT_PROTOTYPE(fas_fas) static CK_COHORT_INSTANCE(fas_fas) *cohorts; -static CK_COHORT_RW_INSTANCE(fas_fas) rw_cohort = CK_COHORT_RW_INITIALIZER; +static CK_RW_COHORT_INSTANCE(fas_fas) rw_cohort = CK_RW_COHORT_INITIALIZER; static int n_cohorts; static void * @@ -97,7 +97,7 @@ thread(void *null CK_CC_UNUSED) cohort = cohorts + (core / (int)(a.delta)) % n_cohorts; while (i--) { - CK_COHORT_RW_WRITE_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WRITE_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); { l = ck_pr_load_uint(&locked); if (l != 0) { @@ -132,16 +132,16 @@ thread(void *null CK_CC_UNUSED) ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l); } } - CK_COHORT_RW_WRITE_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WRITE_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_COHORT_RW_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); { l = ck_pr_load_uint(&locked); if (l != 0) { ck_error("ERROR 
[RD:%d]: %u != 0\n", __LINE__, l); } } - CK_COHORT_RW_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); } return (NULL); From f93369c4fca929347abf336d8fe761c08ffb678f Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Thu, 18 Apr 2013 13:27:19 -0500 Subject: [PATCH 09/20] ck_rw_cohort: Removed backwards jump --- include/ck_rw_cohort.h | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/include/ck_rw_cohort.h b/include/ck_rw_cohort.h index 30ee106..bb2b771 100644 --- a/include/ck_rw_cohort.h +++ b/include/ck_rw_cohort.h @@ -100,19 +100,22 @@ { \ unsigned int wait_count = 0; \ bool raised = false; \ - start: \ - ck_pr_inc_uint(&rw_cohort->read_counter); \ - if (CK_COHORT_LOCKED(N, cohort, global_context, local_context) == true) { \ - ck_pr_dec_uint(&rw_cohort->read_counter); \ - while (CK_COHORT_LOCKED(N, cohort, global_context, local_context) == true) {\ - ck_pr_stall(); \ - if (++wait_count > rw_cohort->wait_limit && raised == false) { \ - ck_pr_inc_uint(&rw_cohort->write_barrier); \ - raised = true; \ - } \ - } \ - goto start; \ - } \ + \ + while (true) { \ + ck_pr_inc_uint(&rw_cohort->read_counter); \ + if (CK_COHORT_LOCKED(N, cohort, global_context, local_context) == false) {\ + break; \ + } \ + \ + ck_pr_dec_uint(&rw_cohort->read_counter); \ + while (CK_COHORT_LOCKED(N, cohort, global_context, local_context) == true) {\ + ck_pr_stall(); \ + if (++wait_count > rw_cohort->wait_limit && raised == false) { \ + ck_pr_inc_uint(&rw_cohort->write_barrier); \ + raised = true; \ + } \ + } \ + } \ \ if (raised == true) { \ ck_pr_dec_uint(&rw_cohort->write_barrier); \ From 21750b932196776cb80b80243f276264b483e117 Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Sun, 21 Apr 2013 20:51:58 -0500 Subject: [PATCH 10/20] ck_rw_cohort: Added read-preference logic and updated validation test to use it --- .gitignore | 5 +- include/ck_rw_cohort.h | 129 +++++++++++++++--- regressions/ck_rw_cohort/benchmark/latency.c | 22 +-- .../ck_rw_cohort/benchmark/throughput.c | 68 ++++----- regressions/ck_rw_cohort/ck_neutral.h | 7 + regressions/ck_rw_cohort/ck_rp.h | 7 + regressions/ck_rw_cohort/ck_wp.h | 17 +++ regressions/ck_rw_cohort/validate/Makefile | 13 +- .../ck_rw_cohort/validate/ck_neutral.c | 2 + regressions/ck_rw_cohort/validate/ck_rp.c | 2 + regressions/ck_rw_cohort/validate/ck_wp.c | 2 + .../validate/{validate.c => validate.h} | 13 +- 12 files changed, 216 insertions(+), 71 deletions(-) create mode 100644 regressions/ck_rw_cohort/ck_neutral.h create mode 100644 regressions/ck_rw_cohort/ck_rp.h create mode 100644 regressions/ck_rw_cohort/ck_wp.h create mode 100644 regressions/ck_rw_cohort/validate/ck_neutral.c create mode 100644 regressions/ck_rw_cohort/validate/ck_rp.c create mode 100644 regressions/ck_rw_cohort/validate/ck_wp.c rename regressions/ck_rw_cohort/validate/{validate.c => validate.h} (93%) diff --git a/.gitignore b/.gitignore index 3d53783..ee0d0ca 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,7 @@ build/Makefile *.a *.so *.dSYM +.*.sw[op] regressions/ck_cohort/benchmark/ck_cohort.LATENCY regressions/ck_cohort/benchmark/ck_cohort.THROUGHPUT regressions/ck_pflock/benchmark/latency @@ -142,6 +143,8 @@ regressions/ck_queue/validate/ck_slist regressions/ck_cohort/validate/validate regressions/ck_cohort/benchmark/ck_cohort.LATENCY regressions/ck_cohort/benchmark/ck_cohort.THROUGHPUT -regressions/ck_rw_cohort/validate/validate +regressions/ck_rw_cohort/validate/ck_neutral 
+regressions/ck_rw_cohort/validate/ck_rp +regressions/ck_rw_cohort/validate/ck_wp regressions/ck_rw_cohort/benchmark/latency regressions/ck_rw_cohort/benchmark/throughput diff --git a/include/ck_rw_cohort.h b/include/ck_rw_cohort.h index bb2b771..dfcb138 100644 --- a/include/ck_rw_cohort.h +++ b/include/ck_rw_cohort.h @@ -39,24 +39,24 @@ #include #include -#define CK_RW_COHORT_NAME(N) ck_rw_cohort_##N -#define CK_RW_COHORT_INSTANCE(N) struct CK_RW_COHORT_NAME(N) -#define CK_RW_COHORT_INIT(N, RW, WL) ck_rw_cohort_##N##_init(RW, WL) -#define CK_RW_COHORT_READ_LOCK(N, RW, C, GC, LC) ck_rw_cohort_##N##_read_lock(RW, C, GC, LC) -#define CK_RW_COHORT_READ_UNLOCK(N, RW) ck_rw_cohort_##N##_read_unlock(RW) -#define CK_RW_COHORT_WRITE_LOCK(N, RW, C, GC, LC) ck_rw_cohort_##N##_write_lock(RW, C, GC, LC) -#define CK_RW_COHORT_WRITE_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_##N##_write_unlock(RW, C, GC, LC) -#define CK_RW_COHORT_DEFAULT_WAIT_LIMIT 1000 +#define CK_RW_COHORT_WP_NAME(N) ck_rw_cohort_wp_##N +#define CK_RW_COHORT_WP_INSTANCE(N) struct CK_RW_COHORT_WP_NAME(N) +#define CK_RW_COHORT_WP_INIT(N, RW, WL) ck_rw_cohort_wp_##N##_init(RW, WL) +#define CK_RW_COHORT_WP_READ_LOCK(N, RW, C, GC, LC) ck_rw_cohort_wp_##N##_read_lock(RW, C, GC, LC) +#define CK_RW_COHORT_WP_READ_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_wp_##N##_read_unlock(RW) +#define CK_RW_COHORT_WP_WRITE_LOCK(N, RW, C, GC, LC) ck_rw_cohort_wp_##N##_write_lock(RW, C, GC, LC) +#define CK_RW_COHORT_WP_WRITE_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_wp_##N##_write_unlock(RW, C, GC, LC) +#define CK_RW_COHORT_WP_DEFAULT_WAIT_LIMIT 1000 -#define CK_RW_COHORT_PROTOTYPE(N) \ - CK_RW_COHORT_INSTANCE(N) { \ +#define CK_RW_COHORT_WP_PROTOTYPE(N) \ + CK_RW_COHORT_WP_INSTANCE(N) { \ unsigned int read_counter; \ unsigned int write_barrier; \ unsigned int wait_limit; \ }; \ \ CK_CC_INLINE static void \ - ck_rw_cohort_##N##_init(CK_RW_COHORT_INSTANCE(N) *rw_cohort, \ + ck_rw_cohort_wp_##N##_init(CK_RW_COHORT_WP_INSTANCE(N) *rw_cohort, \ unsigned int wait_limit) \ { \ rw_cohort->read_counter = 0; \ @@ -67,7 +67,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_##N##_write_lock(CK_RW_COHORT_INSTANCE(N) *rw_cohort, \ + ck_rw_cohort_wp_##N##_write_lock(CK_RW_COHORT_WP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -85,7 +85,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_##N##_write_unlock(CK_RW_COHORT_INSTANCE(N) *rw_cohort, \ + ck_rw_cohort_wp_##N##_write_unlock(CK_RW_COHORT_WP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -94,7 +94,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_##N##_read_lock(CK_RW_COHORT_INSTANCE(N) *rw_cohort, \ + ck_rw_cohort_wp_##N##_read_lock(CK_RW_COHORT_WP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -125,15 +125,112 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_##N##_read_unlock(CK_RW_COHORT_INSTANCE(N) *cohort) \ + ck_rw_cohort_wp_##N##_read_unlock(CK_RW_COHORT_WP_INSTANCE(N) *cohort) \ { \ ck_pr_dec_uint(&cohort->read_counter); \ } -#define CK_RW_COHORT_INITIALIZER { \ +#define CK_RW_COHORT_WP_INITIALIZER { \ .read_counter = 0, \ .write_barrier = 0, \ .wait_limit = 0 \ } + +#define CK_RW_COHORT_RP_NAME(N) ck_rw_cohort_rp_##N +#define CK_RW_COHORT_RP_INSTANCE(N) struct CK_RW_COHORT_RP_NAME(N) +#define CK_RW_COHORT_RP_INIT(N, RW, WL) ck_rw_cohort_rp_##N##_init(RW, WL) +#define CK_RW_COHORT_RP_READ_LOCK(N, RW, C, GC, LC) 
ck_rw_cohort_rp_##N##_read_lock(RW, C, GC, LC) +#define CK_RW_COHORT_RP_READ_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_rp_##N##_read_unlock(RW) +#define CK_RW_COHORT_RP_WRITE_LOCK(N, RW, C, GC, LC) ck_rw_cohort_rp_##N##_write_lock(RW, C, GC, LC) +#define CK_RW_COHORT_RP_WRITE_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_rp_##N##_write_unlock(RW, C, GC, LC) +#define CK_RW_COHORT_RP_DEFAULT_WAIT_LIMIT 1000 + +#define CK_RW_COHORT_RP_PROTOTYPE(N) \ + CK_RW_COHORT_RP_INSTANCE(N) { \ + unsigned int read_counter; \ + unsigned int read_barrier; \ + unsigned int wait_limit; \ + }; \ + \ + CK_CC_INLINE static void \ + ck_rw_cohort_rp_##N##_init(CK_RW_COHORT_RP_INSTANCE(N) *rw_cohort, \ + unsigned int wait_limit) \ + { \ + rw_cohort->read_counter = 0; \ + rw_cohort->read_barrier = 0; \ + rw_cohort->wait_limit = wait_limit; \ + ck_pr_barrier(); \ + return; \ + } \ + \ + CK_CC_INLINE static void \ + ck_rw_cohort_rp_##N##_write_lock(CK_RW_COHORT_RP_INSTANCE(N) *rw_cohort, \ + CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ + void *local_context) \ + { \ + unsigned int wait_count = 0; \ + bool raised = false; \ + \ + while (true) { \ + CK_COHORT_LOCK(N, cohort, global_context, local_context); \ + if (ck_pr_load_uint(&rw_cohort->read_counter) == 0) { \ + break; \ + } else { \ + CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ + while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \ + ck_pr_stall(); \ + if (++wait_count > rw_cohort->wait_limit && raised == false) {\ + ck_pr_inc_uint(&rw_cohort->read_barrier); \ + raised = true; \ + } \ + } \ + } \ + } \ + \ + if (raised == true) { \ + ck_pr_dec_uint(&rw_cohort->read_barrier); \ + } \ + \ + return; \ + } \ + \ + CK_CC_INLINE static void \ + ck_rw_cohort_rp_##N##_write_unlock(CK_RW_COHORT_RP_INSTANCE(N) *rw_cohort, \ + CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \ + { \ + (void)rw_cohort; \ + CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ + } \ + \ + CK_CC_INLINE static void \ + ck_rw_cohort_rp_##N##_read_lock(CK_RW_COHORT_RP_INSTANCE(N) *rw_cohort, \ + CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ + void *local_context) \ + { \ + while (ck_pr_load_uint(&rw_cohort->read_barrier) > 0) { \ + ck_pr_stall(); \ + } \ + ck_pr_inc_uint(&rw_cohort->read_counter); \ + \ + while (CK_COHORT_LOCKED(N, cohort, global_context, local_context) == true) { \ + ck_pr_stall(); \ + } \ + \ + return; \ + } \ + \ + \ + CK_CC_INLINE static void \ + ck_rw_cohort_rp_##N##_read_unlock(CK_RW_COHORT_RP_INSTANCE(N) *cohort) \ + { \ + ck_pr_dec_uint(&cohort->read_counter); \ + } + +#define CK_RW_COHORT_RP_INITIALIZER { \ + .read_counter = 0, \ + .read_barrier = 0, \ + .wait_limit = 0 \ +} + #endif /* _CK_RW_COHORT_H */ diff --git a/regressions/ck_rw_cohort/benchmark/latency.c b/regressions/ck_rw_cohort/benchmark/latency.c index d27fd41..f05ee2e 100644 --- a/regressions/ck_rw_cohort/benchmark/latency.c +++ b/regressions/ck_rw_cohort/benchmark/latency.c @@ -60,7 +60,7 @@ ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) CK_COHORT_PROTOTYPE(fas_fas, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) -CK_RW_COHORT_PROTOTYPE(fas_fas) +CK_RW_COHORT_WP_PROTOTYPE(fas_fas) int main(void) @@ -69,34 +69,34 @@ main(void) ck_spinlock_fas_t global_lock = CK_SPINLOCK_FAS_INITIALIZER; ck_spinlock_fas_t local_lock = CK_SPINLOCK_FAS_INITIALIZER; 
CK_COHORT_INSTANCE(fas_fas) cohort = CK_COHORT_INITIALIZER; - CK_RW_COHORT_INSTANCE(fas_fas) rw_cohort = CK_RW_COHORT_INITIALIZER; + CK_RW_COHORT_WP_INSTANCE(fas_fas) rw_cohort = CK_RW_COHORT_WP_INITIALIZER; CK_COHORT_INIT(fas_fas, &cohort, &global_lock, &local_lock, CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT); - CK_RW_COHORT_INIT(fas_fas, &rw_cohort, CK_RW_COHORT_DEFAULT_WAIT_LIMIT); + CK_RW_COHORT_WP_INIT(fas_fas, &rw_cohort, CK_RW_COHORT_WP_DEFAULT_WAIT_LIMIT); for (i = 0; i < STEPS; i++) { - CK_RW_COHORT_WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); - CK_RW_COHORT_WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_RW_COHORT_WP_WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_RW_COHORT_WP_WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); } s_b = rdtsc(); for (i = 0; i < STEPS; i++) { - CK_RW_COHORT_WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); - CK_RW_COHORT_WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_RW_COHORT_WP_WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_RW_COHORT_WP_WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); } e_b = rdtsc(); printf("WRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS); for (i = 0; i < STEPS; i++) { - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); } s_b = rdtsc(); for (i = 0; i < STEPS; i++) { - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); } e_b = rdtsc(); printf("READ: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS); diff --git a/regressions/ck_rw_cohort/benchmark/throughput.c b/regressions/ck_rw_cohort/benchmark/throughput.c index 8a0c519..b629c73 100644 --- a/regressions/ck_rw_cohort/benchmark/throughput.c +++ b/regressions/ck_rw_cohort/benchmark/throughput.c @@ -78,7 +78,7 @@ ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) CK_COHORT_PROTOTYPE(fas_fas, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) -CK_RW_COHORT_PROTOTYPE(fas_fas) +CK_RW_COHORT_WP_PROTOTYPE(fas_fas) struct cohort_record { CK_COHORT_INSTANCE(fas_fas) cohort; @@ -86,7 +86,7 @@ struct cohort_record { static struct cohort_record *cohorts; static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER; -static CK_RW_COHORT_INSTANCE(fas_fas) rw_cohort = CK_RW_COHORT_INITIALIZER; +static CK_RW_COHORT_WP_INSTANCE(fas_fas) rw_cohort = CK_RW_COHORT_WP_INITIALIZER; static unsigned int n_cohorts; struct block { @@ -114,38 +114,38 @@ thread_rwlock(void *pun) for (i = 1, a = 0;; i++) { s_b = rdtsc(); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, 
&rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); e_b = rdtsc(); a += (e_b - s_b) >> 4; diff --git a/regressions/ck_rw_cohort/ck_neutral.h b/regressions/ck_rw_cohort/ck_neutral.h new file mode 100644 index 0000000..2d29c73 --- /dev/null +++ b/regressions/ck_rw_cohort/ck_neutral.h @@ -0,0 +1,7 @@ +#define LOCK_PROTOTYPE CK_RW_COHORT_NEUTRAL_PROTOTYPE +#define 
LOCK_INSTANCE CK_RW_COHORT_NEUTRAL_INSTANCE +#define LOCK_INITIALIZER CK_RW_COHORT_NEUTRAL_INITIALIZER +#define READ_LOCK CK_RW_COHORT_NEUTRAL_READ_LOCK +#define WRITE_LOCK CK_RW_COHORT_NEUTRAL_WRITE_LOCK +#define READ_UNLOCK CK_RW_COHORT_NEUTRAL_READ_UNLOCK +#define WRITE_UNLOCK CK_RW_COHORT_NEUTRAL_WRITE_UNLOCK diff --git a/regressions/ck_rw_cohort/ck_rp.h b/regressions/ck_rw_cohort/ck_rp.h new file mode 100644 index 0000000..d4b7d4e --- /dev/null +++ b/regressions/ck_rw_cohort/ck_rp.h @@ -0,0 +1,7 @@ +#define LOCK_PROTOTYPE CK_RW_COHORT_RP_PROTOTYPE +#define LOCK_INSTANCE CK_RW_COHORT_RP_INSTANCE +#define LOCK_INITIALIZER CK_RW_COHORT_RP_INITIALIZER +#define READ_LOCK CK_RW_COHORT_RP_READ_LOCK +#define READ_UNLOCK CK_RW_COHORT_RP_READ_UNLOCK +#define WRITE_LOCK CK_RW_COHORT_RP_WRITE_LOCK +#define WRITE_UNLOCK CK_RW_COHORT_RP_WRITE_UNLOCK diff --git a/regressions/ck_rw_cohort/ck_wp.h b/regressions/ck_rw_cohort/ck_wp.h new file mode 100644 index 0000000..bb9b6b2 --- /dev/null +++ b/regressions/ck_rw_cohort/ck_wp.h @@ -0,0 +1,17 @@ +#define LOCK_PROTOTYPE CK_RW_COHORT_WP_PROTOTYPE +#define LOCK_INSTANCE CK_RW_COHORT_WP_INSTANCE +#define LOCK_INITIALIZER CK_RW_COHORT_WP_INITIALIZER +#define READ_LOCK CK_RW_COHORT_WP_READ_LOCK +#define WRITE_LOCK CK_RW_COHORT_WP_WRITE_LOCK +#define READ_UNLOCK CK_RW_COHORT_WP_READ_UNLOCK +#define WRITE_UNLOCK CK_RW_COHORT_WP_WRITE_UNLOCK +/* +#define WRITE_LOCK(N, RW, C, GC, LC)\ + CK_RW_COHORT_WP_WRITE_LOCK(N, RW, C, GC, LC) + +#define WRITE_UNLOCK(N, RW, C, GC, LC)\ + CK_RW_COHORT_WP_WRITE_UNLOCK(N, RW, C, GC, LC); + +#define READ_LOCK(N, RW, C, GC, LC)\ + CK_RW_COHORT_WP_READ_LOCK(N, RW) +*/ \ No newline at end of file diff --git a/regressions/ck_rw_cohort/validate/Makefile b/regressions/ck_rw_cohort/validate/Makefile index e2552be..7214c60 100644 --- a/regressions/ck_rw_cohort/validate/Makefile +++ b/regressions/ck_rw_cohort/validate/Makefile @@ -1,11 +1,18 @@ .PHONY: check clean distribution -OBJECTS=validate +#OBJECTS=ck_neutral ck_rp ck_wp +OBJECTS=ck_rp ck_wp all: $(OBJECTS) -validate: validate.c ../../../include/ck_rw_cohort.h - $(CC) $(CFLAGS) -o validate validate.c -g +ck_neutral: ck_neutral.c ../../../include/ck_rw_cohort.h + $(CC) $(CFLAGS) -o ck_neutral ck_neutral.c -g + +ck_rp: ck_rp.c ../../../include/ck_rw_cohort.h + $(CC) $(CFLAGS) -o ck_rp ck_rp.c -g + +ck_wp: ck_wp.c ../../../include/ck_rw_cohort.h + $(CC) $(CFLAGS) -o ck_wp ck_wp.c -g check: all ./validate $(CORES) 1 diff --git a/regressions/ck_rw_cohort/validate/ck_neutral.c b/regressions/ck_rw_cohort/validate/ck_neutral.c new file mode 100644 index 0000000..7884dc5 --- /dev/null +++ b/regressions/ck_rw_cohort/validate/ck_neutral.c @@ -0,0 +1,2 @@ +#include "../ck_neutral.h" +#include "validate.h" diff --git a/regressions/ck_rw_cohort/validate/ck_rp.c b/regressions/ck_rw_cohort/validate/ck_rp.c new file mode 100644 index 0000000..d63e9d5 --- /dev/null +++ b/regressions/ck_rw_cohort/validate/ck_rp.c @@ -0,0 +1,2 @@ +#include "../ck_rp.h" +#include "validate.h" diff --git a/regressions/ck_rw_cohort/validate/ck_wp.c b/regressions/ck_rw_cohort/validate/ck_wp.c new file mode 100644 index 0000000..f89be35 --- /dev/null +++ b/regressions/ck_rw_cohort/validate/ck_wp.c @@ -0,0 +1,2 @@ +#include "../ck_wp.h" +#include "validate.h" diff --git a/regressions/ck_rw_cohort/validate/validate.c b/regressions/ck_rw_cohort/validate/validate.h similarity index 93% rename from regressions/ck_rw_cohort/validate/validate.c rename to regressions/ck_rw_cohort/validate/validate.h index 871dcd3..27c7283 100644 
--- a/regressions/ck_rw_cohort/validate/validate.c +++ b/regressions/ck_rw_cohort/validate/validate.h @@ -46,6 +46,7 @@ #define ITERATE 1000000 #endif + static struct affinity a; static unsigned int locked; static int nthr; @@ -75,10 +76,10 @@ ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) CK_COHORT_PROTOTYPE(fas_fas, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) -CK_RW_COHORT_PROTOTYPE(fas_fas) +LOCK_PROTOTYPE(fas_fas) static CK_COHORT_INSTANCE(fas_fas) *cohorts; -static CK_RW_COHORT_INSTANCE(fas_fas) rw_cohort = CK_RW_COHORT_INITIALIZER; +static LOCK_INSTANCE(fas_fas) rw_cohort = LOCK_INITIALIZER; static int n_cohorts; static void * @@ -97,7 +98,7 @@ thread(void *null CK_CC_UNUSED) cohort = cohorts + (core / (int)(a.delta)) % n_cohorts; while (i--) { - CK_RW_COHORT_WRITE_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + WRITE_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); { l = ck_pr_load_uint(&locked); if (l != 0) { @@ -132,16 +133,16 @@ thread(void *null CK_CC_UNUSED) ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l); } } - CK_RW_COHORT_WRITE_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + WRITE_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); { l = ck_pr_load_uint(&locked); if (l != 0) { ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l); } } - CK_RW_COHORT_READ_UNLOCK(fas_fas, &rw_cohort); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); } return (NULL); From f693a16253a3dc618cf573a4b0ecc8801d04c4cd Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Mon, 22 Apr 2013 10:00:14 -0500 Subject: [PATCH 11/20] ck_rw_cohort: Added neutral reader-writer lock --- include/ck_rw_cohort.h | 73 +++++++++++++++++++++- regressions/ck_rw_cohort/validate/Makefile | 3 +- 2 files changed, 71 insertions(+), 5 deletions(-) diff --git a/include/ck_rw_cohort.h b/include/ck_rw_cohort.h index dfcb138..e46bee2 100644 --- a/include/ck_rw_cohort.h +++ b/include/ck_rw_cohort.h @@ -56,7 +56,7 @@ }; \ \ CK_CC_INLINE static void \ - ck_rw_cohort_wp_##N##_init(CK_RW_COHORT_WP_INSTANCE(N) *rw_cohort, \ + ck_rw_cohort_wp_##N##_init(CK_RW_COHORT_WP_INSTANCE(N) *rw_cohort, \ unsigned int wait_limit) \ { \ rw_cohort->read_counter = 0; \ @@ -67,7 +67,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_wp_##N##_write_lock(CK_RW_COHORT_WP_INSTANCE(N) *rw_cohort, \ + ck_rw_cohort_wp_##N##_write_lock(CK_RW_COHORT_WP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -85,7 +85,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_wp_##N##_write_unlock(CK_RW_COHORT_WP_INSTANCE(N) *rw_cohort, \ + ck_rw_cohort_wp_##N##_write_unlock(CK_RW_COHORT_WP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -233,4 +233,71 @@ .wait_limit = 0 \ } + +#define CK_RW_COHORT_NEUTRAL_NAME(N) ck_rw_cohort_neutral_##N +#define CK_RW_COHORT_NEUTRAL_INSTANCE(N) struct CK_RW_COHORT_NEUTRAL_NAME(N) +#define CK_RW_COHORT_NEUTRAL_INIT(N, RW, WL) ck_rw_cohort_neutral_##N##_init(RW, WL) +#define CK_RW_COHORT_NEUTRAL_READ_LOCK(N, RW, C, GC, LC) ck_rw_cohort_neutral_##N##_read_lock(RW, C, GC, LC) +#define CK_RW_COHORT_NEUTRAL_READ_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_neutral_##N##_read_unlock(RW) +#define 
CK_RW_COHORT_NEUTRAL_WRITE_LOCK(N, RW, C, GC, LC) ck_rw_cohort_neutral_##N##_write_lock(RW, C, GC, LC) +#define CK_RW_COHORT_NEUTRAL_WRITE_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_neutral_##N##_write_unlock(RW, C, GC, LC) +#define CK_RW_COHORT_NEUTRAL_DEFAULT_WAIT_LIMIT 1000 + +#define CK_RW_COHORT_NEUTRAL_PROTOTYPE(N) \ + CK_RW_COHORT_NEUTRAL_INSTANCE(N) { \ + unsigned int read_counter; \ + }; \ + \ + CK_CC_INLINE static void \ + ck_rw_cohort_neutral_##N##_init(CK_RW_COHORT_NEUTRAL_INSTANCE(N) *rw_cohort) \ + { \ + rw_cohort->read_counter = 0; \ + ck_pr_barrier(); \ + return; \ + } \ + \ + CK_CC_INLINE static void \ + ck_rw_cohort_neutral_##N##_write_lock(CK_RW_COHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ + CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ + void *local_context) \ + { \ + CK_COHORT_LOCK(N, cohort, global_context, local_context); \ + while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \ + ck_pr_stall(); \ + } \ + return; \ + } \ + \ + CK_CC_INLINE static void \ + ck_rw_cohort_neutral_##N##_write_unlock(CK_RW_COHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ + CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \ + { \ + (void)rw_cohort; \ + CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ + } \ + \ + CK_CC_INLINE static void \ + ck_rw_cohort_neutral_##N##_read_lock(CK_RW_COHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ + CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ + void *local_context) \ + { \ + CK_COHORT_LOCK(N, cohort, global_context, local_context); \ + ck_pr_inc_uint(&rw_cohort->read_counter); \ + CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ + \ + return; \ + } \ + \ + \ + CK_CC_INLINE static void \ + ck_rw_cohort_neutral_##N##_read_unlock(CK_RW_COHORT_NEUTRAL_INSTANCE(N) *cohort) \ + { \ + ck_pr_dec_uint(&cohort->read_counter); \ + } + +#define CK_RW_COHORT_NEUTRAL_INITIALIZER { \ + .read_counter = 0, \ +} + + #endif /* _CK_RW_COHORT_H */ diff --git a/regressions/ck_rw_cohort/validate/Makefile b/regressions/ck_rw_cohort/validate/Makefile index 7214c60..7436962 100644 --- a/regressions/ck_rw_cohort/validate/Makefile +++ b/regressions/ck_rw_cohort/validate/Makefile @@ -1,7 +1,6 @@ .PHONY: check clean distribution -#OBJECTS=ck_neutral ck_rp ck_wp -OBJECTS=ck_rp ck_wp +OBJECTS=ck_neutral ck_rp ck_wp all: $(OBJECTS) From 0d20391563ef1b9fad658c3b75502a2061e7182f Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Mon, 22 Apr 2013 11:48:41 -0500 Subject: [PATCH 12/20] ck_rw_cohort: Added benchmark tests for reader-writer cohort locks --- .gitignore | 8 ++- include/ck_rw_cohort.h | 2 +- regressions/ck_rw_cohort/benchmark/Makefile | 23 +++++-- .../ck_rw_cohort/benchmark/ck_neutral.c | 7 ++ regressions/ck_rw_cohort/benchmark/ck_rp.c | 7 ++ regressions/ck_rw_cohort/benchmark/ck_wp.c | 7 ++ .../benchmark/{latency.c => latency.h} | 22 +++--- .../benchmark/{throughput.c => throughput.h} | 68 +++++++++---------- regressions/ck_rw_cohort/ck_neutral.h | 1 + regressions/ck_rw_cohort/ck_rp.h | 1 + regressions/ck_rw_cohort/ck_wp.h | 11 +-- 11 files changed, 95 insertions(+), 62 deletions(-) create mode 100644 regressions/ck_rw_cohort/benchmark/ck_neutral.c create mode 100644 regressions/ck_rw_cohort/benchmark/ck_rp.c create mode 100644 regressions/ck_rw_cohort/benchmark/ck_wp.c rename regressions/ck_rw_cohort/benchmark/{latency.c => latency.h} (79%) rename regressions/ck_rw_cohort/benchmark/{throughput.c => throughput.h} (73%) diff --git a/.gitignore b/.gitignore index ee0d0ca..4e9157b 100644 --- a/.gitignore +++ b/.gitignore @@ -146,5 
+146,9 @@ regressions/ck_cohort/benchmark/ck_cohort.THROUGHPUT regressions/ck_rw_cohort/validate/ck_neutral regressions/ck_rw_cohort/validate/ck_rp regressions/ck_rw_cohort/validate/ck_wp -regressions/ck_rw_cohort/benchmark/latency -regressions/ck_rw_cohort/benchmark/throughput +regressions/ck_rw_cohort/benchmark/ck_neutral.LATENCY +regressions/ck_rw_cohort/benchmark/ck_neutral.THROUGHPUT +regressions/ck_rw_cohort/benchmark/ck_rp.LATENCY +regressions/ck_rw_cohort/benchmark/ck_rp.THROUGHPUT +regressions/ck_rw_cohort/benchmark/ck_wp.LATENCY +regressions/ck_rw_cohort/benchmark/ck_wp.THROUGHPUT diff --git a/include/ck_rw_cohort.h b/include/ck_rw_cohort.h index e46bee2..0904000 100644 --- a/include/ck_rw_cohort.h +++ b/include/ck_rw_cohort.h @@ -236,7 +236,7 @@ #define CK_RW_COHORT_NEUTRAL_NAME(N) ck_rw_cohort_neutral_##N #define CK_RW_COHORT_NEUTRAL_INSTANCE(N) struct CK_RW_COHORT_NEUTRAL_NAME(N) -#define CK_RW_COHORT_NEUTRAL_INIT(N, RW, WL) ck_rw_cohort_neutral_##N##_init(RW, WL) +#define CK_RW_COHORT_NEUTRAL_INIT(N, RW) ck_rw_cohort_neutral_##N##_init(RW) #define CK_RW_COHORT_NEUTRAL_READ_LOCK(N, RW, C, GC, LC) ck_rw_cohort_neutral_##N##_read_lock(RW, C, GC, LC) #define CK_RW_COHORT_NEUTRAL_READ_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_neutral_##N##_read_unlock(RW) #define CK_RW_COHORT_NEUTRAL_WRITE_LOCK(N, RW, C, GC, LC) ck_rw_cohort_neutral_##N##_write_lock(RW, C, GC, LC) diff --git a/regressions/ck_rw_cohort/benchmark/Makefile b/regressions/ck_rw_cohort/benchmark/Makefile index 6a751b5..054c85c 100644 --- a/regressions/ck_rw_cohort/benchmark/Makefile +++ b/regressions/ck_rw_cohort/benchmark/Makefile @@ -1,14 +1,29 @@ .PHONY: clean distribution OBJECTS=latency throughput +OBJECTS=ck_neutral.THROUGHPUT ck_neutral.LATENCY \ + ck_rp.THROUGHPUT ck_rp.LATENCY \ + ck_wp.THROUGHPUT ck_wp.LATENCY all: $(OBJECTS) -latency: latency.c ../../../include/ck_rw_cohort.h - $(CC) $(CFLAGS) -o latency latency.c +ck_neutral.THROUGHPUT: ck_neutral.c + $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_neutral.THROUGHPUT ck_neutral.c -throughput: throughput.c ../../../include/ck_rw_cohort.h - $(CC) $(CFLAGS) -o throughput throughput.c +ck_neutral.LATENCY: ck_neutral.c + $(CC) -DLATENCY $(CFLAGS) -o ck_neutral.LATENCY ck_neutral.c + +ck_rp.THROUGHPUT: ck_rp.c + $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_rp.THROUGHPUT ck_rp.c + +ck_rp.LATENCY: ck_rp.c + $(CC) -DLATENCY $(CFLAGS) -o ck_rp.LATENCY ck_rp.c + +ck_wp.THROUGHPUT: ck_wp.c + $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_wp.THROUGHPUT ck_wp.c + +ck_wp.LATENCY: ck_wp.c + $(CC) -DLATENCY $(CFLAGS) -o ck_wp.LATENCY ck_wp.c clean: rm -rf *.dSYM *~ *.o $(OBJECTS) diff --git a/regressions/ck_rw_cohort/benchmark/ck_neutral.c b/regressions/ck_rw_cohort/benchmark/ck_neutral.c new file mode 100644 index 0000000..9fb85db --- /dev/null +++ b/regressions/ck_rw_cohort/benchmark/ck_neutral.c @@ -0,0 +1,7 @@ +#include "../ck_neutral.h" + +#ifdef THROUGHPUT +#include "throughput.h" +#elif defined(LATENCY) +#include "latency.h" +#endif diff --git a/regressions/ck_rw_cohort/benchmark/ck_rp.c b/regressions/ck_rw_cohort/benchmark/ck_rp.c new file mode 100644 index 0000000..798e578 --- /dev/null +++ b/regressions/ck_rw_cohort/benchmark/ck_rp.c @@ -0,0 +1,7 @@ +#include "../ck_rp.h" + +#ifdef THROUGHPUT +#include "throughput.h" +#elif defined(LATENCY) +#include "latency.h" +#endif diff --git a/regressions/ck_rw_cohort/benchmark/ck_wp.c b/regressions/ck_rw_cohort/benchmark/ck_wp.c new file mode 100644 index 0000000..07b0cce --- /dev/null +++ b/regressions/ck_rw_cohort/benchmark/ck_wp.c @@ -0,0 +1,7 @@ +#include 
"../ck_wp.h" + +#ifdef THROUGHPUT +#include "throughput.h" +#elif defined(LATENCY) +#include "latency.h" +#endif diff --git a/regressions/ck_rw_cohort/benchmark/latency.c b/regressions/ck_rw_cohort/benchmark/latency.h similarity index 79% rename from regressions/ck_rw_cohort/benchmark/latency.c rename to regressions/ck_rw_cohort/benchmark/latency.h index f05ee2e..60d7c0d 100644 --- a/regressions/ck_rw_cohort/benchmark/latency.c +++ b/regressions/ck_rw_cohort/benchmark/latency.h @@ -60,7 +60,7 @@ ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) CK_COHORT_PROTOTYPE(fas_fas, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) -CK_RW_COHORT_WP_PROTOTYPE(fas_fas) +LOCK_PROTOTYPE(fas_fas) int main(void) @@ -69,34 +69,34 @@ main(void) ck_spinlock_fas_t global_lock = CK_SPINLOCK_FAS_INITIALIZER; ck_spinlock_fas_t local_lock = CK_SPINLOCK_FAS_INITIALIZER; CK_COHORT_INSTANCE(fas_fas) cohort = CK_COHORT_INITIALIZER; - CK_RW_COHORT_WP_INSTANCE(fas_fas) rw_cohort = CK_RW_COHORT_WP_INITIALIZER; + LOCK_INSTANCE(fas_fas) rw_cohort = LOCK_INITIALIZER; CK_COHORT_INIT(fas_fas, &cohort, &global_lock, &local_lock, CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT); - CK_RW_COHORT_WP_INIT(fas_fas, &rw_cohort, CK_RW_COHORT_WP_DEFAULT_WAIT_LIMIT); + LOCK_INIT(fas_fas, &rw_cohort, CK_RW_COHORT_WP_DEFAULT_WAIT_LIMIT); for (i = 0; i < STEPS; i++) { - CK_RW_COHORT_WP_WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); - CK_RW_COHORT_WP_WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); } s_b = rdtsc(); for (i = 0; i < STEPS; i++) { - CK_RW_COHORT_WP_WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); - CK_RW_COHORT_WP_WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); } e_b = rdtsc(); printf("WRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS); for (i = 0; i < STEPS; i++) { - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); } s_b = rdtsc(); for (i = 0; i < STEPS; i++) { - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); } e_b = rdtsc(); printf("READ: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS); diff --git a/regressions/ck_rw_cohort/benchmark/throughput.c b/regressions/ck_rw_cohort/benchmark/throughput.h similarity index 73% rename from regressions/ck_rw_cohort/benchmark/throughput.c rename to regressions/ck_rw_cohort/benchmark/throughput.h index b629c73..787a036 100644 --- a/regressions/ck_rw_cohort/benchmark/throughput.c +++ b/regressions/ck_rw_cohort/benchmark/throughput.h @@ -78,7 +78,7 @@ ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) CK_COHORT_PROTOTYPE(fas_fas, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context, ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) 
-CK_RW_COHORT_WP_PROTOTYPE(fas_fas) +LOCK_PROTOTYPE(fas_fas) struct cohort_record { CK_COHORT_INSTANCE(fas_fas) cohort; @@ -86,7 +86,7 @@ struct cohort_record { static struct cohort_record *cohorts; static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER; -static CK_RW_COHORT_WP_INSTANCE(fas_fas) rw_cohort = CK_RW_COHORT_WP_INITIALIZER; +static LOCK_INSTANCE(fas_fas) rw_cohort = LOCK_INITIALIZER; static unsigned int n_cohorts; struct block { @@ -114,38 +114,38 @@ thread_rwlock(void *pun) for (i = 1, a = 0;; i++) { s_b = rdtsc(); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); - CK_RW_COHORT_WP_READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); - CK_RW_COHORT_WP_READ_UNLOCK(fas_fas, &rw_cohort); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + 
READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); + READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL); e_b = rdtsc(); a += (e_b - s_b) >> 4; diff --git a/regressions/ck_rw_cohort/ck_neutral.h b/regressions/ck_rw_cohort/ck_neutral.h index 2d29c73..07c9d27 100644 --- a/regressions/ck_rw_cohort/ck_neutral.h +++ b/regressions/ck_rw_cohort/ck_neutral.h @@ -1,6 +1,7 @@ #define LOCK_PROTOTYPE CK_RW_COHORT_NEUTRAL_PROTOTYPE #define LOCK_INSTANCE CK_RW_COHORT_NEUTRAL_INSTANCE #define LOCK_INITIALIZER CK_RW_COHORT_NEUTRAL_INITIALIZER +#define LOCK_INIT(N, C, W) CK_RW_COHORT_NEUTRAL_INIT(N, C) #define READ_LOCK CK_RW_COHORT_NEUTRAL_READ_LOCK #define WRITE_LOCK CK_RW_COHORT_NEUTRAL_WRITE_LOCK #define READ_UNLOCK CK_RW_COHORT_NEUTRAL_READ_UNLOCK diff --git a/regressions/ck_rw_cohort/ck_rp.h b/regressions/ck_rw_cohort/ck_rp.h index d4b7d4e..ecc6391 100644 --- a/regressions/ck_rw_cohort/ck_rp.h +++ b/regressions/ck_rw_cohort/ck_rp.h @@ -1,6 +1,7 @@ #define LOCK_PROTOTYPE CK_RW_COHORT_RP_PROTOTYPE #define LOCK_INSTANCE CK_RW_COHORT_RP_INSTANCE #define LOCK_INITIALIZER CK_RW_COHORT_RP_INITIALIZER +#define LOCK_INIT CK_RW_COHORT_RP_INIT #define READ_LOCK CK_RW_COHORT_RP_READ_LOCK #define READ_UNLOCK CK_RW_COHORT_RP_READ_UNLOCK #define WRITE_LOCK CK_RW_COHORT_RP_WRITE_LOCK diff --git a/regressions/ck_rw_cohort/ck_wp.h b/regressions/ck_rw_cohort/ck_wp.h index bb9b6b2..afc1112 100644 --- a/regressions/ck_rw_cohort/ck_wp.h +++ b/regressions/ck_rw_cohort/ck_wp.h @@ -1,17 +1,8 @@ #define LOCK_PROTOTYPE CK_RW_COHORT_WP_PROTOTYPE #define LOCK_INSTANCE CK_RW_COHORT_WP_INSTANCE #define LOCK_INITIALIZER CK_RW_COHORT_WP_INITIALIZER +#define LOCK_INIT CK_RW_COHORT_WP_INIT #define READ_LOCK CK_RW_COHORT_WP_READ_LOCK #define WRITE_LOCK CK_RW_COHORT_WP_WRITE_LOCK #define READ_UNLOCK CK_RW_COHORT_WP_READ_UNLOCK #define WRITE_UNLOCK CK_RW_COHORT_WP_WRITE_UNLOCK -/* -#define WRITE_LOCK(N, RW, C, GC, LC)\ - CK_RW_COHORT_WP_WRITE_LOCK(N, RW, C, GC, LC) - -#define WRITE_UNLOCK(N, RW, C, GC, LC)\ - CK_RW_COHORT_WP_WRITE_UNLOCK(N, RW, C, GC, LC); - -#define READ_LOCK(N, RW, C, GC, LC)\ - CK_RW_COHORT_WP_READ_LOCK(N, RW) -*/ \ No newline at end of file From cc4b248bcb8ee6413459bc8a251bca2d92fb0c16 Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Tue, 23 Apr 2013 14:27:27 -0400 Subject: [PATCH 13/20] ck_rwcohort: Renamed ck_rwcohort namespace --- include/{ck_rw_cohort.h => ck_rwcohort.h} | 102 +++++++++--------- regressions/ck_rw_cohort/ck_neutral.h | 8 -- regressions/ck_rw_cohort/ck_rp.h | 8 -- regressions/ck_rw_cohort/ck_wp.h | 8 -- .../benchmark/Makefile | 0 .../benchmark/ck_neutral.c | 0 .../benchmark/ck_rp.c | 0 .../benchmark/ck_wp.c | 0 .../benchmark/latency.h | 4 +- .../benchmark/throughput.h | 2 +- regressions/ck_rwcohort/ck_neutral.h | 8 ++ regressions/ck_rwcohort/ck_rp.h | 8 ++ regressions/ck_rwcohort/ck_wp.h | 8 ++ .../validate/Makefile | 6 +- .../validate/ck_neutral.c | 0 .../validate/ck_rp.c | 0 
.../validate/ck_wp.c | 0 .../validate/validate.h | 2 +- 18 files changed, 82 insertions(+), 82 deletions(-) rename include/{ck_rw_cohort.h => ck_rwcohort.h} (71%) delete mode 100644 regressions/ck_rw_cohort/ck_neutral.h delete mode 100644 regressions/ck_rw_cohort/ck_rp.h delete mode 100644 regressions/ck_rw_cohort/ck_wp.h rename regressions/{ck_rw_cohort => ck_rwcohort}/benchmark/Makefile (100%) rename regressions/{ck_rw_cohort => ck_rwcohort}/benchmark/ck_neutral.c (100%) rename regressions/{ck_rw_cohort => ck_rwcohort}/benchmark/ck_rp.c (100%) rename regressions/{ck_rw_cohort => ck_rwcohort}/benchmark/ck_wp.c (100%) rename regressions/{ck_rw_cohort => ck_rwcohort}/benchmark/latency.h (97%) rename regressions/{ck_rw_cohort => ck_rwcohort}/benchmark/throughput.h (99%) create mode 100644 regressions/ck_rwcohort/ck_neutral.h create mode 100644 regressions/ck_rwcohort/ck_rp.h create mode 100644 regressions/ck_rwcohort/ck_wp.h rename regressions/{ck_rw_cohort => ck_rwcohort}/validate/Makefile (70%) rename regressions/{ck_rw_cohort => ck_rwcohort}/validate/ck_neutral.c (100%) rename regressions/{ck_rw_cohort => ck_rwcohort}/validate/ck_rp.c (100%) rename regressions/{ck_rw_cohort => ck_rwcohort}/validate/ck_wp.c (100%) rename regressions/{ck_rw_cohort => ck_rwcohort}/validate/validate.h (99%) diff --git a/include/ck_rw_cohort.h b/include/ck_rwcohort.h similarity index 71% rename from include/ck_rw_cohort.h rename to include/ck_rwcohort.h index 0904000..67a2d1f 100644 --- a/include/ck_rw_cohort.h +++ b/include/ck_rwcohort.h @@ -25,8 +25,8 @@ * SUCH DAMAGE. */ -#ifndef _CK_RW_COHORT_H -#define _CK_RW_COHORT_H +#ifndef _CK_RWCOHORT_H +#define _CK_RWCOHORT_H /* * This is an implementation of NUMA-aware reader-writer locks as described in: @@ -39,24 +39,24 @@ #include #include -#define CK_RW_COHORT_WP_NAME(N) ck_rw_cohort_wp_##N -#define CK_RW_COHORT_WP_INSTANCE(N) struct CK_RW_COHORT_WP_NAME(N) -#define CK_RW_COHORT_WP_INIT(N, RW, WL) ck_rw_cohort_wp_##N##_init(RW, WL) -#define CK_RW_COHORT_WP_READ_LOCK(N, RW, C, GC, LC) ck_rw_cohort_wp_##N##_read_lock(RW, C, GC, LC) -#define CK_RW_COHORT_WP_READ_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_wp_##N##_read_unlock(RW) -#define CK_RW_COHORT_WP_WRITE_LOCK(N, RW, C, GC, LC) ck_rw_cohort_wp_##N##_write_lock(RW, C, GC, LC) -#define CK_RW_COHORT_WP_WRITE_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_wp_##N##_write_unlock(RW, C, GC, LC) -#define CK_RW_COHORT_WP_DEFAULT_WAIT_LIMIT 1000 +#define CK_RWCOHORT_WP_NAME(N) ck_rwcohort_wp_##N +#define CK_RWCOHORT_WP_INSTANCE(N) struct CK_RWCOHORT_WP_NAME(N) +#define CK_RWCOHORT_WP_INIT(N, RW, WL) ck_rwcohort_wp_##N##_init(RW, WL) +#define CK_RWCOHORT_WP_READ_LOCK(N, RW, C, GC, LC) ck_rwcohort_wp_##N##_read_lock(RW, C, GC, LC) +#define CK_RWCOHORT_WP_READ_UNLOCK(N, RW, C, GC, LC) ck_rwcohort_wp_##N##_read_unlock(RW) +#define CK_RWCOHORT_WP_WRITE_LOCK(N, RW, C, GC, LC) ck_rwcohort_wp_##N##_write_lock(RW, C, GC, LC) +#define CK_RWCOHORT_WP_WRITE_UNLOCK(N, RW, C, GC, LC) ck_rwcohort_wp_##N##_write_unlock(RW, C, GC, LC) +#define CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT 1000 -#define CK_RW_COHORT_WP_PROTOTYPE(N) \ - CK_RW_COHORT_WP_INSTANCE(N) { \ +#define CK_RWCOHORT_WP_PROTOTYPE(N) \ + CK_RWCOHORT_WP_INSTANCE(N) { \ unsigned int read_counter; \ unsigned int write_barrier; \ unsigned int wait_limit; \ }; \ \ CK_CC_INLINE static void \ - ck_rw_cohort_wp_##N##_init(CK_RW_COHORT_WP_INSTANCE(N) *rw_cohort, \ + ck_rwcohort_wp_##N##_init(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \ unsigned int wait_limit) \ { \ rw_cohort->read_counter = 0; \ @@ -67,7 
+67,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_wp_##N##_write_lock(CK_RW_COHORT_WP_INSTANCE(N) *rw_cohort, \ + ck_rwcohort_wp_##N##_write_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -85,7 +85,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_wp_##N##_write_unlock(CK_RW_COHORT_WP_INSTANCE(N) *rw_cohort, \ + ck_rwcohort_wp_##N##_write_unlock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -94,7 +94,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_wp_##N##_read_lock(CK_RW_COHORT_WP_INSTANCE(N) *rw_cohort, \ + ck_rwcohort_wp_##N##_read_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -125,36 +125,36 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_wp_##N##_read_unlock(CK_RW_COHORT_WP_INSTANCE(N) *cohort) \ + ck_rwcohort_wp_##N##_read_unlock(CK_RWCOHORT_WP_INSTANCE(N) *cohort) \ { \ ck_pr_dec_uint(&cohort->read_counter); \ } -#define CK_RW_COHORT_WP_INITIALIZER { \ +#define CK_RWCOHORT_WP_INITIALIZER { \ .read_counter = 0, \ .write_barrier = 0, \ .wait_limit = 0 \ } -#define CK_RW_COHORT_RP_NAME(N) ck_rw_cohort_rp_##N -#define CK_RW_COHORT_RP_INSTANCE(N) struct CK_RW_COHORT_RP_NAME(N) -#define CK_RW_COHORT_RP_INIT(N, RW, WL) ck_rw_cohort_rp_##N##_init(RW, WL) -#define CK_RW_COHORT_RP_READ_LOCK(N, RW, C, GC, LC) ck_rw_cohort_rp_##N##_read_lock(RW, C, GC, LC) -#define CK_RW_COHORT_RP_READ_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_rp_##N##_read_unlock(RW) -#define CK_RW_COHORT_RP_WRITE_LOCK(N, RW, C, GC, LC) ck_rw_cohort_rp_##N##_write_lock(RW, C, GC, LC) -#define CK_RW_COHORT_RP_WRITE_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_rp_##N##_write_unlock(RW, C, GC, LC) -#define CK_RW_COHORT_RP_DEFAULT_WAIT_LIMIT 1000 +#define CK_RWCOHORT_RP_NAME(N) ck_rwcohort_rp_##N +#define CK_RWCOHORT_RP_INSTANCE(N) struct CK_RWCOHORT_RP_NAME(N) +#define CK_RWCOHORT_RP_INIT(N, RW, WL) ck_rwcohort_rp_##N##_init(RW, WL) +#define CK_RWCOHORT_RP_READ_LOCK(N, RW, C, GC, LC) ck_rwcohort_rp_##N##_read_lock(RW, C, GC, LC) +#define CK_RWCOHORT_RP_READ_UNLOCK(N, RW, C, GC, LC) ck_rwcohort_rp_##N##_read_unlock(RW) +#define CK_RWCOHORT_RP_WRITE_LOCK(N, RW, C, GC, LC) ck_rwcohort_rp_##N##_write_lock(RW, C, GC, LC) +#define CK_RWCOHORT_RP_WRITE_UNLOCK(N, RW, C, GC, LC) ck_rwcohort_rp_##N##_write_unlock(RW, C, GC, LC) +#define CK_RWCOHORT_RP_DEFAULT_WAIT_LIMIT 1000 -#define CK_RW_COHORT_RP_PROTOTYPE(N) \ - CK_RW_COHORT_RP_INSTANCE(N) { \ +#define CK_RWCOHORT_RP_PROTOTYPE(N) \ + CK_RWCOHORT_RP_INSTANCE(N) { \ unsigned int read_counter; \ unsigned int read_barrier; \ unsigned int wait_limit; \ }; \ \ CK_CC_INLINE static void \ - ck_rw_cohort_rp_##N##_init(CK_RW_COHORT_RP_INSTANCE(N) *rw_cohort, \ + ck_rwcohort_rp_##N##_init(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \ unsigned int wait_limit) \ { \ rw_cohort->read_counter = 0; \ @@ -165,7 +165,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_rp_##N##_write_lock(CK_RW_COHORT_RP_INSTANCE(N) *rw_cohort, \ + ck_rwcohort_rp_##N##_write_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -196,7 +196,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_rp_##N##_write_unlock(CK_RW_COHORT_RP_INSTANCE(N) *rw_cohort, \ + ck_rwcohort_rp_##N##_write_unlock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, void 
*local_context) \ { \ (void)rw_cohort; \ @@ -204,7 +204,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_rp_##N##_read_lock(CK_RW_COHORT_RP_INSTANCE(N) *rw_cohort, \ + ck_rwcohort_rp_##N##_read_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -222,34 +222,34 @@ \ \ CK_CC_INLINE static void \ - ck_rw_cohort_rp_##N##_read_unlock(CK_RW_COHORT_RP_INSTANCE(N) *cohort) \ + ck_rwcohort_rp_##N##_read_unlock(CK_RWCOHORT_RP_INSTANCE(N) *cohort) \ { \ ck_pr_dec_uint(&cohort->read_counter); \ } -#define CK_RW_COHORT_RP_INITIALIZER { \ +#define CK_RWCOHORT_RP_INITIALIZER { \ .read_counter = 0, \ .read_barrier = 0, \ .wait_limit = 0 \ } -#define CK_RW_COHORT_NEUTRAL_NAME(N) ck_rw_cohort_neutral_##N -#define CK_RW_COHORT_NEUTRAL_INSTANCE(N) struct CK_RW_COHORT_NEUTRAL_NAME(N) -#define CK_RW_COHORT_NEUTRAL_INIT(N, RW) ck_rw_cohort_neutral_##N##_init(RW) -#define CK_RW_COHORT_NEUTRAL_READ_LOCK(N, RW, C, GC, LC) ck_rw_cohort_neutral_##N##_read_lock(RW, C, GC, LC) -#define CK_RW_COHORT_NEUTRAL_READ_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_neutral_##N##_read_unlock(RW) -#define CK_RW_COHORT_NEUTRAL_WRITE_LOCK(N, RW, C, GC, LC) ck_rw_cohort_neutral_##N##_write_lock(RW, C, GC, LC) -#define CK_RW_COHORT_NEUTRAL_WRITE_UNLOCK(N, RW, C, GC, LC) ck_rw_cohort_neutral_##N##_write_unlock(RW, C, GC, LC) -#define CK_RW_COHORT_NEUTRAL_DEFAULT_WAIT_LIMIT 1000 +#define CK_RWCOHORT_NEUTRAL_NAME(N) ck_rwcohort_neutral_##N +#define CK_RWCOHORT_NEUTRAL_INSTANCE(N) struct CK_RWCOHORT_NEUTRAL_NAME(N) +#define CK_RWCOHORT_NEUTRAL_INIT(N, RW) ck_rwcohort_neutral_##N##_init(RW) +#define CK_RWCOHORT_NEUTRAL_READ_LOCK(N, RW, C, GC, LC) ck_rwcohort_neutral_##N##_read_lock(RW, C, GC, LC) +#define CK_RWCOHORT_NEUTRAL_READ_UNLOCK(N, RW, C, GC, LC) ck_rwcohort_neutral_##N##_read_unlock(RW) +#define CK_RWCOHORT_NEUTRAL_WRITE_LOCK(N, RW, C, GC, LC) ck_rwcohort_neutral_##N##_write_lock(RW, C, GC, LC) +#define CK_RWCOHORT_NEUTRAL_WRITE_UNLOCK(N, RW, C, GC, LC) ck_rwcohort_neutral_##N##_write_unlock(RW, C, GC, LC) +#define CK_RWCOHORT_NEUTRAL_DEFAULT_WAIT_LIMIT 1000 -#define CK_RW_COHORT_NEUTRAL_PROTOTYPE(N) \ - CK_RW_COHORT_NEUTRAL_INSTANCE(N) { \ +#define CK_RWCOHORT_NEUTRAL_PROTOTYPE(N) \ + CK_RWCOHORT_NEUTRAL_INSTANCE(N) { \ unsigned int read_counter; \ }; \ \ CK_CC_INLINE static void \ - ck_rw_cohort_neutral_##N##_init(CK_RW_COHORT_NEUTRAL_INSTANCE(N) *rw_cohort) \ + ck_rwcohort_neutral_##N##_init(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort) \ { \ rw_cohort->read_counter = 0; \ ck_pr_barrier(); \ @@ -257,7 +257,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_neutral_##N##_write_lock(CK_RW_COHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ + ck_rwcohort_neutral_##N##_write_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -269,7 +269,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_neutral_##N##_write_unlock(CK_RW_COHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ + ck_rwcohort_neutral_##N##_write_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \ { \ (void)rw_cohort; \ @@ -277,7 +277,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rw_cohort_neutral_##N##_read_lock(CK_RW_COHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ + ck_rwcohort_neutral_##N##_read_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -290,14 +290,14 @@ \ \ CK_CC_INLINE 
static void \ - ck_rw_cohort_neutral_##N##_read_unlock(CK_RW_COHORT_NEUTRAL_INSTANCE(N) *cohort) \ + ck_rwcohort_neutral_##N##_read_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *cohort) \ { \ ck_pr_dec_uint(&cohort->read_counter); \ } -#define CK_RW_COHORT_NEUTRAL_INITIALIZER { \ +#define CK_RWCOHORT_NEUTRAL_INITIALIZER { \ .read_counter = 0, \ } -#endif /* _CK_RW_COHORT_H */ +#endif /* _CK_RWCOHORT_H */ diff --git a/regressions/ck_rw_cohort/ck_neutral.h b/regressions/ck_rw_cohort/ck_neutral.h deleted file mode 100644 index 07c9d27..0000000 --- a/regressions/ck_rw_cohort/ck_neutral.h +++ /dev/null @@ -1,8 +0,0 @@ -#define LOCK_PROTOTYPE CK_RW_COHORT_NEUTRAL_PROTOTYPE -#define LOCK_INSTANCE CK_RW_COHORT_NEUTRAL_INSTANCE -#define LOCK_INITIALIZER CK_RW_COHORT_NEUTRAL_INITIALIZER -#define LOCK_INIT(N, C, W) CK_RW_COHORT_NEUTRAL_INIT(N, C) -#define READ_LOCK CK_RW_COHORT_NEUTRAL_READ_LOCK -#define WRITE_LOCK CK_RW_COHORT_NEUTRAL_WRITE_LOCK -#define READ_UNLOCK CK_RW_COHORT_NEUTRAL_READ_UNLOCK -#define WRITE_UNLOCK CK_RW_COHORT_NEUTRAL_WRITE_UNLOCK diff --git a/regressions/ck_rw_cohort/ck_rp.h b/regressions/ck_rw_cohort/ck_rp.h deleted file mode 100644 index ecc6391..0000000 --- a/regressions/ck_rw_cohort/ck_rp.h +++ /dev/null @@ -1,8 +0,0 @@ -#define LOCK_PROTOTYPE CK_RW_COHORT_RP_PROTOTYPE -#define LOCK_INSTANCE CK_RW_COHORT_RP_INSTANCE -#define LOCK_INITIALIZER CK_RW_COHORT_RP_INITIALIZER -#define LOCK_INIT CK_RW_COHORT_RP_INIT -#define READ_LOCK CK_RW_COHORT_RP_READ_LOCK -#define READ_UNLOCK CK_RW_COHORT_RP_READ_UNLOCK -#define WRITE_LOCK CK_RW_COHORT_RP_WRITE_LOCK -#define WRITE_UNLOCK CK_RW_COHORT_RP_WRITE_UNLOCK diff --git a/regressions/ck_rw_cohort/ck_wp.h b/regressions/ck_rw_cohort/ck_wp.h deleted file mode 100644 index afc1112..0000000 --- a/regressions/ck_rw_cohort/ck_wp.h +++ /dev/null @@ -1,8 +0,0 @@ -#define LOCK_PROTOTYPE CK_RW_COHORT_WP_PROTOTYPE -#define LOCK_INSTANCE CK_RW_COHORT_WP_INSTANCE -#define LOCK_INITIALIZER CK_RW_COHORT_WP_INITIALIZER -#define LOCK_INIT CK_RW_COHORT_WP_INIT -#define READ_LOCK CK_RW_COHORT_WP_READ_LOCK -#define WRITE_LOCK CK_RW_COHORT_WP_WRITE_LOCK -#define READ_UNLOCK CK_RW_COHORT_WP_READ_UNLOCK -#define WRITE_UNLOCK CK_RW_COHORT_WP_WRITE_UNLOCK diff --git a/regressions/ck_rw_cohort/benchmark/Makefile b/regressions/ck_rwcohort/benchmark/Makefile similarity index 100% rename from regressions/ck_rw_cohort/benchmark/Makefile rename to regressions/ck_rwcohort/benchmark/Makefile diff --git a/regressions/ck_rw_cohort/benchmark/ck_neutral.c b/regressions/ck_rwcohort/benchmark/ck_neutral.c similarity index 100% rename from regressions/ck_rw_cohort/benchmark/ck_neutral.c rename to regressions/ck_rwcohort/benchmark/ck_neutral.c diff --git a/regressions/ck_rw_cohort/benchmark/ck_rp.c b/regressions/ck_rwcohort/benchmark/ck_rp.c similarity index 100% rename from regressions/ck_rw_cohort/benchmark/ck_rp.c rename to regressions/ck_rwcohort/benchmark/ck_rp.c diff --git a/regressions/ck_rw_cohort/benchmark/ck_wp.c b/regressions/ck_rwcohort/benchmark/ck_wp.c similarity index 100% rename from regressions/ck_rw_cohort/benchmark/ck_wp.c rename to regressions/ck_rwcohort/benchmark/ck_wp.c diff --git a/regressions/ck_rw_cohort/benchmark/latency.h b/regressions/ck_rwcohort/benchmark/latency.h similarity index 97% rename from regressions/ck_rw_cohort/benchmark/latency.h rename to regressions/ck_rwcohort/benchmark/latency.h index 60d7c0d..b6426d8 100644 --- a/regressions/ck_rw_cohort/benchmark/latency.h +++ b/regressions/ck_rwcohort/benchmark/latency.h @@ -25,7 +25,7 @@ * SUCH 
DAMAGE. */ -#include +#include #include #include #include @@ -73,7 +73,7 @@ main(void) CK_COHORT_INIT(fas_fas, &cohort, &global_lock, &local_lock, CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT); - LOCK_INIT(fas_fas, &rw_cohort, CK_RW_COHORT_WP_DEFAULT_WAIT_LIMIT); + LOCK_INIT(fas_fas, &rw_cohort, CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT); for (i = 0; i < STEPS; i++) { WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL); diff --git a/regressions/ck_rw_cohort/benchmark/throughput.h b/regressions/ck_rwcohort/benchmark/throughput.h similarity index 99% rename from regressions/ck_rw_cohort/benchmark/throughput.h rename to regressions/ck_rwcohort/benchmark/throughput.h index 787a036..a76d4ef 100644 --- a/regressions/ck_rw_cohort/benchmark/throughput.h +++ b/regressions/ck_rwcohort/benchmark/throughput.h @@ -26,7 +26,7 @@ */ #include -#include +#include #include #include #include diff --git a/regressions/ck_rwcohort/ck_neutral.h b/regressions/ck_rwcohort/ck_neutral.h new file mode 100644 index 0000000..dbbda9d --- /dev/null +++ b/regressions/ck_rwcohort/ck_neutral.h @@ -0,0 +1,8 @@ +#define LOCK_PROTOTYPE CK_RWCOHORT_NEUTRAL_PROTOTYPE +#define LOCK_INSTANCE CK_RWCOHORT_NEUTRAL_INSTANCE +#define LOCK_INITIALIZER CK_RWCOHORT_NEUTRAL_INITIALIZER +#define LOCK_INIT(N, C, W) CK_RWCOHORT_NEUTRAL_INIT(N, C) +#define READ_LOCK CK_RWCOHORT_NEUTRAL_READ_LOCK +#define WRITE_LOCK CK_RWCOHORT_NEUTRAL_WRITE_LOCK +#define READ_UNLOCK CK_RWCOHORT_NEUTRAL_READ_UNLOCK +#define WRITE_UNLOCK CK_RWCOHORT_NEUTRAL_WRITE_UNLOCK diff --git a/regressions/ck_rwcohort/ck_rp.h b/regressions/ck_rwcohort/ck_rp.h new file mode 100644 index 0000000..e20f3d2 --- /dev/null +++ b/regressions/ck_rwcohort/ck_rp.h @@ -0,0 +1,8 @@ +#define LOCK_PROTOTYPE CK_RWCOHORT_RP_PROTOTYPE +#define LOCK_INSTANCE CK_RWCOHORT_RP_INSTANCE +#define LOCK_INITIALIZER CK_RWCOHORT_RP_INITIALIZER +#define LOCK_INIT CK_RWCOHORT_RP_INIT +#define READ_LOCK CK_RWCOHORT_RP_READ_LOCK +#define READ_UNLOCK CK_RWCOHORT_RP_READ_UNLOCK +#define WRITE_LOCK CK_RWCOHORT_RP_WRITE_LOCK +#define WRITE_UNLOCK CK_RWCOHORT_RP_WRITE_UNLOCK diff --git a/regressions/ck_rwcohort/ck_wp.h b/regressions/ck_rwcohort/ck_wp.h new file mode 100644 index 0000000..556c7df --- /dev/null +++ b/regressions/ck_rwcohort/ck_wp.h @@ -0,0 +1,8 @@ +#define LOCK_PROTOTYPE CK_RWCOHORT_WP_PROTOTYPE +#define LOCK_INSTANCE CK_RWCOHORT_WP_INSTANCE +#define LOCK_INITIALIZER CK_RWCOHORT_WP_INITIALIZER +#define LOCK_INIT CK_RWCOHORT_WP_INIT +#define READ_LOCK CK_RWCOHORT_WP_READ_LOCK +#define WRITE_LOCK CK_RWCOHORT_WP_WRITE_LOCK +#define READ_UNLOCK CK_RWCOHORT_WP_READ_UNLOCK +#define WRITE_UNLOCK CK_RWCOHORT_WP_WRITE_UNLOCK diff --git a/regressions/ck_rw_cohort/validate/Makefile b/regressions/ck_rwcohort/validate/Makefile similarity index 70% rename from regressions/ck_rw_cohort/validate/Makefile rename to regressions/ck_rwcohort/validate/Makefile index 7436962..9e86021 100644 --- a/regressions/ck_rw_cohort/validate/Makefile +++ b/regressions/ck_rwcohort/validate/Makefile @@ -4,13 +4,13 @@ OBJECTS=ck_neutral ck_rp ck_wp all: $(OBJECTS) -ck_neutral: ck_neutral.c ../../../include/ck_rw_cohort.h +ck_neutral: ck_neutral.c ../../../include/ck_rwcohort.h $(CC) $(CFLAGS) -o ck_neutral ck_neutral.c -g -ck_rp: ck_rp.c ../../../include/ck_rw_cohort.h +ck_rp: ck_rp.c ../../../include/ck_rwcohort.h $(CC) $(CFLAGS) -o ck_rp ck_rp.c -g -ck_wp: ck_wp.c ../../../include/ck_rw_cohort.h +ck_wp: ck_wp.c ../../../include/ck_rwcohort.h $(CC) $(CFLAGS) -o ck_wp ck_wp.c -g check: all diff --git a/regressions/ck_rw_cohort/validate/ck_neutral.c 
b/regressions/ck_rwcohort/validate/ck_neutral.c similarity index 100% rename from regressions/ck_rw_cohort/validate/ck_neutral.c rename to regressions/ck_rwcohort/validate/ck_neutral.c diff --git a/regressions/ck_rw_cohort/validate/ck_rp.c b/regressions/ck_rwcohort/validate/ck_rp.c similarity index 100% rename from regressions/ck_rw_cohort/validate/ck_rp.c rename to regressions/ck_rwcohort/validate/ck_rp.c diff --git a/regressions/ck_rw_cohort/validate/ck_wp.c b/regressions/ck_rwcohort/validate/ck_wp.c similarity index 100% rename from regressions/ck_rw_cohort/validate/ck_wp.c rename to regressions/ck_rwcohort/validate/ck_wp.c diff --git a/regressions/ck_rw_cohort/validate/validate.h b/regressions/ck_rwcohort/validate/validate.h similarity index 99% rename from regressions/ck_rw_cohort/validate/validate.h rename to regressions/ck_rwcohort/validate/validate.h index 27c7283..2bb9b1e 100644 --- a/regressions/ck_rw_cohort/validate/validate.h +++ b/regressions/ck_rwcohort/validate/validate.h @@ -37,7 +37,7 @@ #include #include -#include +#include #include #include "../../common.h" From ff79217398bd61df079ce444a7a71590ccf67568 Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Wed, 8 May 2013 08:03:55 -0500 Subject: [PATCH 14/20] ck_rwcohort: Updated .gitignore to reflect new ck_rwcohort naming --- .gitignore | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index 4e9157b..05a48ab 100644 --- a/.gitignore +++ b/.gitignore @@ -143,12 +143,12 @@ regressions/ck_queue/validate/ck_slist regressions/ck_cohort/validate/validate regressions/ck_cohort/benchmark/ck_cohort.LATENCY regressions/ck_cohort/benchmark/ck_cohort.THROUGHPUT -regressions/ck_rw_cohort/validate/ck_neutral -regressions/ck_rw_cohort/validate/ck_rp -regressions/ck_rw_cohort/validate/ck_wp -regressions/ck_rw_cohort/benchmark/ck_neutral.LATENCY -regressions/ck_rw_cohort/benchmark/ck_neutral.THROUGHPUT -regressions/ck_rw_cohort/benchmark/ck_rp.LATENCY -regressions/ck_rw_cohort/benchmark/ck_rp.THROUGHPUT -regressions/ck_rw_cohort/benchmark/ck_wp.LATENCY -regressions/ck_rw_cohort/benchmark/ck_wp.THROUGHPUT +regressions/ck_rwcohort/validate/ck_neutral +regressions/ck_rwcohort/validate/ck_rp +regressions/ck_rwcohort/validate/ck_wp +regressions/ck_rwcohort/benchmark/ck_neutral.LATENCY +regressions/ck_rwcohort/benchmark/ck_neutral.THROUGHPUT +regressions/ck_rwcohort/benchmark/ck_rp.LATENCY +regressions/ck_rwcohort/benchmark/ck_rp.THROUGHPUT +regressions/ck_rwcohort/benchmark/ck_wp.LATENCY +regressions/ck_rwcohort/benchmark/ck_wp.THROUGHPUT From dcdc6dd1007633df3e59be7657b4f0165a51f0bc Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Wed, 8 May 2013 15:58:28 -0500 Subject: [PATCH 15/20] ck_rwcohort: Fixed indentation and looping logic and added initial documentation --- doc/CK_RWCOHORT_INIT | 59 ++++++++++ doc/CK_RWCOHORT_INSTANCE | 62 ++++++++++ doc/CK_RWCOHORT_PROTOTYPE | 63 +++++++++++ doc/CK_RWCOHORT_READ_LOCK | 62 ++++++++++ doc/CK_RWCOHORT_READ_UNLOCK | 61 ++++++++++ doc/CK_RWCOHORT_WRITE_LOCK | 62 ++++++++++ doc/CK_RWCOHORT_WRITE_UNLOCK | 61 ++++++++++ doc/ck_rwcohort | 211 +++++++++++++++++++++++++++++++++++ include/ck_rwcohort.h | 26 ++--- 9 files changed, 654 insertions(+), 13 deletions(-) create mode 100644 doc/CK_RWCOHORT_INIT create mode 100644 doc/CK_RWCOHORT_INSTANCE create mode 100644 doc/CK_RWCOHORT_PROTOTYPE create mode 100644 doc/CK_RWCOHORT_READ_LOCK create mode 100644 doc/CK_RWCOHORT_READ_UNLOCK create mode 100644 doc/CK_RWCOHORT_WRITE_LOCK create mode 100644 
doc/CK_RWCOHORT_WRITE_UNLOCK create mode 100644 doc/ck_rwcohort diff --git a/doc/CK_RWCOHORT_INIT b/doc/CK_RWCOHORT_INIT new file mode 100644 index 0000000..1d543e8 --- /dev/null +++ b/doc/CK_RWCOHORT_INIT @@ -0,0 +1,59 @@ +.\" +.\" Copyright 2013 Brendon Scheinman. +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.\" +.Dd February 24, 2013. +.Dt CK_RWCOHORT_INIT 3 +.Sh NAME +.Nm CK_RWCOHORT_INIT +.Nd initialize instance of a cohort-based reader-writer lock type +.Sh LIBRARY +Concurrency Kit (libck, \-lck) +.Sh SYNOPSIS +.In ck_rwcohort.h +.Fn CK_RWCOHORT_INIT "COHORT_NAME cohort_name" "LOCK *lock" "unsigned int wait_limit" +.Sh DESCRIPTION +This macro initializes the lock instance pointed to by the +.Fa lock +argument. Until a lock instance is initialized using the CK_RWCOHORT_INIT macro, any operations +involving it will have undefined behavior. The +.Fa wait_limit +argument should only be used with reader-preference or writer-preference locks. For neutral +locks, this argument should be excluded. +If you are unsure of a value to use for the +.Fa wait_limit +argument, you should use CK_RWCOHORT_DEFAULT_LOCAL_WAIT_LIMIT. +.Sh SEE ALSO +.Xr ck_rwcohort 3 , +.Xr CK_RWCOHORT_PROTOTYPE 3 , +.Xr CK_RWCOHORT_TRYLOCK_PROTOTYPE 3 , +.Xr CK_RWCOHORT_INSTANCE 3 , +.Xr CK_RWCOHORT_INITIALIZER 3 , +.Xr CK_RWCOHORT_LOCK 3 , +.Xr CK_RWCOHORT_UNLOCK 3 , +.Xr CK_RWCOHORT_LOCKED 3 , +.Xr CK_RWCOHORT_TRYLOCK 3 , +.Pp +Additional information available at http://concurrencykit.org/ diff --git a/doc/CK_RWCOHORT_INSTANCE b/doc/CK_RWCOHORT_INSTANCE new file mode 100644 index 0000000..c4e7f10 --- /dev/null +++ b/doc/CK_RWCOHORT_INSTANCE @@ -0,0 +1,62 @@ +.\" +.\" Copyright 2013 Brendon Scheinman. +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. 
+.\" +.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.\" +.Dd February 24, 2013. +.Dt CK_RWCOHORT_INSTANCE 3 +.Sh NAME +.Nm CK_RWCOHORT_INSTANCE +.Nd declare an instance of a cohort-based reader-writer lock type +.Sh LIBRARY +Concurrency Kit (libck, \-lck) +.Sh SYNOPSIS +.In ck_cohort.h +.Fn CK_RWCOHORT_INSTANCE "COHORT_NAME cohort_name" +.Sh DESCRIPTION +The user must use this macro to declare instances of lock types that they have +defined using the +.Xr CK_RWCOHORT_PROTOTYPE 3 +macro. The cohort_name must be the same as the one used in the prototype macro. +For instance, if CK_RWCOHORT_PROTOTYPE was called with the name "foo", the +CK_RWCOHORT_INSTANCE macro should be called as +.br +CK_RWCOHORT_INSTANCE(foo) cohort; +.Pp +This macro should also be used when allocating memory for cohorts. For instance, +to allocate a block of 4 cohorts: +.br +CK_RWCOHORT_INSTANCE(foo) *cohorts = malloc(4 * sizeof(CK_RWCOHORT_INSTANCE(foo))); +.Sh SEE ALSO +.Xr ck_rwcohort 3 , +.Xr CK_RWCOHORT_PROTOTYPE 3 , +.Xr CK_RWCOHORT_TRYLOCK_PROTOTYPE 3 , +.Xr CK_RWCOHORT_INSTANCE 3 , +.Xr CK_RWCOHORT_INITIALIZER 3 , +.Xr CK_RWCOHORT_LOCK 3 , +.Xr CK_RWCOHORT_UNLOCK 3 , +.Xr CK_RWCOHORT_LOCKED 3 , +.Xr CK_RWCOHORT_TRYLOCK 3 , +.Pp +Additional information available at http://concurrencykit.org/ diff --git a/doc/CK_RWCOHORT_PROTOTYPE b/doc/CK_RWCOHORT_PROTOTYPE new file mode 100644 index 0000000..cd8710d --- /dev/null +++ b/doc/CK_RWCOHORT_PROTOTYPE @@ -0,0 +1,63 @@ +.\" +.\" Copyright 2013 Brendon Scheinman. +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.\" +.Dd February 24, 2013. +.Dt CK_RWCOHORT_PROTOTYPE 3 +.Sh NAME +.Nm CK_RWCOHORT_PROTOTYPE +.Nd define reader-writer cohort-based lock using the specified cohort type +.Sh LIBRARY +Concurrency Kit (libck, \-lck) +.Sh SYNOPSIS +.In ck_rwcohort.h +.Fn CK_RWCOHORT_PROTOTYPE "COHORT_NAME cohort_name" +.Sh DESCRIPTION +The ck_rwcohort.h header file does not define any cohort types. Instead, the user must use +the CK_RWCOHORT_PROTOTYPE macro to define any types they want to use. +This macro takes a single argument which corresponds to the type of the cohort lock that +the reader-writer lock should use. A cohort type must have already been defined with that name +using the +.Xr CK_COHORT_PROTOTYPE 3 +or +.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3 +macros. +.Pp +Instances of the defined lock type can be declared as: +.br + CK_RWCOHORT_INSTANCE(cohort_name) lock; +.Sh SEE ALSO +.Xr ck_rwcohort 3 , +.Xr CK_COHORT_PROTOTYPE 3 , +.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3 , +.Xr CK_RWCOHORT_INSTANCE 3 , +.Xr CK_RWCOHORT_INITIALIZER 3 , +.Xr CK_RWCOHORT_INIT 3 , +.Xr CK_RWCOHORT_READ_LOCK 3 , +.Xr CK_RWCOHORT_READ_UNLOCK 3 , +.Xr CK_RWCOHORT_WRITE_LOCK 3 , +.Xr CK_RWCOHORT_WRITE_UNLOCK 3 , +.Pp +Additional information available at http://concurrencykit.org/ diff --git a/doc/CK_RWCOHORT_READ_LOCK b/doc/CK_RWCOHORT_READ_LOCK new file mode 100644 index 0000000..68e8da0 --- /dev/null +++ b/doc/CK_RWCOHORT_READ_LOCK @@ -0,0 +1,62 @@ +.\" +.\" Copyright 2013 Brendon Scheinman. +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.\" +.Dd February 24, 2013. 
+.Dt CK_RWCOHORT_READ_LOCK 3 +.Sh NAME +.Nm CK_RWCOHORT_READ_LOCK +.Nd acquire read-only permission for cohort-based reader-writer lock +.Sh LIBRARY +Concurrency Kit (libck, \-lck) +.Sh SYNOPSIS +.In ck_cohort.h +.Fn CK_RWCOHORT_READ_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +"void *global_context" "void *local_context" +.Sh DESCRIPTION +This call will acquire read-only permission from +.Fa lock . +The call will block until this permission has been acquired. +.Fa cohort +must point to a cohort whose global lock is the same as all other cohorts used with +.Fa lock . +The +.Fa global_context +and +.Fa local_context +arguments will be passed along as the context arguments to any calls to +.Fa cohort . +. +.Sh SEE ALSO +.Xr ck_cohort 3 , +.Xr CK_RWCOHORT_PROTOTYPE 3 , +.Xr CK_RWCOHORT_INSTANCE 3 , +.Xr CK_RWCOHORT_INITIALIZER 3 , +.Xr CK_RWCOHORT_INIT 3 , +.Xr CK_RWCOHORT_READ_UNLOCK 3 , +.Xr CK_RWCOHORT_WRITE_LOCK 3 , +.Xr CK_RWCOHORT_WRITE_UNLOCK 3 , +.Pp +Additional information available at http://concurrencykit.org/ diff --git a/doc/CK_RWCOHORT_READ_UNLOCK b/doc/CK_RWCOHORT_READ_UNLOCK new file mode 100644 index 0000000..04f2ff9 --- /dev/null +++ b/doc/CK_RWCOHORT_READ_UNLOCK @@ -0,0 +1,61 @@ +.\" +.\" Copyright 2013 Brendon Scheinman. +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.\" +.Dd February 24, 2013. +.Dt CK_RWCOHORT_READ_UNLOCK 3 +.Sh NAME +.Nm CK_RWCOHORT_READ_UNLOCK +.Nd relinquish read-only access to cohort-based reader-writer lock +.Sh LIBRARY +Concurrency Kit (libck, \-lck) +.Sh SYNOPSIS +.In ck_cohort.h +.Fn CK_RWCOHORT_READ_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +"void *global_context" "void *local_context" +.Sh DESCRIPTION +This call will relinquish read-only permission to +.Fa lock . +.Fa cohort +must point to a cohort whose global lock is the same as all other cohorts used with +.Fa lock . +The +.Fa global_context +and +.Fa local_context +arguments will be passed along as the context arguments to any calls to +.Fa cohort . +. 
+.Sh SEE ALSO +.Xr ck_cohort 3 , +.Xr CK_RWCOHORT_PROTOTYPE 3 , +.Xr CK_RWCOHORT_INSTANCE 3 , +.Xr CK_RWCOHORT_INITIALIZER 3 , +.Xr CK_RWCOHORT_INIT 3 , +.Xr CK_RWCOHORT_READ_LOCK 3 , +.Xr CK_RWCOHORT_WRITE_LOCK 3 , +.Xr CK_RWCOHORT_WRITE_UNLOCK 3 , +.Pp +Additional information available at http://concurrencykit.org/ diff --git a/doc/CK_RWCOHORT_WRITE_LOCK b/doc/CK_RWCOHORT_WRITE_LOCK new file mode 100644 index 0000000..724d4b5 --- /dev/null +++ b/doc/CK_RWCOHORT_WRITE_LOCK @@ -0,0 +1,62 @@ +.\" +.\" Copyright 2013 Brendon Scheinman. +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.\" +.Dd February 24, 2013. +.Dt CK_RWCOHORT_WRITE_LOCK 3 +.Sh NAME +.Nm CK_RWCOHORT_WRITE_LOCK +.Nd acquire write access for a cohort-based reader-writer lock +.Sh LIBRARY +Concurrency Kit (libck, \-lck) +.Sh SYNOPSIS +.In ck_rwcohort.h +.Fn CK_RWCOHORT_WRITE_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +"void *global_context" "void *local_context" +.Sh DESCRIPTION +This call will acquire write permission for +.Fa lock . +The call will block until this permission has been acquired. +.Fa cohort +must point to a cohort whose global lock is the same as all other cohorts used with +.Fa lock . +The +.Fa global_context +and +.Fa local_context +arguments will be passed along as the context arguments to any calls to +.Fa cohort . +. +.Sh SEE ALSO +.Xr ck_cohort 3 , +.Xr CK_RWCOHORT_PROTOTYPE 3 , +.Xr CK_RWCOHORT_INSTANCE 3 , +.Xr CK_RWCOHORT_INITIALIZER 3 , +.Xr CK_RWCOHORT_INIT 3 , +.Xr CK_RWCOHORT_READ_LOCK 3 , +.Xr CK_RWCOHORT_READ_UNLOCK 3 , +.Xr CK_RWCOHORT_WRITE_UNLOCK 3 , +.Pp +Additional information available at http://concurrencykit.org/ diff --git a/doc/CK_RWCOHORT_WRITE_UNLOCK b/doc/CK_RWCOHORT_WRITE_UNLOCK new file mode 100644 index 0000000..4d063f8 --- /dev/null +++ b/doc/CK_RWCOHORT_WRITE_UNLOCK @@ -0,0 +1,61 @@ +.\" +.\" Copyright 2013 Brendon Scheinman. +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2.
Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.\" +.Dd February 24, 2013. +.Dt CK_RWCOHORT_WRITE_UNLOCK 3 +.Sh NAME +.Nm CK_RWCOHORT_WRITE_UNLOCK +.Nd relinquish write access for cohort-based reader-writer lock +.Sh LIBRARY +Concurrency Kit (libck, \-lck) +.Sh SYNOPSIS +.In ck_rwcohort.h +.Fn CK_RWCOHORT_WRITE_UNLOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +"void *global_context" "void *local_context" +.Sh DESCRIPTION +This call will relinquish write permission for +.Fa lock . +.Fa cohort +must point to a cohort whose global lock is the same as all other cohorts used with +.Fa lock . +The +.Fa global_context +and +.Fa local_context +arguments will be passed along as the context arguments to any calls to +.Fa cohort . +. +.Sh SEE ALSO +.Xr ck_cohort 3 , +.Xr CK_RWCOHORT_PROTOTYPE 3 , +.Xr CK_RWCOHORT_INSTANCE 3 , +.Xr CK_RWCOHORT_INITIALIZER 3 , +.Xr CK_RWCOHORT_INIT 3 , +.Xr CK_RWCOHORT_READ_LOCK 3 , +.Xr CK_RWCOHORT_READ_UNLOCK 3 , +.Xr CK_RWCOHORT_WRITE_LOCK 3 , +.Pp +Additional information available at http://concurrencykit.org/ diff --git a/doc/ck_rwcohort b/doc/ck_rwcohort new file mode 100644 index 0000000..d948db6 --- /dev/null +++ b/doc/ck_rwcohort @@ -0,0 +1,211 @@ +.\" +.\" Copyright 2013 Brendon Scheinman. +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.\" +.Dd February 24, 2013. +.Dt ck_cohort 3 +.Sh NAME +.Nm ck_cohort +.Nd generalized interface for lock cohorts +.Sh LIBRARY +Concurrency Kit (libck, \-lck) +.Sh SYNOPSIS +.In ck_cohort.h +.Fn CK_COHORT_PROTOTYPE "COHORT_NAME cohort_name" "LOCK_FXN global_lock_method" \ +"LOCK_FXN global_unlock_method" "LOCK_FXN local_lock_method" "LOCK_FXN local_unlock_method" +.Fn CK_COHORT_TRYLOCK_PROTOTYPE "COHORT_NAME cohort_name" \ +"LOCK_FXN global_lock_method" "LOCK_FXN global_unlock_method" \ +"BOOL_LOCK_FXN global_locked_method" BOOL_LOCK_FXN global_trylock_method" \ +"LOCK_FXN local_lock_method" "LOCK_FXN local_unlock_method" \ +"BOOL_LOCK_FXN local_locked_method" BOOL_LOCK_FXN local_trylock_method" +.Fn CK_COHORT_INSTANCE "COHORT_NAME cohort_name" +.Fn CK_COHORT_INIT "COHORT_NAME cohort_name" "ck_cohort *cohort" \ +"void *global_lock" "void *local_lock" "unsigned int pass_limit" +.Fn CK_COHORT_LOCK "COHORT_NAME cohort_name" "ck_cohort *cohort" \ +"void *global_context" "void *local_context" +.Fn CK_COHORT_UNLOCK "COHORT_NAME cohort_name" "ck_cohort *cohort" \ +"void *global_context" "void *local_context" +.Pp +Where LOCK_FXN refers to a method with the signature +.br +void(void *lock, void *context) +.br +BOOL_LOCK_FXN refers to a method with the signature +.br +bool(void *lock, void *context) +.Pp +The +.Fa context +argument in each signature is used to pass along any additional information that +the lock might need for its lock, unlock and trylock methods. The values for this +argument are provided to each call to +.Xr CK_COHORT_LOCK 3 , +.Xr CK_COHORT_UNLOCK 3 , +.Xr CK_COHORT_LOCKED 3 , +and +.Xr CK_COHORT_TRYLOCK 3 +. +.Sh DESCRIPTION +ck_cohort.h provides an interface for defining lock cohorts with +arbitrary lock types. Cohorts are a mechanism for coordinating +threads on NUMA architectures in order to reduce the frequency +with which a lock is passed between threads on different clusters. +.Pp +Before using a cohort, the user must define a cohort type using +either the +.Fn CK_COHORT_PROTOTYPE +or the +.Fn CK_COHORT_TRYLOCK_PROTOTYPE +macros. These macros allow the user to specify the lock methods that +they would like the cohort to use. See the +.Xr CK_COHORT_PROTOTYPE 3 +and +.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3 +man pages for more details. 
+.Pp +.Sh EXAMPLE +.Bd -literal -offset indent +#include +#include + +#include +#include +#include + +/* + * Create cohort methods with signatures that match + * the required signature + */ +static void +ck_spinlock_lock_with_context(ck_spinlock_t *lock, void *context) +{ + (void)context; + ck_spinlock_lock(lock); + return; +} + +static void +ck_spinlock_unlock_with_context(ck_spinlock_t *lock, void *context) +{ + (void)context; + ck_spinlock_unlock(lock); + return; +} + +static bool +ck_spinlock_locked_with_context(ck_spinlock_t *lock, void *context) +{ + (void)context; + return ck_spinlock_locked(lock); +} + +/* + * define a cohort type named "test_cohort" that will use + * the above methods for both its global and local locks + */ +CK_COHORT_PROTOTYPE(test_cohort, + ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context, ck_spinlock_locked_with_context + ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context, ck_spinlock_locked_with_context) + +static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER; +static unsigned int ready; + +static void * +function(void *context) +{ + CK_COHORT_INSTANCE(test_cohort) *cohort = context; + + while (ready == 0); + + while (ready > 0) { + /* + * acquire the cohort lock before performing critical section. + * note that we pass NULL for both the global and local context + * arguments because neither the lock nor unlock functions + * will use them. + */ + CK_COHORT_LOCK(test_cohort, cohort, NULL, NULL); + + /* perform critical section */ + + /* relinquish cohort lock */ + CK_COHORT_UNLOCK(test_cohort, cohort, NULL, NULL); + } + + return NULL; +} + +int +main(void) +{ + unsigned int nthr = 4; + unsigned int n_cohorts = 2; + unsigned int i; + + /* allocate 2 cohorts of the defined type */ + CK_COHORT_INSTANCE(test_cohort) *cohorts = + calloc(n_cohorts, sizeof(CK_COHORT_INSTANCE(test_cohort))); + + /* create local locks to use with each cohort */ + ck_spinlock_t *local_locks = + calloc(n_cohorts, sizeof(ck_spinlock_t)); + + pthread_t *threads = + calloc(nthr, sizeof(pthread_t)); + + /* initialize each of the cohorts before using them */ + for (i = 0 ; i < n_cohorts ; ++i) { + CK_COHORT_INIT(test_cohort, cohorts + i, &global_lock, local_locks + i, + CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT); + } + + /* start each thread and assign cohorts equally */ + for (i = 0 ; i < nthr ; ++i) { + pthread_create(threads + i, NULL, function, cohorts + (i % n_cohorts)); + } + + ck_pr_store_uint(&ready, 1); + sleep(10); + ck_pr_store_uint(&ready, 0); + + for (i = 0 ; i < nthr ; ++i) { + pthread_join(threads[i], NULL); + } + + return 0; +} +.Ed +.Sh SEE ALSO +.Xr CK_COHORT_PROTOTYPE 3 , +.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3 , +.Xr CK_COHORT_INSTANCE 3 , +.Xr CK_COHORT_INITIALIZER 3 , +.Xr CK_COHORT_INIT 3 , +.Xr CK_COHORT_LOCK 3 , +.Xr CK_COHORT_UNLOCK 3 , +.Xr CK_COHORT_LOCKED 3 , +.Xr CK_COHORT_TRYLOCK 3 , +.Pp +Additional information available at http://concurrencykit.org/ diff --git a/include/ck_rwcohort.h b/include/ck_rwcohort.h index 67a2d1f..76fb9cb 100644 --- a/include/ck_rwcohort.h +++ b/include/ck_rwcohort.h @@ -67,7 +67,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rwcohort_wp_##N##_write_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \ + ck_rwcohort_wp_##N##_write_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -165,7 +165,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rwcohort_rp_##N##_write_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \ + 
ck_rwcohort_rp_##N##_write_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -176,16 +176,16 @@ CK_COHORT_LOCK(N, cohort, global_context, local_context); \ if (ck_pr_load_uint(&rw_cohort->read_counter) == 0) { \ break; \ - } else { \ - CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ - while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \ - ck_pr_stall(); \ - if (++wait_count > rw_cohort->wait_limit && raised == false) {\ - ck_pr_inc_uint(&rw_cohort->read_barrier); \ - raised = true; \ - } \ - } \ } \ + \ + CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ + while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \ + ck_pr_stall(); \ + if (++wait_count > rw_cohort->wait_limit && raised == false) { \ + ck_pr_inc_uint(&rw_cohort->read_barrier); \ + raised = true; \ + } \ + } \ } \ \ if (raised == true) { \ @@ -277,7 +277,7 @@ } \ \ CK_CC_INLINE static void \ - ck_rwcohort_neutral_##N##_read_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ + ck_rwcohort_neutral_##N##_read_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ void *local_context) \ { \ @@ -290,7 +290,7 @@ \ \ CK_CC_INLINE static void \ - ck_rwcohort_neutral_##N##_read_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *cohort) \ + ck_rwcohort_neutral_##N##_read_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *cohort) \ { \ ck_pr_dec_uint(&cohort->read_counter); \ } From eacaacdb6e842c5f399d87d9485bc2ba8a8498fe Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Wed, 8 May 2013 16:03:16 -0500 Subject: [PATCH 16/20] ck_rwcohort: Fixed Makefile for automated regression testing --- regressions/ck_rwcohort/validate/Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/regressions/ck_rwcohort/validate/Makefile b/regressions/ck_rwcohort/validate/Makefile index 9e86021..af9826d 100644 --- a/regressions/ck_rwcohort/validate/Makefile +++ b/regressions/ck_rwcohort/validate/Makefile @@ -14,7 +14,9 @@ ck_wp: ck_wp.c ../../../include/ck_rwcohort.h $(CC) $(CFLAGS) -o ck_wp ck_wp.c -g check: all - ./validate $(CORES) 1 + ./ck_neutral `expr $(CORES) / 2` 2 1 + ./ck_rp `expr $(CORES) / 2` 2 1 + ./ck_wp `expr $(CORES) / 2` 2 1 clean: rm -rf *.dSYM *~ *.o $(OBJECTS) From f19a1f3c2346c88d7a7a7ac9a04cf0e80b26da60 Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Fri, 10 May 2013 14:43:28 -0400 Subject: [PATCH 17/20] ck_rwcohort: Added signatures for various strategies to documentation --- doc/CK_RWCOHORT_INIT | 8 ++-- doc/CK_RWCOHORT_INSTANCE | 6 ++- doc/CK_RWCOHORT_PROTOTYPE | 4 +- doc/CK_RWCOHORT_READ_LOCK | 6 ++- doc/CK_RWCOHORT_READ_UNLOCK | 6 ++- doc/CK_RWCOHORT_WRITE_LOCK | 6 ++- doc/CK_RWCOHORT_WRITE_UNLOCK | 6 ++- doc/ck_rwcohort | 87 ++++++++++++++++-------------------- 8 files changed, 70 insertions(+), 59 deletions(-) diff --git a/doc/CK_RWCOHORT_INIT b/doc/CK_RWCOHORT_INIT index 1d543e8..e097039 100644 --- a/doc/CK_RWCOHORT_INIT +++ b/doc/CK_RWCOHORT_INIT @@ -33,18 +33,20 @@ Concurrency Kit (libck, \-lck) .Sh SYNOPSIS .In ck_rwcohort.h -.Fn CK_RWCOHORT_INIT "COHORT_NAME cohort_name" "LOCK *lock" "unsigned int wait_limit" +.Fn CK_RWCOHORT_NEUTRAL_INIT "COHORT_NAME cohort_name" "LOCK *lock" +.Fn CK_RWCOHORT_RP_INIT "COHORT_NAME cohort_name" "LOCK *lock" "unsigned int wait_limit" +.Fn CK_RWCOHORT_WP_INIT "COHORT_NAME cohort_name" "LOCK *lock" "unsigned int wait_limit" .Sh DESCRIPTION This macro initializes the lock instance pointed to by the .Fa lock argument. 
Until a lock instance is initialized using the CK_RWCOHORT_INIT macro, any operations -involving it will have undefined behavior. The +involving it will have undefined behavior. Note that the .Fa wait_limit argument should only be used with reader-preference or writer-preference locks. For neutral locks, this argument should be excluded. If you are unsure of a value to use for the .Fa wait_limit -argument, you should use CK_RWCOHORT_DEFAULT_LOCAL_WAIT_LIMIT. +argument, you should use CK_RWCOHORT_STRATEGY_DEFAULT_LOCAL_WAIT_LIMIT. .Sh SEE ALSO .Xr ck_rwcohort 3 , .Xr CK_RWCOHORT_PROTOTYPE 3 , diff --git a/doc/CK_RWCOHORT_INSTANCE b/doc/CK_RWCOHORT_INSTANCE index c4e7f10..fcdfeea 100644 --- a/doc/CK_RWCOHORT_INSTANCE +++ b/doc/CK_RWCOHORT_INSTANCE @@ -33,7 +33,9 @@ Concurrency Kit (libck, \-lck) .Sh SYNOPSIS .In ck_cohort.h -.Fn CK_RWCOHORT_INSTANCE "COHORT_NAME cohort_name" +.Fn CK_RWCOHORT_NEUTRAL_INSTANCE "COHORT_NAME cohort_name" +.Fn CK_RWCOHORT_RP_INSTANCE "COHORT_NAME cohort_name" +.Fn CK_RWCOHORT_WP_INSTANCE "COHORT_NAME cohort_name" .Sh DESCRIPTION The user must use this macro to declare instances of lock types that they have defined using the @@ -47,7 +49,7 @@ CK_RWCOHORT_INSTANCE(foo) cohort; This macro should also be used when allocating memory for cohorts. For instance, to allocate a block of 4 cohorts: .br -CK_RWCOHORT_INSTANCE(foo) *cohorts = malloc(4 * sizeof(CK_RWCOHORT_INSTANCE(foo))); +CK_RWCOHORT_WP_INSTANCE(foo) *cohorts = malloc(4 * sizeof(CK_RWCOHORT_WP_INSTANCE(foo))); .Sh SEE ALSO .Xr ck_rwcohort 3 , .Xr CK_RWCOHORT_PROTOTYPE 3 , diff --git a/doc/CK_RWCOHORT_PROTOTYPE b/doc/CK_RWCOHORT_PROTOTYPE index cd8710d..fb4d25a 100644 --- a/doc/CK_RWCOHORT_PROTOTYPE +++ b/doc/CK_RWCOHORT_PROTOTYPE @@ -33,7 +33,9 @@ Concurrency Kit (libck, \-lck) .Sh SYNOPSIS .In ck_rwcohort.h -.Fn CK_RWCOHORT_PROTOTYPE "COHORT_NAME cohort_name" +.Fn CK_RWCOHORT_NEUTRAL_PROTOTYPE "COHORT_NAME cohort_name" +.Fn CK_RWCOHORT_RP_PROTOTYPE "COHORT_NAME cohort_name" +.Fn CK_RWCOHORT_WP_PROTOTYPE "COHORT_NAME cohort_name" .Sh DESCRIPTION The ck_rwcohort.h header file does not define any cohort types. Instead, the user must use the CK_RWCOHORT_PROTOTYPE macro to define any types they want to use. 
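The CK_RWCOHORT_INIT, CK_RWCOHORT_INSTANCE, and CK_RWCOHORT_PROTOTYPE pages above document the strategy-specific macros one at a time; the sketch below shows how they are meant to fit together with the underlying ck_cohort macros. It is only an illustration assembled from the signatures documented in this patch series: the type name my_group, the wrapper functions, and the wait limit of 100 are arbitrary, the writer-preference (WP) strategy is used purely as an example, and the context arguments are NULL because the spinlock wrappers ignore them.

    #include <stdbool.h>

    #include <ck_cohort.h>
    #include <ck_rwcohort.h>
    #include <ck_spinlock.h>

    /* Spinlock wrappers matching the LOCK_FXN/BOOL_LOCK_FXN signatures. */
    static void
    my_lock(void *lock, void *context)
    {
        (void)context;
        ck_spinlock_lock(lock);
        return;
    }

    static void
    my_unlock(void *lock, void *context)
    {
        (void)context;
        ck_spinlock_unlock(lock);
        return;
    }

    static bool
    my_locked(void *lock, void *context)
    {
        (void)context;
        return ck_spinlock_locked(lock);
    }

    /* A cohort type over the wrappers, then a WP reader-writer type over it. */
    CK_COHORT_PROTOTYPE(my_group,
        my_lock, my_unlock, my_locked,    /* global lock methods */
        my_lock, my_unlock, my_locked)    /* local lock methods */
    CK_RWCOHORT_WP_PROTOTYPE(my_group)

    static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;
    static ck_spinlock_t local_lock = CK_SPINLOCK_INITIALIZER;
    static CK_COHORT_INSTANCE(my_group) cohort;
    static CK_RWCOHORT_WP_INSTANCE(my_group) rw_lock;

    static void
    setup(void)
    {
        CK_COHORT_INIT(my_group, &cohort, &global_lock, &local_lock,
            CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
        /* 100 is an arbitrary wait limit; see CK_RWCOHORT_INIT(3) for the default. */
        CK_RWCOHORT_WP_INIT(my_group, &rw_lock, 100);
    }

    static void
    reader(void)
    {
        CK_RWCOHORT_WP_READ_LOCK(my_group, &rw_lock, &cohort, NULL, NULL);
        /* shared, read-only critical section */
        CK_RWCOHORT_WP_READ_UNLOCK(my_group, &rw_lock, &cohort, NULL, NULL);
    }

    static void
    writer(void)
    {
        CK_RWCOHORT_WP_WRITE_LOCK(my_group, &rw_lock, &cohort, NULL, NULL);
        /* exclusive critical section */
        CK_RWCOHORT_WP_WRITE_UNLOCK(my_group, &rw_lock, &cohort, NULL, NULL);
    }

A real application would allocate one cohort instance (with its own local lock) per NUMA cluster and have each thread pass the instance for the cluster it runs on, as the complete example at the end of doc/ck_rwcohort does.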
diff --git a/doc/CK_RWCOHORT_READ_LOCK b/doc/CK_RWCOHORT_READ_LOCK index 68e8da0..bf65853 100644 --- a/doc/CK_RWCOHORT_READ_LOCK +++ b/doc/CK_RWCOHORT_READ_LOCK @@ -33,7 +33,11 @@ Concurrency Kit (libck, \-lck) .Sh SYNOPSIS .In ck_cohort.h -.Fn CK_RWCOHORT_READ_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +.Fn CK_RWCOHORT_NEUTRAL_READ_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +"void *global_context" "void *local_context" +.Fn CK_RWCOHORT_RP_READ_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +"void *global_context" "void *local_context" +.Fn CK_RWCOHORT_WP_READ_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ "void *global_context" "void *local_context" .Sh DESCRIPTION This call will acquire read-only permission from diff --git a/doc/CK_RWCOHORT_READ_UNLOCK b/doc/CK_RWCOHORT_READ_UNLOCK index 04f2ff9..4dd26e1 100644 --- a/doc/CK_RWCOHORT_READ_UNLOCK +++ b/doc/CK_RWCOHORT_READ_UNLOCK @@ -33,7 +33,11 @@ Concurrency Kit (libck, \-lck) .Sh SYNOPSIS .In ck_cohort.h -.Fn CK_RWCOHORT_READ_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +.Fn CK_RWCOHORT_NEUTRAL_READ_UNLOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +"void *global_context" "void *local_context" +.Fn CK_RWCOHORT_RP_READ_UNLOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +"void *global_context" "void *local_context" +.Fn CK_RWCOHORT_WP_READ_UNLOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ "void *global_context" "void *local_context" .Sh DESCRIPTION This call will relinquish read-only permission to diff --git a/doc/CK_RWCOHORT_WRITE_LOCK b/doc/CK_RWCOHORT_WRITE_LOCK index 724d4b5..b34b0ee 100644 --- a/doc/CK_RWCOHORT_WRITE_LOCK +++ b/doc/CK_RWCOHORT_WRITE_LOCK @@ -33,7 +33,11 @@ Concurrency Kit (libck, \-lck) .Sh SYNOPSIS .In ck_cohort.h -.Fn CK_RWCOHORT_READ_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +.Fn CK_RWCOHORT_NEUTRAL_WRITE_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +"void *global_context" "void *local_context" +.Fn CK_RWCOHORT_RP_WRITE_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +"void *global_context" "void *local_context" +.Fn CK_RWCOHORT_WP_WRITE_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ "void *global_context" "void *local_context" .Sh DESCRIPTION This call will acquire write permission for diff --git a/doc/CK_RWCOHORT_WRITE_UNLOCK b/doc/CK_RWCOHORT_WRITE_UNLOCK index 4d063f8..61cbcf6 100644 --- a/doc/CK_RWCOHORT_WRITE_UNLOCK +++ b/doc/CK_RWCOHORT_WRITE_UNLOCK @@ -33,7 +33,11 @@ Concurrency Kit (libck, \-lck) .Sh SYNOPSIS .In ck_cohort.h -.Fn CK_RWCOHORT_READ_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +.Fn CK_RWCOHORT_NEUTRAL_WRITE_UNLOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +"void *global_context" "void *local_context" +.Fn CK_RWCOHORT_RP_WRITE_UNLOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ +"void *global_context" "void *local_context" +.Fn CK_RWCOHORT_WP_WRITE_UNLOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\ "void *global_context" "void *local_context" .Sh DESCRIPTION This call will relinquish write permission for diff --git a/doc/ck_rwcohort b/doc/ck_rwcohort index d948db6..8948287 100644 --- a/doc/ck_rwcohort +++ b/doc/ck_rwcohort @@ -24,66 +24,55 @@ .\" SUCH DAMAGE. .\" .\" -.Dd February 24, 2013. -.Dt ck_cohort 3 +.Dd April 23, 2013. 
+.Dt ck_rwcohort 3 .Sh NAME -.Nm ck_cohort -.Nd generalized interface for lock cohorts +.Nm ck_rwcohort +.Nd generalized interface for reader-writer locks using cohort locks .Sh LIBRARY Concurrency Kit (libck, \-lck) .Sh SYNOPSIS -.In ck_cohort.h -.Fn CK_COHORT_PROTOTYPE "COHORT_NAME cohort_name" "LOCK_FXN global_lock_method" \ -"LOCK_FXN global_unlock_method" "LOCK_FXN local_lock_method" "LOCK_FXN local_unlock_method" -.Fn CK_COHORT_TRYLOCK_PROTOTYPE "COHORT_NAME cohort_name" \ -"LOCK_FXN global_lock_method" "LOCK_FXN global_unlock_method" \ -"BOOL_LOCK_FXN global_locked_method" BOOL_LOCK_FXN global_trylock_method" \ -"LOCK_FXN local_lock_method" "LOCK_FXN local_unlock_method" \ -"BOOL_LOCK_FXN local_locked_method" BOOL_LOCK_FXN local_trylock_method" -.Fn CK_COHORT_INSTANCE "COHORT_NAME cohort_name" -.Fn CK_COHORT_INIT "COHORT_NAME cohort_name" "ck_cohort *cohort" \ -"void *global_lock" "void *local_lock" "unsigned int pass_limit" -.Fn CK_COHORT_LOCK "COHORT_NAME cohort_name" "ck_cohort *cohort" \ +.In ck_rwcohort.h +In each of the following macros, "STRATEGY" should be replaced with either "NEUTRAL", "RP", or "WP" +depending on which locking strategy the user prefers. RP and WP represent reader preference and +writer preference, respectively, while NEUTRAL represents a strategy neutral to reads vs. writes. +.Fn CK_RWCOHORT_STRATEGY_PROTOTYPE "COHORT_NAME cohort_name" +.Fn CK_RWCOHORT_STRATEGY_NAME "COHORT_NAME cohort_name" +.Fn CK_RWCOHORT_STRATEGY_INSTANCE "COHORT_NAME cohort_name" +.Fn CK_RWCOHORT_STRATEGY_INIT "COHORT_NAME cohort_name" "RWCOHORT lock" "unsigned int wait_limit" +Note: the wait_limit argument should be omitted for locks using the neutral strategy +.Fn CK_RWCOHORT_STRATEGY_READ_LOCK "COHORT_NAME cohort_name" "RWCOHORT lock" "COHORT cohort" \ "void *global_context" "void *local_context" -.Fn CK_COHORT_UNLOCK "COHORT_NAME cohort_name" "ck_cohort *cohort" \ +.Fn CK_RWCOHORT_STRATEGY_READ_UNLOCK "COHORT_NAME cohort_name" "RWCOHORT lock" "COHORT cohort" \ "void *global_context" "void *local_context" -.Pp -Where LOCK_FXN refers to a method with the signature -.br -void(void *lock, void *context) -.br -BOOL_LOCK_FXN refers to a method with the signature -.br -bool(void *lock, void *context) -.Pp -The -.Fa context -argument in each signature is used to pass along any additional information that -the lock might need for its lock, unlock and trylock methods. The values for this -argument are provided to each call to -.Xr CK_COHORT_LOCK 3 , -.Xr CK_COHORT_UNLOCK 3 , -.Xr CK_COHORT_LOCKED 3 , -and -.Xr CK_COHORT_TRYLOCK 3 -. +.Fn CK_RWCOHORT_STRATEGY_WRITE_LOCK "COHORT_NAME cohort_name" "RWCOHORT lock" "COHORT cohort" \ +"void *global_context" "void *local_context" +.Fn CK_RWCOHORT_STRATEGY_WRITE_UNLOCK "COHORT_NAME cohort_name" "RWCOHORT lock" "COHORT cohort" \ +"void *global_context" "void *local_context" + +Arguments of type RWCOHORT must be pointers to structs defined using the +.Xr CK_RWCOHORT_STRATEGY_PROTOTYPE 3 +macro with the same strategy and cohort name as the current call. + +Arguments of type COHORT must be pointers to structs defined using the +.Xr CK_COHORT_PROTOTYPE 3 +macro. + .Sh DESCRIPTION -ck_cohort.h provides an interface for defining lock cohorts with -arbitrary lock types. Cohorts are a mechanism for coordinating -threads on NUMA architectures in order to reduce the frequency -with which a lock is passed between threads on different clusters. 
+ck_rwcohort.h provides an interface for defining reader-writer locks +that use cohort locks internally to increase performance on NUMA +architectures. See +.Xr ck_cohort 3 +for more information about cohort locks. .Pp -Before using a cohort, the user must define a cohort type using +Before using a reader-writer cohort lock, the user must define a cohort type using either the -.Fn CK_COHORT_PROTOTYPE -or the -.Fn CK_COHORT_TRYLOCK_PROTOTYPE -macros. These macros allow the user to specify the lock methods that -they would like the cohort to use. See the .Xr CK_COHORT_PROTOTYPE 3 -and +or the .Xr CK_COHORT_TRYLOCK_PROTOTYPE 3 -man pages for more details. +macros, and define a reader-writer lock type using the +.Xr CK_RWCOHORT_PROTOTYPE 3 +macro. .Pp .Sh EXAMPLE .Bd -literal -offset indent From 2b33d571486211294f8b1d2bd0b94573bbac0e62 Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Mon, 13 May 2013 18:03:36 -0400 Subject: [PATCH 18/20] ck_rwcohort: Removed extra whitespace --- include/ck_rwcohort.h | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/include/ck_rwcohort.h b/include/ck_rwcohort.h index 76fb9cb..2ae88bb 100644 --- a/include/ck_rwcohort.h +++ b/include/ck_rwcohort.h @@ -54,7 +54,6 @@ unsigned int write_barrier; \ unsigned int wait_limit; \ }; \ - \ CK_CC_INLINE static void \ ck_rwcohort_wp_##N##_init(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \ unsigned int wait_limit) \ @@ -65,7 +64,6 @@ ck_pr_barrier(); \ return; \ } \ - \ CK_CC_INLINE static void \ ck_rwcohort_wp_##N##_write_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ @@ -83,7 +81,6 @@ \ return; \ } \ - \ CK_CC_INLINE static void \ ck_rwcohort_wp_##N##_write_unlock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ @@ -92,7 +89,6 @@ (void)rw_cohort; \ CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ } \ - \ CK_CC_INLINE static void \ ck_rwcohort_wp_##N##_read_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ @@ -123,7 +119,6 @@ \ return; \ } \ - \ CK_CC_INLINE static void \ ck_rwcohort_wp_##N##_read_unlock(CK_RWCOHORT_WP_INSTANCE(N) *cohort) \ { \ @@ -152,7 +147,6 @@ unsigned int read_barrier; \ unsigned int wait_limit; \ }; \ - \ CK_CC_INLINE static void \ ck_rwcohort_rp_##N##_init(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \ unsigned int wait_limit) \ @@ -163,7 +157,6 @@ ck_pr_barrier(); \ return; \ } \ - \ CK_CC_INLINE static void \ ck_rwcohort_rp_##N##_write_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ @@ -194,7 +187,6 @@ \ return; \ } \ - \ CK_CC_INLINE static void \ ck_rwcohort_rp_##N##_write_unlock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \ @@ -202,7 +194,6 @@ (void)rw_cohort; \ CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ } \ - \ CK_CC_INLINE static void \ ck_rwcohort_rp_##N##_read_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ @@ -219,8 +210,6 @@ \ return; \ } \ - \ - \ CK_CC_INLINE static void \ ck_rwcohort_rp_##N##_read_unlock(CK_RWCOHORT_RP_INSTANCE(N) *cohort) \ { \ @@ -247,7 +236,6 @@ CK_RWCOHORT_NEUTRAL_INSTANCE(N) { \ unsigned int read_counter; \ }; \ - \ CK_CC_INLINE static void \ ck_rwcohort_neutral_##N##_init(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort) \ { \ @@ -255,7 +243,6 @@ ck_pr_barrier(); \ return; \ } \ - \ CK_CC_INLINE static 
void \ ck_rwcohort_neutral_##N##_write_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ @@ -267,7 +254,6 @@ } \ return; \ } \ - \ CK_CC_INLINE static void \ ck_rwcohort_neutral_##N##_write_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \ @@ -275,7 +261,6 @@ (void)rw_cohort; \ CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ } \ - \ CK_CC_INLINE static void \ ck_rwcohort_neutral_##N##_read_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \ @@ -287,8 +272,6 @@ \ return; \ } \ - \ - \ CK_CC_INLINE static void \ ck_rwcohort_neutral_##N##_read_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *cohort) \ { \ From babae24a07aeee61175b8d3260eeef5f8792bfd2 Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Mon, 13 May 2013 18:07:57 -0400 Subject: [PATCH 19/20] ck_rwcohort: Added missing return statements and changed while (true) loops to for (;;) --- include/ck_rwcohort.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/include/ck_rwcohort.h b/include/ck_rwcohort.h index 2ae88bb..582b232 100644 --- a/include/ck_rwcohort.h +++ b/include/ck_rwcohort.h @@ -88,6 +88,7 @@ { \ (void)rw_cohort; \ CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ + return; \ } \ CK_CC_INLINE static void \ ck_rwcohort_wp_##N##_read_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \ @@ -97,7 +98,7 @@ unsigned int wait_count = 0; \ bool raised = false; \ \ - while (true) { \ + for (;;) { \ ck_pr_inc_uint(&rw_cohort->read_counter); \ if (CK_COHORT_LOCKED(N, cohort, global_context, local_context) == false) {\ break; \ @@ -123,6 +124,7 @@ ck_rwcohort_wp_##N##_read_unlock(CK_RWCOHORT_WP_INSTANCE(N) *cohort) \ { \ ck_pr_dec_uint(&cohort->read_counter); \ + return; \ } #define CK_RWCOHORT_WP_INITIALIZER { \ @@ -165,7 +167,7 @@ unsigned int wait_count = 0; \ bool raised = false; \ \ - while (true) { \ + for (;;) { \ CK_COHORT_LOCK(N, cohort, global_context, local_context); \ if (ck_pr_load_uint(&rw_cohort->read_counter) == 0) { \ break; \ @@ -193,6 +195,7 @@ { \ (void)rw_cohort; \ CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ + return; \ } \ CK_CC_INLINE static void \ ck_rwcohort_rp_##N##_read_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \ @@ -214,6 +217,7 @@ ck_rwcohort_rp_##N##_read_unlock(CK_RWCOHORT_RP_INSTANCE(N) *cohort) \ { \ ck_pr_dec_uint(&cohort->read_counter); \ + return; \ } #define CK_RWCOHORT_RP_INITIALIZER { \ @@ -260,6 +264,7 @@ { \ (void)rw_cohort; \ CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \ + return; \ } \ CK_CC_INLINE static void \ ck_rwcohort_neutral_##N##_read_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \ @@ -276,6 +281,7 @@ ck_rwcohort_neutral_##N##_read_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *cohort) \ { \ ck_pr_dec_uint(&cohort->read_counter); \ + return; \ } #define CK_RWCOHORT_NEUTRAL_INITIALIZER { \ From 94655f57ff588fc0324fc79aaceda842d9466d96 Mon Sep 17 00:00:00 2001 From: Brendon Scheinman Date: Sun, 30 Jun 2013 18:49:30 -0400 Subject: [PATCH 20/20] ck_rwcohort: Completed sample code in documentation; --- doc/ck_rwcohort | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/doc/ck_rwcohort b/doc/ck_rwcohort index 8948287..5656071 100644 --- a/doc/ck_rwcohort +++ b/doc/ck_rwcohort @@ -81,12 +81,11 @@ macro. 
#include #include +#include #include -/* - * Create cohort methods with signatures that match - * the required signature - */ +/* Create cohort methods with signatures that match the required signature */ + static void ck_spinlock_lock_with_context(ck_spinlock_t *lock, void *context) { @@ -115,10 +114,15 @@ ck_spinlock_locked_with_context(ck_spinlock_t *lock, void *context) * the above methods for both its global and local locks */ CK_COHORT_PROTOTYPE(test_cohort, - ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context, ck_spinlock_locked_with_context + ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context, ck_spinlock_locked_with_context, ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context, ck_spinlock_locked_with_context) +/* define a reader-writer type using the same cohort type */ +CK_RWCOHORT_WP_PROTOTYPE(test_cohort) + static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER; +static CK_COHORT_INSTANCE(test_cohort) *cohorts; +static CK_RWCOHORT_WP_INSTANCE(test_cohort) rw_cohort = CK_RWCOHORT_WP_INITIALIZER; static unsigned int ready; static void * @@ -126,9 +130,9 @@ function(void *context) { CK_COHORT_INSTANCE(test_cohort) *cohort = context; - while (ready == 0); + while (ck_pr_load_uint(&ready) == 0); - while (ready > 0) { + while (ck_pr_load_uint(&ready) > 0) { /* * acquire the cohort lock before performing critical section. * note that we pass NULL for both the global and local context