
Java Concurrency in Practice

Brian Goetz
Tim Peierls
Joshua Bloch
Joseph Bowbeer
David Holmes
Doug Lea




















Addison-Wesley Professional
ISBN-10: 0321349601
ISBN-13: 978-0321349606

Index

Index ii
Preface xiii
How to Use this Book xiii
Code Examples xiv
Acknowledgments xv
Chapter 1 - Introduction 1
1.1. A (Very) Brief History of Concurrency 2
1.2. Benefits of Threads 3
1.2.1. Exploiting Multiple Processors 3
1.2.2. Simplicity of Modeling 3
1.2.3. Simplified Handling of Asynchronous Events 3
1.2.4. More Responsive User Interfaces 4
1.3. Risks of Threads 5
1.3.1. Safety Hazards 5
1.3.2. Liveness Hazards 6
1.3.3. Performance Hazards 6
1.4. Threads are Everywhere 8
Part I: Fundamentals 10
Chapter 2. Thread Safety 11
2.1. What is Thread Safety? 12
2.2. Atomicity 13
2.3. Locking 16
2.4. Guarding State with Locks 19
2.5. Liveness and Performance 20
Chapter 3. Sharing Objects 23
3.1. Visibility 23
3.2. Publication and Escape 26
3.3. Thread Confinement 28
3.4. Immutability 31
3.5. Safe Publication 33
Chapter 4. Composing Objects 37
4.1. Designing a Thread-safe Class 37
4.2. Instance Confinement 39
4.3. Delegating Thread Safety 41
4.4. Adding Functionality to Existing Thread-safe Classes 47
4.5. Documenting Synchronization Policies 49
Chapter 5. Building Blocks 51
5.1. Synchronized Collections 51
5.2. Concurrent Collections 54
5.3. Blocking Queues and the Producer-consumer Pattern 56
5.4. Blocking and Interruptible Methods 59
5.5. Synchronizers 60
5.6. Building an Efficient, Scalable Result Cache 64
Summary of Part I 69

Part II: Structuring Concurrent Applications 71
Chapter 6. Task Execution 72
6.1. Executing Tasks in Threads 72
6.2. The Executor Framework 74
6.3. Finding Exploitable Parallelism 78
Summary 83
Chapter 7. Cancellation and Shutdown 85
7.1. Task Cancellation 85
7.2. Stopping a Thread-based Service 93
7.3. Handling Abnormal Thread Termination 100
7.4. JVM Shutdown 102
Summary 103
Chapter 8. Applying Thread Pools 104
8.1. Implicit Couplings Between Tasks and Execution Policies 104
8.2. Sizing Thread Pools 105
8.3. Configuring ThreadPoolExecutor 106
8.4. Extending ThreadPoolExecutor 111
8.5. Parallelizing Recursive Algorithms 112
Summary 116
Chapter 9. GUI Applications 117
9.1. Why are GUIs Single-threaded? 117
9.2. Short-running GUI Tasks 119
9.3. Long-running GUI Tasks 121
9.4. Shared Data Models 123
9.5. Other Forms of Single-threaded Subsystems 125
Summary 126
Part III: Liveness, Performance, and Testing 127
Chapter 10. Avoiding Liveness Hazards 128
10.1. Deadlock 128
10.2. Avoiding and Diagnosing Deadlocks 133
10.3. Other Liveness Hazards 135
Summary 136
Chapter 11. Performance and Scalability 137
11.1. Thinking about Performance 137
11.2. Amdahl's Law 139
11.3. Costs Introduced by Threads 142
11.4. Reducing Lock Contention 144
11.5. Example: Comparing Map Performance 150
11.6. Reducing Context Switch Overhead 151
Summary 152
Chapter 12. Testing Concurrent Programs 153
12.1. Testing for Correctness 153
12.2. Testing for Performance 160
12.3. Avoiding Performance Testing Pitfalls 165
12.4. Complementary Testing Approaches 167
Summary 169
Part IV: Advanced Topics 170
Chapter 13 - Explicit Locks 171
13.1. Lock and ReentrantLock 171
13.2. Performance Considerations 174
13.3. Fairness 175

13.4. Choosing Between Synchronized and ReentrantLock 176
13.5. Read-write Locks 176
Summary 178
Chapter 14 - Building Custom Synchronizers 179
14.1. Managing State Dependence 179
14.2. Using Condition Queues 183
14.3. Explicit Condition Objects 188
14.4. Anatomy of a Synchronizer 189
14.5. AbstractQueuedSynchronizer 190
14.6. AQS in java.util.concurrent Synchronizer Classes 192
Summary 194
Chapter 15. Atomic Variables and Non-blocking Synchronization 195
15.1. Disadvantages of Locking 195
15.2. Hardware Support for Concurrency 196
15.3. Atomic Variable Classes 198
15.4. Nonblocking Algorithms 201
Summary 206
Chapter 16. The Java Memory Model 207
16.1. What is a Memory Model, and Why would I Want One? 207
16.2. Publication 211
Summary 215
Appendix A. Annotations for Concurrency 216
A.1. Class Annotations 216
A.2. Field and Method Annotations 216
Bibliography 217


Listing and Image Index

Preface
Listing 1. Bad Way to Sort a List. Don't Do this. xiv
Listing 2. Less than Optimal Way to Sort a List. xv
Chapter 1. Introduction 1
Listing 1.1. Non-thread-safe Sequence Generator. 5
Figure 1.1. Unlucky Execution of UnsafeSequence.getNext(). 5
Listing 1.2. Thread-safe Sequence Generator. 6
Chapter 2. Thread Safety 11
Listing 2.1. A Stateless Servlet. 13
Listing 2.2. Servlet that Counts Requests without the Necessary Synchronization. Don't Do this. 14
Listing 2.3. Race Condition in Lazy Initialization. Don't Do this. 15
Listing 2.4. Servlet that Counts Requests Using AtomicLong. 16
Listing 2.5. Servlet that Attempts to Cache its Last Result without Adequate Atomicity. Don't Do this. 17
Listing 2.6. Servlet that Caches Last Result, But with Unacceptably Poor Concurrency. Don't Do this. 18
Listing 2.7. Code that would Deadlock if Intrinsic Locks were Not Reentrant. 18
Figure 2.1. Poor Concurrency of SynchronizedFactorizer. 21
Listing 2.8. Servlet that Caches its Last Request and Result. 21
Chapter 3. Sharing Objects 23
Listing 3.1. Sharing Variables without Synchronization. Don't Do this. 23
Listing 3.2. Non-thread-safe Mutable Integer Holder. 24
Listing 3.3. Thread-safe Mutable Integer Holder. 24
Figure 3.1. Visibility Guarantees for Synchronization. 25
Listing 3.4. Counting Sheep. 26
Listing 3.5. Publishing an Object. 27
Listing 3.6. Allowing Internal Mutable State to Escape. Don't Do this. 27
Listing 3.7. Implicitly Allowing the this Reference to Escape. Don't Do this. 28
Listing 3.8. Using a Factory Method to Prevent the this Reference from Escaping During Construction. 28
Listing 3.9. Thread Confinement of Local Primitive and Reference Variables. 30
Listing 3.10. Using ThreadLocal to Ensure Thread Confinement. 30
Listing 3.11. Immutable Class Built Out of Mutable Underlying Objects. 32
Listing 3.12. Immutable Holder for Caching a Number and its Factors. 33
Listing 3.13. Caching the Last Result Using a Volatile Reference to an Immutable Holder Object. 33
Listing 3.14. Publishing an Object without Adequate Synchronization. Don't Do this. 33

Listing 3.15. Class at Risk of Failure if Not Properly Published. 34

Chapter 4. Composing Objects 37
Listing 4.1. Simple Thread-safe Counter Using the Java Monitor Pattern. 37
Listing 4.2. Using Confinement to Ensure Thread Safety. 39
Listing 4.3. Guarding State with a Private Lock. 40
Listing 4.4. Monitor-based Vehicle Tracker Implementation. 42
Listing 4.5. Mutable Point Class Similar to java.awt.Point. 42
Listing 4.6. Immutable Point class used by DelegatingVehicleTracker. 42
Listing 4.7. Delegating Thread Safety to a ConcurrentHashMap. 43
Listing 4.8. Returning a Static Copy of the Location Set Instead of a "Live" One. 43
Listing 4.9. Delegating Thread Safety to Multiple Underlying State Variables. 44
Listing 4.10. Number Range Class that does Not Sufficiently Protect its Invariants. Don't Do this. 45
Listing 4.11. Thread-safe Mutable Point Class. 46
Listing 4.12. Vehicle Tracker that Safely Publishes Underlying State. 46
Listing 4.13. Extending Vector to have a Put-if-absent Method. 47
Listing 4.14. Non-thread-safe Attempt to Implement Put-if-absent. Don't Do this. 48
Listing 4.15. Implementing Put-if-absent with Client-side Locking. 48
Listing 4.16. Implementing Put-if-absent Using Composition. 49
Chapter 5. Building Blocks 51
Listing 5.1. Compound Actions on a Vector that may Produce Confusing Results. 51
Figure 5.1. Interleaving of getLast and deleteLast that throws ArrayIndexOutOfBoundsException. 51
Listing 5.2. Compound Actions on Vector Using Client-side Locking. 52
Listing 5.3. Iteration that may Throw ArrayIndexOutOfBoundsException. 52
Listing 5.4. Iteration with Client-side Locking. 52
Listing 5.5. Iterating a List with an Iterator. 53
Listing 5.6. Iteration Hidden within String Concatenation. Don't Do this. 54
Listing 5.7. ConcurrentMap Interface. 56
Listing 5.8. Producer and Consumer Tasks in a Desktop Search Application. 58
Listing 5.9. Starting the Desktop Search. 58
Listing 5.10. Restoring the Interrupted Status so as Not to Swallow the Interrupt. 60
Listing 5.11. Using CountDownLatch for Starting and Stopping Threads in Timing Tests. 61
Listing 5.12. Using FutureTask to Preload Data that is Needed Later. 62
Listing 5.13. Coercing an Unchecked Throwable to a RuntimeException. 62
Listing 5.14. Using Semaphore to Bound a Collection. 64

Listing 5.15. Coordinating Computation in a Cellular Automaton with CyclicBarrier. 64
Listing 5.16. Initial Cache Attempt Using HashMap and Synchronization. 66
Figure 5.2. Poor Concurrency of Memoizer1. 66
Figure 5.3. Two Threads Computing the Same Value When Using Memoizer2. 67
Listing 5.17. Replacing HashMap with ConcurrentHashMap. 67
Figure 5.4. Unlucky Timing that could Cause Memoizer3 to Calculate the Same Value Twice. 68
Listing 5.18. Memoizing Wrapper Using FutureTask. 68
Listing 5.19. Final Implementation of Memoizer. 69
Listing 5.20. Factorizing Servlet that Caches Results Using Memoizer. 69
Chapter 6. Task Execution 72
Listing 6.1. Sequential Web Server. 72
Listing 6.2. Web Server that Starts a New Thread for Each Request. 73
Listing 6.3. Executor Interface. 74
Listing 6.4. Web Server Using a Thread Pool. 75
Listing 6.5. Executor that Starts a New Thread for Each Task. 75
Listing 6.6. Executor that Executes Tasks Synchronously in the Calling Thread. 75
Listing 6.7. Lifecycle Methods in ExecutorService. 77
Listing 6.8. Web Server with Shutdown Support. 77
Listing 6.9. Class Illustrating Confusing Timer Behavior. 78
Listing 6.10. Rendering Page Elements Sequentially. 79
Listing 6.11. Callable and Future Interfaces. 80
Listing 6.12. Default Implementation of newTaskFor in ThreadPoolExecutor. 80
Listing 6.13. Waiting for Image Download with Future. 81
Listing 6.14. QueueingFuture Class Used By ExecutorCompletionService. 82
Listing 6.15. Using CompletionService to Render Page Elements as they Become Available. 82
Listing 6.16. Fetching an Advertisement with a Time Budget. 83
Listing 6.17. Requesting Travel Quotes Under a Time Budget. 84
Chapter 7. Cancellation and Shutdown 85
Listing 7.1. Using a Volatile Field to Hold Cancellation State. 86
Listing 7.2. Generating a Second's Worth of Prime Numbers. 86
Listing 7.3. Unreliable Cancellation that can Leave Producers Stuck in a Blocking Operation. Don't Do this. 87
Listing 7.4. Interruption Methods in Thread. 87
Listing 7.5. Using Interruption for Cancellation. 88
Listing 7.6. Propagating InterruptedException to Callers. 89
Listing 7.7. Noncancelable Task that Restores Interruption Before Exit. 90
Listing 7.8. Scheduling an Interrupt on a Borrowed Thread. Don't Do this. 90

Listing 7.9. Interrupting a Task in a Dedicated Thread. 91
Listing 7.10. Cancelling a Task Using Future. 92
Listing 7.11. Encapsulating Nonstandard Cancellation in a Thread by Overriding Interrupt. 93
Listing 7.12. Encapsulating Nonstandard Cancellation in a Task with newTaskFor. 94
Listing 7.13. Producer-Consumer Logging Service with No Shutdown Support. 95
Listing 7.14. Unreliable Way to Add Shutdown Support to the Logging Service. 95
Listing 7.15. Adding Reliable Cancellation to LogWriter. 96
Listing 7.16. Logging Service that Uses an ExecutorService. 97
Listing 7.17. Shutdown with Poison Pill. 97
Listing 7.18. Producer Thread for IndexingService. 98
Listing 7.19. Consumer Thread for IndexingService. 98
Listing 7.20. Using a Private Executor Whose Lifetime is Bounded by a Method Call. 98
Listing 7.21. ExecutorService that Keeps Track of Cancelled Tasks After Shutdown. 99
Listing 7.22. Using TrackingExecutorService to Save Unfinished Tasks for Later Execution. 100
Listing 7.23. Typical Thread-pool Worker Thread Structure. 101
Listing 7.24. UncaughtExceptionHandler Interface. 101
Listing 7.25. UncaughtExceptionHandler that Logs the Exception. 101
Listing 7.26. Registering a Shutdown Hook to Stop the Logging Service. 103
Chapter 8. Applying Thread Pools 104
Listing 8.1. Task that Deadlocks in a Single-threaded Executor. Don't Do this. 105
Listing 8.2. General Constructor for ThreadPoolExecutor. 107
Listing 8.3. Creating a Fixed-sized Thread Pool with a Bounded Queue and the Caller-runs Saturation Policy. 109
Listing 8.4. Using a Semaphore to Throttle Task Submission. 109
Listing 8.5. ThreadFactory Interface. 109
Listing 8.6. Custom Thread Factory. 110
Listing 8.7. Custom Thread Base Class. 111
Listing 8.8. Modifying an Executor Created with the Standard Factories. 111
Listing 8.9. Thread Pool Extended with Logging and Timing. 112
Listing 8.10. Transforming Sequential Execution into Parallel Execution. 112
Listing 8.11. Transforming Sequential Tail-recursion into Parallelized Recursion. 113
Listing 8.12. Waiting for Results to be Calculated in Parallel. 113
Listing 8.13. Abstraction for Puzzles Like the "Sliding Blocks Puzzle". 113
Listing 8.14. Link Node for the Puzzle Solver Framework. 114
Listing 8.15. Sequential Puzzle Solver. 115
Listing 8.16. Concurrent Version of Puzzle Solver. 115
Listing 8.17. Result-bearing Latch Used by ConcurrentPuzzleSolver. 116

Listing 8.18. Solver that Recognizes when No Solution Exists. 116
Chapter 9. GUI Applications 117
Figure 9.1. Control Flow of a Simple Button Click. 119
Listing 9.1. Implementing SwingUtilities Using an Executor. 120
Listing 9.2. Executor Built Atop SwingUtilities. 120
Listing 9.3. Simple Event Listener. 120
Figure 9.2. Control Flow with Separate Model and View Objects. 121
Listing 9.4. Binding a Long-running Task to a Visual Component. 121
Listing 9.5. Long-running Task with User Feedback. 122
Listing 9.6. Cancelling a Long-running Task. 122
Listing 9.7. Background Task Class Supporting Cancellation, Completion Notification, and Progress Notification. 124
Listing 9.8. Initiating a Long-running, Cancellable Task with BackgroundTask. 124
Chapter 10. Avoiding Liveness Hazards 128
Figure 10.1. Unlucky Timing in LeftRightDeadlock. 128
Listing 10.1. Simple Lock-ordering Deadlock. Don't Do this. 129
Listing 10.2. Dynamic Lock-ordering Deadlock. Don't Do this. 129
Listing 10.3. Inducing a Lock Ordering to Avoid Deadlock. 130
Listing 10.4. Driver Loop that Induces Deadlock Under Typical Conditions. 131
Listing 10.5. Lock-ordering Deadlock Between Cooperating Objects. Don't Do this. 132
Listing 10.6. Using Open Calls to Avoid Deadlock Between Cooperating Objects. 133
Listing 10.7. Portion of Thread Dump After Deadlock. 133
Chapter 11. Performance and Scalability 137
Figure 11.1. Maximum Utilization Under Amdahl's Law for Various Serialization Percentages. 140
Listing 11.1. Serialized Access to a Task Queue. 141
Figure 11.2. Comparing Queue Implementations. 141
Listing 11.2. Synchronization that has No Effect. Don't Do this. 142
Listing 11.3. Candidate for Lock Elision. 143
Listing 11.4. Holding a Lock Longer than Necessary. 143
Listing 11.5. Reducing Lock Duration. 145
Listing 11.6. Candidate for Lock Splitting. 146
Listing 11.7. ServerStatus Refactored to Use Split Locks. 146
Listing 11.8. Hash-based Map Using Lock Striping. 148
Figure 11.3. Comparing Scalability of Map Implementations. 150
Chapter 12. Testing Concurrent Programs 153
Listing 12.1. Bounded Buffer Using Semaphore. 154

Listing 12.2. Basic Unit Tests for BoundedBuffer. 154
Listing 12.3. Testing Blocking and Responsiveness to Interruption. 156
Listing 12.4. Medium-quality Random Number Generator Suitable for Testing. 157
Listing 12.5. Producer-consumer Test Program for BoundedBuffer. 158
Listing 12.6. Producer and Consumer Classes Used in PutTakeTest. 158
Listing 12.7. Testing for Resource Leaks. 159
Listing 12.8. Thread Factory for Testing ThreadPoolExecutor. 160
Listing 12.9. Test Method to Verify Thread Pool Expansion. 160
Listing 12.10. Using Thread.yield to Generate More Interleavings. 160
Listing 12.11. Barrier-based Timer. 161
Figure 12.1. TimedPutTakeTest with Various Buffer Capacities. 162
Listing 12.12. Testing with a Barrier-based Timer. 162
Listing 12.13. Driver Program for TimedPutTakeTest. 163
Figure 12.2. Comparing Blocking Queue Implementations. 163
Figure 12.3. Completion Time Histogram for TimedPutTakeTest with Default (Nonfair) and Fair Semaphores. 164
Figure 12.4. Completion Time Histogram for TimedPutTakeTest with Single-item Buffers. 164
Figure 12.5. Results Biased by Dynamic Compilation. 165
Chapter 13 - Explicit Locks 171
Listing 13.1. Lock Interface. 171
Listing 13.2. Guarding Object State Using ReentrantLock. 171
Listing 13.3. Avoiding Lock-ordering Deadlock Using tryLock. 173
Listing 13.4. Locking with a Time Budget. 173
Listing 13.5. Interruptible Lock Acquisition. 173
Figure 13.1. Intrinsic Locking versus ReentrantLock Performance on Java 5.0 and Java 6. 174
Figure 13.2. Fair Versus Non-fair Lock Performance. 175
Listing 13.6. ReadWriteLock Interface. 176
Listing 13.7. Wrapping a Map with a Read-write Lock. 178
Figure 13.3. Read-write Lock Performance. 178
Chapter 14 - Building Custom Synchronizers 179
Listing 14.1. Structure of Blocking State-dependent Actions. 179
Listing 14.2. Base Class for Bounded Buffer Implementations. 180
Listing 14.3. Bounded Buffer that Balks When Preconditions are Not Met. 180
Listing 14.4. Client Logic for Calling GrumpyBoundedBuffer. 181
Figure 14.1. Thread Oversleeping Because the Condition Became True Just After It Went to Sleep. 181
Listing 14.5. Bounded Buffer Using Crude Blocking. 182

Listing 14.6. Bounded Buffer Using Condition Queues. 183
Listing 14.7. Canonical Form for State-dependent Methods. 184
Listing 14.8. Using Conditional Notification in BoundedBuffer.put. 186
Listing 14.9. Recloseable Gate Using wait and notifyAll. 187
Listing 14.10. Condition Interface. 188
Listing 14.11. Bounded Buffer Using Explicit Condition Variables. 189
Listing 14.12. Counting Semaphore Implemented Using Lock. 190
Listing 14.13. Canonical Forms for Acquisition and Release in AQS. 191
Listing 14.14. Binary Latch Using AbstractQueuedSynchronizer. 192
Listing 14.15. tryAcquire Implementation from Non-fair ReentrantLock. 193
Listing 14.16. tryAcquireShared and tryReleaseShared from Semaphore. 193
Chapter 15. Atomic Variables and Non-blocking Synchronization 195
Listing 15.1. Simulated CAS Operation. 197
Listing 15.2. Nonblocking Counter Using CAS. 197
Listing 15.3. Preserving Multivariable Invariants Using CAS. 199
Figure 15.1. Lock and AtomicInteger Performance Under High Contention. 200
Figure 15.2. Lock and AtomicInteger Performance Under Moderate Contention. 200
Listing 15.4. Random Number Generator Using ReentrantLock. 200
Listing 15.5. Random Number Generator Using AtomicInteger. 201
Listing 15.6. Nonblocking Stack Using Treiber's Algorithm (Treiber, 1986). 203
Figure 15.3. Queue with Two Elements in Quiescent State. 203
Figure 15.4. Queue in Intermediate State During Insertion. 204
Figure 15.5. Queue Again in Quiescent State After Insertion is Complete. 204
Listing 15.7. Insertion in the Michael-Scott Nonblocking Queue Algorithm (Michael and Scott, 1996). 205
Listing 15.8. Using Atomic Field Updaters in ConcurrentLinkedQueue. 205
Chapter 16. The Java Memory Model 207
Figure 16.1. Interleaving Showing Reordering in PossibleReordering. 208
Listing 16.1. Insufficiently Synchronized Program that can have Surprising Results. Don't Do this. 209
Figure 16.2. Illustration of Happens-before in the Java Memory Model. 210
Listing 16.2. Inner Class of FutureTask Illustrating Synchronization Piggybacking. 211
Listing 16.3. Unsafe Lazy Initialization. Don't Do this. 212
Listing 16.4. Thread-safe Lazy Initialization. 213
Listing 16.5. Eager Initialization. 213
Listing 16.6. Lazy Initialization Holder Class Idiom. 213
Listing 16.7. Double-checked-locking Antipattern. Don't Do this. 214
Listing 16.8. Initialization Safety for Immutable Objects. 215



Preface

At this writing, multicore processors are just now becoming inexpensive enough for midrange desktop systems. Not
coincidentally, many development teams are noticing more and more threading-related bug reports in their projects. In
a recent post on the NetBeans developer site, one of the core maintainers observed that a single class had been
patched over 14 times to fix threading-related problems. Dion Almaer, former editor of TheServerSide, recently blogged
(after a painful debugging session that ultimately revealed a threading bug) that most Java programs are so rife with
concurrency bugs that they work only "by accident".

Indeed, developing, testing and debugging multithreaded programs can be extremely difficult because concurrency bugs
do not manifest themselves predictably. And when they do surface, it is often at the worst possible time: in production,
under heavy load.

One of the challenges of developing concurrent programs in Java is the mismatch between the concurrency features
offered by the platform and how developers need to think about concurrency in their programs. The language provides
low-level mechanisms such as synchronization and condition waits, but these mechanisms must be used consistently to
implement application-level protocols or policies. Without such policies, it is all too easy to create programs that
compile and appear to work but are nevertheless broken. Many otherwise excellent books on concurrency fall short of
their goal by focusing excessively on low-level mechanisms and APIs rather than design-level policies and patterns.

Java 5.0 is a huge step forward for the development of concurrent applications in Java, providing new higher-level
components and additional low-level mechanisms that make it easier for novices and experts alike to build concurrent
applications. The authors are the primary members of the JCP Expert Group that created these facilities; in addition to
describing their behavior and features, we present the underlying design patterns and anticipated usage scenarios that
motivated their inclusion in the platform libraries.

Our goal is to give readers a set of design rules and mental models that make it easier, and more fun, to build correct,
performant concurrent classes and applications in Java.

We hope you enjoy Java Concurrency in Practice.

Brian Goetz
Williston, VT
March 2006
How to Use this Book

To address the abstraction mismatch between Java's low-level mechanisms and the necessary design-level policies, we
present a simplified set of rules for writing concurrent programs. Experts may look at these rules and say "Hmm, that's
not entirely true: class C is thread-safe even though it violates rule R." While it is possible to write correct programs that
break our rules, doing so requires a deep understanding of the low-level details of the Java Memory Model, and we
want developers to be able to write correct concurrent programs without having to master these details. Consistently
following our simplified rules will produce correct and maintainable concurrent programs.

We assume the reader already has some familiarity with the basic mechanisms for concurrency in Java. Java
Concurrency in Practice is not an introduction to concurrency; for that, see the threading chapter of any decent
introductory volume, such as The Java Programming Language (Arnold et al., 2005). Nor is it an encyclopedic reference
for All Things Concurrency; for that, see Concurrent Programming in Java (Lea, 2000). Rather, it offers practical design
rules to assist developers in the difficult process of creating safe and performant concurrent classes. Where appropriate,
we cross-reference relevant sections of The Java Programming Language, Concurrent Programming in Java, The Java
Language Specification (Gosling et al., 2005), and Effective Java (Bloch, 2001) using the conventions [JPL n.m], [CPJ n.m],
[JLS n.m], and [EJ Item n].

After the introduction (Chapter 1), the book is divided into four parts:

Fundamentals. Part I (Chapters 2-5) focuses on the basic concepts of concurrency and thread safety, and how to
compose thread-safe classes out of the concurrent building blocks provided by the class library. A "cheat sheet"
summarizing the most important of the rules presented in Part I appears on page 110.

Chapters 2 (Thread Safety) and 3 (Sharing Objects) form the foundation for the book. Nearly all of the rules on avoiding
concurrency hazards, constructing thread-safe classes, and verifying thread safety are here. Readers who prefer
"practice" to "theory" may be tempted to skip ahead to Part II, but make sure to come back and read Chapters 2 and 3
before writing any concurrent code!

Chapter 4 (Composing Objects) covers techniques for composing thread-safe classes into larger thread-safe classes.
Chapter 5 (Building Blocks) covers the concurrent building blocks, thread-safe collections and synchronizers, provided
by the platform libraries.

Structuring Concurrent Applications. Part II (Chapters 6-9) describes how to exploit threads to improve the throughput
or responsiveness of concurrent applications. Chapter 6 (Task Execution) covers identifying parallelizable tasks and
executing them within the task-execution framework. Chapter 7 (Cancellation and Shutdown) deals with techniques for
convincing tasks and threads to terminate before they would normally do so; how programs deal with cancellation and
shutdown is often one of the factors that separate truly robust concurrent applications from those that merely work.
Chapter 8 (Applying Thread Pools) addresses some of the more advanced features of the task-execution framework.
Chapter 9 (GUI Applications) focuses on techniques for improving responsiveness in single-threaded subsystems.

Liveness, Performance, and Testing. Part III (Chapters 10-12) concerns itself with ensuring that concurrent programs
actually do what you want them to do and do so with acceptable performance. Chapter 10 (Avoiding Liveness Hazards)
describes how to avoid liveness failures that can prevent programs from making forward progress. Chapter 11
(Performance and Scalability) covers techniques for improving the performance and scalability of concurrent code.
Chapter 12 (Testing Concurrent Programs) covers techniques for testing concurrent code for both correctness and
performance.

Advanced Topics. Part IV (Chapters 13-16) covers topics that are likely to be of interest only to experienced developers:
explicit locks, atomic variables, nonblocking algorithms, and developing custom synchronizers.
Code Examples

While many of the general concepts in this book are applicable to versions of Java prior to Java 5.0 and even to non-Java
environments, most of the code examples (and all the statements about the Java Memory Model) assume Java 5.0 or
later. Some of the code examples may use library features added in Java 6.

The code examples have been compressed to reduce their size and to highlight the relevant portions. The full versions
of the code examples, as well as supplementary examples and errata, are available from the book's website,
http://www.javaconcurrencyinpractice.com.

The code examples are of three sorts: "good" examples, "not so good" examples, and "bad" examples. Good examples
illustrate techniques that should be emulated. Bad examples illustrate techniques that should definitely not be
emulated, and are identified with a "Mr. Yuk" icon[1] to make it clear that this is "toxic" code (see Listing 1). Not-so-good
examples illustrate techniques that are not necessarily wrong but are fragile, risky, or perform poorly, and are decorated
with a "Mr. Could Be Happier" icon as in Listing 2.

[1] Mr. Yuk is a registered trademark of the Children's Hospital of Pittsburgh and appears by permission.
Listing 1. Bad Way to Sort a List. Don't Do this.

public <T extends Comparable<? super T>> void sort(List<T> list) {
    // Never returns the wrong answer!
    System.exit(0);
}
Some readers may question the role of the "bad" examples in this book; after all, a book should show how to do things
right, not wrong. The bad examples have two purposes. They illustrate common pitfalls, but more importantly they
demonstrate how to analyze a program for thread safety, and the best way to do that is to see the ways in which
thread safety is compromised.

Listing 2. Less than Optimal Way to Sort a List.

public <T extends Comparable<? super T>> void sort(List<T> list) {
    for (int i = 0; i < 1000000; i++)
        doNothing();
    Collections.sort(list);
}
Acknowledgments

This book grew out of the development process for the java.util.concurrent package that was created by the Java
Community Process JSR 166 for inclusion in Java 5.0. Many others contributed to JSR 166; in particular we thank Martin
Buchholz for doing all the work related to getting the code into the JDK, and all the readers of the concurrency-
interest mailing list who offered their suggestions and feedback on the draft APIs.

This book has been tremendously improved by the suggestions and assistance of a small army of reviewers, advisors,
cheerleaders, and armchair critics. We would like to thank Dion Almaer, Tracy Bialik, Cindy Bloch, Martin Buchholz, Paul
Christmann, Cliff Click, Stuart Halloway, David Hovemeyer, Jason Hunter, Michael Hunter, Jeremy Hylton, Heinz Kabutz,
Robert Kuhar, Ramnivas Laddad, Jared Levy, Nicole Lewis, Victor Luchangco, Jeremy Manson, Paul Martin, Berna
Massingill, Michael Maurer, Ted Neward, Kirk Pepperdine, Bill Pugh, Sam Pullara, Russ Rufer, Bill Scherer, Jeffrey Siegal,
Bruce Tate, Gil Tene, Paul Tyma, and members of the Silicon Valley Patterns Group who, through many interesting
technical conversations, offered guidance and made suggestions that helped make this book better.

We are especially grateful to Cliff Biffle, Barry Hayes, Dawid Kurzyniec, Angelika Langer, Doron Rajwan, and Bill Venners,
who reviewed the entire manuscript in excruciating detail, found bugs in the code examples, and suggested numerous
improvements.

We thank Katrina Avery for a great copy-editing job and Rosemary Simpson for producing the index under unreasonable
time pressure. We thank Ami Dewar for doing the illustrations.

Thanks to the whole team at Addison-Wesley who helped make this book a reality. Ann Sellers got the project launched
and Greg Doench shepherded it to a smooth completion; Elizabeth Ryan guided it through the production process.

We would also like to thank the thousands of software engineers who contributed indirectly by creating the software
used to create this book, including TeX, LaTeX, Adobe Acrobat, pic, grap, Adobe Illustrator, Perl, Apache Ant, IntelliJ
IDEA, GNU emacs, Subversion, TortoiseSVN, and of course, the Java platform and class libraries.


Chapter 1. Introduction

Writing correct programs is hard; writing correct concurrent programs is harder. There are simply more things that can
go wrong in a concurrent program than in a sequential one. So, why do we bother with concurrency? Threads are an
inescapable feature of the Java language, and they can simplify the development of complex systems by turning
complicated asynchronous code into simpler straight-line code. In addition, threads are the easiest way to tap the
computing power of multiprocessor systems. And, as processor counts increase, exploiting concurrency effectively will
only become more important.

1.1. A (Very) Brief History of Concurrency

In the ancient past, computers didn't have operating systems; they executed a single program from beginning to end,
and that program had direct access to all the resources of the machine. Not only was it difficult to write programs that
ran on the bare metal, but running only a single program at a time was an inefficient use of expensive and scarce
computer resources.

Operating systems evolved to allow more than one program to run at once, running individual programs in processes:
isolated, independently executing programs to which the operating system allocates resources such as memory, file
handles, and security credentials. If they needed to, processes could communicate with one another through a variety
of coarse-grained communication mechanisms: sockets, signal handlers, shared memory, semaphores, and files.

Several motivating factors led to the development of operating systems that allowed multiple programs to execute
simultaneously:

Resource utilization. Programs sometimes have to wait for external operations such as input or output, and while
waiting can do no useful work. It is more efficient to use that wait time to let another program run.

Fairness. Multiple users and programs may have equal claims on the machine's resources. It is preferable to let them
share the computer via finer-grained time slicing than to let one program run to completion and then start another.

Convenience. It is often easier or more desirable to write several programs that each perform a single task and have
them coordinate with each other as necessary than to write a single program that performs all the tasks.

In early timesharing systems, each process was a virtual von Neumann computer; it had a memory space storing both
instructions and data, executing instructions sequentially according to the semantics of the machine language, and
interacting with the outside world via the operating system through a set of I/O primitives. For each instruction
executed there was a clearly defined "next instruction", and control flowed through the program according to the rules
of the instruction set. Nearly all widely used programming languages today follow this sequential programming model,
where the language specification clearly defines "what comes next" after a given action is executed.

The sequential programming model is intuitive and natural, as it models the way humans work: do one thing at a time,
in sequence, mostly. Get out of bed, put on your bathrobe, go downstairs and start the tea. As in programming
languages, each of these real-world actions is an abstraction for a sequence of finer-grained actions: open the
cupboard, select a flavor of tea, measure some tea into the pot, see if there's enough water in the teakettle, if not put
some more water in, set it on the stove, turn the stove on, wait for the water to boil, and so on. This last step, waiting
for the water to boil, also involves a degree of asynchrony. While the water is heating, you have a choice of what to do:
just wait, or do other tasks in that time such as starting the toast (another asynchronous task) or fetching the
newspaper, while remaining aware that your attention will soon be needed by the teakettle. The manufacturers of
teakettles and toasters know their products are often used in an asynchronous manner, so they raise an audible signal
when they complete their task. Finding the right balance of sequentiality and asynchrony is often a characteristic of
efficient people, and the same is true of programs.

The same concerns (resource utilization, fairness, and convenience) that motivated the development of processes also
motivated the development of threads. Threads allow multiple streams of program control flow to coexist within a
process. They share process-wide resources such as memory and file handles, but each thread has its own program
counter, stack, and local variables. Threads also provide a natural decomposition for exploiting hardware parallelism on
multiprocessor systems; multiple threads within the same program can be scheduled simultaneously on multiple CPUs.

Threads are sometimes called lightweight processes, and most modern operating systems treat threads, not processes,
as the basic units of scheduling. In the absence of explicit coordination, threads execute simultaneously and
asynchronously with respect to one another. Since threads share the memory address space of their owning process, all
threads within a process have access to the same variables and allocate objects from the same heap, which allows finer-
grained data sharing than inter-process mechanisms. But without explicit synchronization to coordinate access to
shared data, a thread may modify variables that another thread is in the middle of using, with unpredictable results.

1.2. Benefits of Threads

When used properly, threads can reduce development and maintenance costs and improve the performance of complex
applications. Threads make it easier to model how humans work and interact, by turning asynchronous workflows into
mostly sequential ones. They can also turn otherwise convoluted code into straight-line code that is easier to write,
read, and maintain.

Threads are useful in GUI applications for improving the responsiveness of the user interface, and in server applications
for improving resource utilization and throughput. They also simplify the implementation of the JVM; the garbage
collector usually runs in one or more dedicated threads. Most nontrivial Java applications rely to some degree on
threads for their organization.

1.2.1. Exploiting Multiple Processors

Multiprocessor systems used to be expensive and rare, found only in large data centers and scientific computing
facilities. Today they are cheap and plentiful; even low-end server and midrange desktop systems often have multiple
processors. This trend will only accelerate; as it gets harder to scale up clock rates, processor manufacturers will instead
put more processor cores on a single chip. All the major chip manufacturers have begun this transition, and we are
already seeing machines with dramatically higher processor counts.

Since the basic unit of scheduling is the thread, a program with only one thread can run on at most one processor at a
time. On a two-processor system, a single-threaded program is giving up access to half the available CPU resources; on a
100-processor system, it is giving up access to 99%. On the other hand, programs with multiple active threads can
execute simultaneously on multiple processors. When properly designed, multithreaded programs can improve
throughput by utilizing available processor resources more effectively.

Using multiple threads can also help achieve better throughput on single-processor systems. If a program is single-
threaded, the processor remains idle while it waits for a synchronous I/O operation to complete. In a multithreaded
program, another thread can still run while the first thread is waiting for the I/O to complete, allowing the application to
still make progress during the blocking I/O. (This is like reading the newspaper while waiting for the water to boil, rather
than waiting for the water to boil before starting to read.)

1.2.2. Simplicity of Modeling

It is often easier to manage your time when you have only one type of task to perform (fix these twelve bugs) than
when you have several (fix the bugs, interview replacement candidates for the system administrator, complete your
team's performance evaluations, and create the slides for your presentation next week). When you have only one type
of task to do, you can start at the top of the pile and keep working until the pile is exhausted (or you are); you don't
have to spend any mental energy figuring out what to work on next. On the other hand, managing multiple priorities
and deadlines and switching from task to task usually carries some overhead.

The same is true for software: a program that processes one type of task sequentially is simpler to write, less error-
prone, and easier to test than one managing multiple different types of tasks at once. Assigning a thread to each type of
task or to each element in a simulation affords the illusion of sequentiality and insulates domain logic from the details of
scheduling, interleaved operations, asynchronous I/O, and resource waits. A complicated, asynchronous workflow can
be decomposed into a number of simpler, synchronous workflows each running in a separate thread, interacting only
with each other at specific synchronization points.

This benefit is often exploited by frameworks such as servlets or RMI (Remote Method Invocation). The framework
handles the details of request management, thread creation, and load balancing, dispatching portions of the request
handling to the appropriate application component at the appropriate point in the workflow. Servlet writers do not
need to worry about how many other requests are being processed at the same time or whether the socket input and
output streams block; when a servlet's service method is called in response to a web request, it can process the
request synchronously as if it were a single-threaded program. This can simplify component development and reduce
the learning curve for using such frameworks.

1.2.3. Simplified Handling of Asynchronous Events

A server application that accepts socket connections from multiple remote clients may be easier to develop when each
connection is allocated its own thread and allowed to use synchronous I/O.

If an application goes to read from a socket when no data is available, read blocks until some data is available. In a
single-threaded application, this means that not only does processing the corresponding request stall, but processing of
all requests stalls while the single thread is blocked. To avoid this problem, single-threaded server applications are
forced to use nonblocking I/O, which is far more complicated and error-prone than synchronous I/O. However, if each
request has its own thread, then blocking does not affect the processing of other requests.
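A minimal sketch of this thread-per-connection model may help make it concrete. This is not a listing from the book
(Chapter 6 develops a fuller version); the class name, port, and handleRequest method are hypothetical:

// Hypothetical sketch: each accepted socket gets its own thread, so a
// blocked read() stalls only that connection, not the whole server.
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;

public class ThreadPerConnectionServer {
    public static void main(String[] args) throws IOException {
        ServerSocket socket = new ServerSocket(8080);
        while (true) {
            final Socket connection = socket.accept();
            new Thread(new Runnable() {
                public void run() {
                    handleRequest(connection); // may block on synchronous I/O
                }
            }).start();
        }
    }

    private static void handleRequest(Socket connection) {
        // application logic: read the request, write the response
    }
}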
Historically, operating systems placed relatively low limits on the number of threads that a process could create, as few
as several hundred (or even less). As a result, operating systems developed efficient facilities for multiplexed I/O, such
as the Unix select and poll system calls, and to access these facilities, the Java class libraries acquired a set of
packages (java.nio) for nonblocking I/O. However, operating system support for larger numbers of threads has
improved significantly, making the thread-per-client model practical even for large numbers of clients on some
platforms.[1]

[1] The NPTL threads package, now part of most Linux distributions, was designed to support hundreds of thousands of threads. Nonblocking I/O
has its own benefits, but better OS support for threads means that there are fewer situations for which it is essential.
1.2.4. More Responsive User Interfaces

GUI applications used to be single-threaded, which meant that you had to either frequently poll throughout the code for
input events (which is messy and intrusive) or execute all application code indirectly through a "main event loop". If
code called from the main event loop takes too long to execute, the user interface appears to "freeze" until that code
finishes, because subsequent user interface events cannot be processed until control is returned to the main event loop.

Modern GUI frameworks, such as the AWT and Swing toolkits, replace the main event loop with an event dispatch
thread (EDT). When a user interface event such as a button press occurs, application-defined event handlers are called
in the event thread. Most GUI frameworks are single-threaded subsystems, so the main event loop is effectively still
present, but it runs in its own thread under the control of the GUI toolkit rather than the application.

If only short-lived tasks execute in the event thread, the interface remains responsive since the event thread is always
able to process user actions reasonably quickly. However, processing a long-running task in the event thread, such as
spell-checking a large document or fetching a resource over the network, impairs responsiveness. If the user performs
an action while this task is running, there is a long delay before the event thread can process or even acknowledge it. To
add insult to injury, not only does the UI become unresponsive, but it is impossible to cancel the offending task even if
the UI provides a cancel button, because the event thread is busy and cannot handle the cancel button-press event until
the lengthy task completes! If, however, the long-running task is instead executed in a separate thread, the event
thread remains free to process UI events, making the UI more responsive.
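A minimal sketch of that division of labor, using only standard Swing API calls (the class and helper method here are
hypothetical; Chapter 9 treats this pattern in depth): the expensive work runs on a worker thread, and only the UI
update is marshaled back onto the event thread with SwingUtilities.invokeLater.

import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import javax.swing.JButton;
import javax.swing.JLabel;
import javax.swing.SwingUtilities;

public class ResponsiveUI {
    // Hypothetical helper: run the long task off the event thread, then
    // push the result back onto the event thread for display.
    static void wire(JButton button, final JLabel status) {
        button.addActionListener(new ActionListener() {
            public void actionPerformed(ActionEvent e) {
                new Thread(new Runnable() {
                    public void run() {
                        final String result = doLongRunningTask();
                        SwingUtilities.invokeLater(new Runnable() {
                            public void run() { status.setText(result); }
                        });
                    }
                }).start();
            }
        });
    }

    static String doLongRunningTask() { return "done"; }
}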

1.3. Risks of Threads

Java's built-in support for threads is a double-edged sword. While it simplifies the development of concurrent
applications by providing language and library support and a formal cross-platform memory model (it is this formal
cross-platform memory model that makes possible the development of write-once, run-anywhere concurrent
applications in Java), it also raises the bar for developers because more programs will use threads. When threads were
more esoteric, concurrency was an "advanced" topic; now, mainstream developers must be aware of thread-safety
issues.
1.3.1. Safety Hazards

Thread safety can be unexpectedly subtle because, in the absence of sufficient synchronization, the ordering of
operations in multiple threads is unpredictable and sometimes surprising. UnsafeSequence in Listing 1.1, which is
supposed to generate a sequence of unique integer values, offers a simple illustration of how the interleaving of actions
in multiple threads can lead to undesirable results. It behaves correctly in a single-threaded environment, but in a
multithreaded environment does not.
Listing 1.1. Non-thread-safe Sequence Generator.

@NotThreadSafe
public class UnsafeSequence {
    private int value;

    /** Returns a unique value. */
    public int getNext() {
        return value++;
    }
}
\
The problem with UnsafeSequence is that with some unlucky timing, two threads could call getNext and receive the
same value. Figure 1.1 shows how this can happen. The increment notation, nextValue++, may appear to be a single
operation, but is in fact three separate operations: read the value, add one to it, and write out the new value. Since
operations in multiple threads may be arbitrarily interleaved by the runtime, it is possible for two threads to read the
value at the same time, both see the same value, and then both add one to it. The result is that the same sequence
number is returned from multiple calls in different threads.

Figure 1.1. Unlucky Execution of UnsafeSequence.getNext().


Diagrams like Figure 1.1 depict possible interleavings of operations in different threads. In these diagrams, time runs
from left to right, and each line represents the activities of a different thread. These interleaving diagrams usually depict
the worst case[2] and are intended to show the danger of incorrectly assuming things will happen in a particular order.

[2] Actually, as we'll see in Chapter 3, the worst case can be even worse than these diagrams usually show because of the possibility of reordering.
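The race is also easy to provoke experimentally. The following driver is a hypothetical test harness, not a listing from
the book: it hammers getNext from two threads and reports any duplicate value it observes, which on most runs it
eventually will.

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class UnsafeSequenceDriver {
    public static void main(String[] args) throws InterruptedException {
        final UnsafeSequence seq = new UnsafeSequence();
        final Set<Integer> seen =
            Collections.synchronizedSet(new HashSet<Integer>());
        Runnable task = new Runnable() {
            public void run() {
                for (int i = 0; i < 100000; i++)
                    if (!seen.add(seq.getNext()))   // add returns false on a duplicate
                        System.out.println("duplicate sequence number!");
            }
        };
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start(); t2.start();
        t1.join(); t2.join();
    }
}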
UnsafeSequence uses a nonstandard annotation: @NotThreadSafe. This is one of several custom annotations used
throughout this book to document concurrency properties of classes and class members. (Other class-level annotations
used in this way are @ThreadSafe and @Immutable; see Appendix A for details.) Annotations documenting thread safety
are useful to multiple audiences. If a class is annotated with @ThreadSafe, users can use it with confidence in a
multithreaded environment, maintainers are put on notice that it makes thread safety guarantees that must be
preserved, and software analysis tools can identify possible coding errors.

UnsafeSequence illustrates a common concurrency hazard called a race condition. Whether or not nextValue returns a
unique value when called from multiple threads, as required by its specification, depends on how the runtime
interleaves the operations, which is not a desirable state of affairs.

Because threads share the same memory address space and run concurrently, they can access or modify variables that
other threads might be using. This is a tremendous convenience, because it makes data sharing much easier than other
inter-thread communication mechanisms would. But it is also a significant risk: threads can be confused by having data
change unexpectedly. Allowing multiple threads to access and modify the same variables introduces an element of non-
sequentiality into an otherwise sequential programming model, which can be confusing and difficult to reason about.
For a multithreaded program's behavior to be predictable, access to shared variables must be properly coordinated so
that threads do not interfere with one another. Fortunately, Java provides synchronization mechanisms to coordinate
such access.

UnsafeSequence can be fixed by making getNext a synchronized method, as shown in Sequence in Listing 1.2,[3] thus
preventing the unfortunate interaction in Figure 1.1. (Exactly why this works is the subject of Chapters 2 and 3.)

[3] @GuardedBy is described in Section 2.4; it documents the synchronization policy for Sequence.
Listing 1.2. Thread-safe Sequence Generator.

@ThreadSafe
public class Sequence {
    @GuardedBy("this") private int nextValue;

    public synchronized int getNext() {
        return nextValue++;
    }
}
In the absence of synchronization, the compiler, hardware, and runtime are allowed to take substantial liberties with
the timing and ordering of actions, such as caching variables in registers or processor-local caches where they are
temporarily (or even permanently) invisible to other threads. These tricks are in aid of better performance and are
generally desirable, but they place a burden on the developer to clearly identify where data is being shared across
threads so that these optimizations do not undermine safety. (Chapter 16 gives the gory details on exactly what
ordering guarantees the JVM makes and how synchronization affects those guarantees, but if you follow the rules in
Chapters 2 and 3, you can safely avoid these low-level details.)
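A minimal sketch of the kind of hazard this paragraph describes, anticipating the treatment of visibility in Chapter 3:
without synchronization, the reader thread may never see the write to ready, or may see ready become true without
seeing the write to number.

public class NoVisibility {
    private static boolean ready;
    private static int number;

    public static void main(String[] args) {
        new Thread(new Runnable() {
            public void run() {
                while (!ready)
                    Thread.yield();          // may spin forever
                System.out.println(number);  // may print 0 instead of 42
            }
        }).start();
        number = 42;
        ready = true;
    }
}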
1.3.2. Liveness Hazards

It is critically important to pay attention to thread safety issues when developing concurrent code: safety cannot be
compromised. The importance of safety is not unique to multithreaded programs; single-threaded programs also must
take care to preserve safety and correctness, but the use of threads introduces additional safety hazards not present in
single-threaded programs. Similarly, the use of threads introduces additional forms of liveness failure that do not occur
in single-threaded programs.

While safety means "nothing bad ever happens", liveness concerns the complementary goal that "something good
eventually happens". A liveness failure occurs when an activity gets into a state such that it is permanently unable to
make forward progress. One form of liveness failure that can occur in sequential programs is an inadvertent infinite
loop, where the code that follows the loop never gets executed. The use of threads introduces additional liveness risks.
For example, if thread A is waiting for a resource that thread B holds exclusively, and B never releases it, A will wait
forever. Chapter 10 describes various forms of liveness failures and how to avoid them, including deadlock (Section
10.1), starvation (Section 10.3.1), and livelock (Section 10.3.3). Like most concurrency bugs, bugs that cause liveness
failures can be elusive because they depend on the relative timing of events in different threads, and therefore do not
always manifest themselves in development or testing.
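A minimal sketch of the "A waits for B while B waits for A" failure just described (it anticipates the LeftRightDeadlock
example treated in Chapter 10): if one thread calls leftRight while another calls rightLeft, each can acquire its first
lock and then block forever waiting for the other's.

public class LeftRightDeadlock {
    private final Object left = new Object();
    private final Object right = new Object();

    public void leftRight() {
        synchronized (left) {
            synchronized (right) {
                doSomething();          // holds left, wants right
            }
        }
    }

    public void rightLeft() {
        synchronized (right) {
            synchronized (left) {
                doSomethingElse();      // holds right, wants left
            }
        }
    }

    void doSomething() { }
    void doSomethingElse() { }
}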
1.3.3. Performance Hazards

Related to liveness is performance. While liveness means that something good eventually happens, eventually may not
be good enough; we often want good things to happen quickly. Performance issues subsume a broad range of
problems, including poor service time, responsiveness, throughput, resource consumption, or scalability. Just as with
safety and liveness, multithreaded programs are subject to all the performance hazards of single-threaded programs,
and to others as well that are introduced by the use of threads.

In well designed concurrent applications the use of threads is a net performance gain, but threads nevertheless carry
some degree of runtime overhead. Context switches, when the scheduler suspends the active thread temporarily so
another thread can run, are more frequent in applications with many threads, and have significant costs: saving and
restoring execution context, loss of locality, and CPU time spent scheduling threads instead of running them. When
threads share data, they must use synchronization mechanisms that can inhibit compiler optimizations, flush or
invalidate memory caches, and create synchronization traffic on the shared memory bus. All these factors introduce
additional performance costs; Chapter 11 covers techniques for analyzing and reducing these costs.

1.4. Threads are Everywhere

Even if your program never explicitly creates a thread, frameworks may create threads on your behalf, and code called
from these threads must be thread-safe. This can place a significant design and implementation burden on developers,
since developing thread-safe classes requires more care and analysis than developing non-thread-safe classes.

Every Java application uses threads. When the JVM starts, it creates threads for JVM housekeeping tasks (garbage
collection, finalization) and a main thread for running the main method. The AWT (Abstract Window Toolkit) and Swing
user interface frameworks create threads for managing user interface events. Timer creates threads for executing
deferred tasks. Component frameworks, such as servlets and RMI, create pools of threads and invoke component
methods in these threads.

If you use these facilities, as many developers do, you have to be familiar with concurrency and thread safety, because
these frameworks create threads and call your components from them. It would be nice to believe that concurrency is
an "optional" or "advanced" language feature, but the reality is that nearly all Java applications are multithreaded and
these frameworks do not insulate you from the need to properly coordinate access to application state.

When concurrency is introduced into an application by a framework, it is usually impossible to restrict the concurrency
awareness to the framework code, because frameworks by their nature make callbacks to application components that
in turn access application state. Similarly, the need for thread safety does not end with the components called by the
framework; it extends to all code paths that access the program state accessed by those components. Thus, the need
for thread safety is contagious.

Frameworks introduce concurrency into applications by calling application components from framework threads.
Components invariably access application state, thus requiring that all code paths accessing that state be thread-safe.

The facilities described below all cause application code to be called from threads not managed by the application.
While the need for thread safety may start with these facilities, it rarely ends there; instead, it ripples through the
application.

Timer. Timer is a convenience mechanism for scheduling tasks to run at a later time, either once or periodically. The
introduction of a Timer can complicate an otherwise sequential program, because TimerTasks are executed in a thread
managed by the Timer, not the application. If a TimerTask accesses data that is also accessed by other application
threads, then not only must the TimerTask do so in a thread-safe manner, but so must any other classes that access
that data. Often the easiest way to achieve this is to ensure that objects accessed by the TimerTask are themselves
thread-safe, thus encapsulating the thread safety within the shared objects.
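A minimal sketch of that advice (the class is hypothetical; an AtomicLong is one of several ways to make the shared
state thread-safe): the TimerTask runs in the Timer's thread, while getCount may be called from application threads.

import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicLong;

public class HeartbeatCounter {
    private final AtomicLong count = new AtomicLong();  // thread-safe shared state

    public void start() {
        Timer timer = new Timer();
        timer.schedule(new TimerTask() {
            public void run() {
                count.incrementAndGet();    // runs in the Timer's thread
            }
        }, 0, 1000);
    }

    public long getCount() {
        return count.get();                 // may be called from any thread
    }
}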
ServleLs and !avaServer ages (!Ss). 1he servleLs framework ls deslgned Lo handle all Lhe lnfrasLrucLure of deploylng a
web appllcaLlon and dlspaLchlng requesLs from remoLe P11 cllenLs. A requesL arrlvlng aL Lhe server ls dlspaLched,
perhaps Lhrough a chaln of fllLers, Lo Lhe approprlaLe servleL or !S. Lach servleL represenLs a componenL of appllcaLlon
loglc, and ln hlghvolume web slLes, mulLlple cllenLs may requlre Lhe servlces of Lhe same servleL aL once. 1he servleLs
speclflcaLlon requlres LhaL a servleL be prepared Lo be called slmulLaneously from mulLlple Lhreads. ln oLher words,
servleLs need Lo be Lhreadsafe.
Even if you could guarantee that a servlet was only called from one thread at a time, you would still have to pay attention to thread safety when building a web application. Servlets often access state information shared with other servlets, such as application-scoped objects (those stored in the ServletContext) or session-scoped objects (those stored in the per-client HttpSession). When a servlet accesses objects shared across servlets or requests, it must coordinate access to these objects properly, since multiple requests could be accessing them simultaneously from separate threads. Servlets and JSPs, as well as servlet filters and objects stored in scoped containers like ServletContext and HttpSession, simply have to be thread-safe.
Remote Method Invocation. RMI lets you invoke methods on objects running in another JVM. When you call a remote method with RMI, the method arguments are packaged (marshaled) into a byte stream and shipped over the network to the remote JVM, where they are unpacked (unmarshaled) and passed to the remote method.
When the RMI code calls your remote object, in what thread does that call happen? You don't know, but it's definitely not in a thread you created; your object gets called in a thread managed by RMI. How many threads does RMI create? Could the same remote method on the same remote object be called simultaneously in multiple RMI threads?[4]

[4] Answer: yes, but it's not all that clear from the Javadoc; you have to read the RMI spec.
A remote object must guard against two thread safety hazards: properly coordinating access to state that may be shared with other objects, and properly coordinating access to the state of the remote object itself (since the same object may be called in multiple threads simultaneously). Like servlets, RMI objects should be prepared for multiple simultaneous calls and must provide their own thread safety.
Swing and AWT. GUI applications are inherently asynchronous. Users may select a menu item or press a button at any time, and they expect that the application will respond promptly even if it is in the middle of doing something else. Swing and AWT address this problem by creating a separate thread for handling user-initiated events and updating the graphical view presented to the user.
Swing components, such as JTable, are not thread-safe. Instead, Swing programs achieve their thread safety by confining all access to GUI components to the event thread. If an application wants to manipulate the GUI from outside the event thread, it must cause the code that will manipulate the GUI to run in the event thread instead.
When the user performs a UI action, an event handler is called in the event thread to perform whatever operation the user requested. If the handler needs to access application state that is also accessed from other threads (such as a document being edited), then the event handler, along with any other code that accesses that state, must do so in a thread-safe manner.
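For instance, a background thread cannot update a label directly; it must ask the event thread to do it. A minimal sketch of the standard idiom (class and field names are ours), using SwingUtilities.invokeLater:

import javax.swing.JLabel;
import javax.swing.SwingUtilities;

public class StatusUpdater {
    private final JLabel statusLabel;  // a Swing component; not thread-safe

    public StatusUpdater(JLabel statusLabel) { this.statusLabel = statusLabel; }

    // Safe to call from any thread, e.g. a background worker
    public void showStatus(final String message) {
        SwingUtilities.invokeLater(new Runnable() {
            public void run() {
                statusLabel.setText(message);  // runs in the event thread
            }
        });
    }
}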
Part I: Fundamentals

Chapter 2. Thread Safety
Chapter 3. Sharing Objects
Chapter 4. Composing Objects
Chapter 5. Building Blocks
Chapter 2. Thread Safety
Perhaps surprisingly, concurrent programming isn't so much about threads or locks, any more than civil engineering is about rivets and I-beams. Of course, building bridges that don't fall down requires the correct use of a lot of rivets and I-beams, just as building concurrent programs requires the correct use of threads and locks. But these are just mechanisms, means to an end. Writing thread-safe code is, at its core, about managing access to state, and in particular to shared, mutable state.

Informally, an object's state is its data, stored in state variables such as instance or static fields. An object's state may include fields from other, dependent objects; a HashMap's state is partially stored in the HashMap object itself, but also in many Map.Entry objects. An object's state encompasses any data that can affect its externally visible behavior.
By shared, we mean that a variable could be accessed by multiple threads; by mutable, we mean that its value could change during its lifetime. We may talk about thread safety as if it were about code, but what we are really trying to do is protect data from uncontrolled concurrent access.
Whether an object needs to be thread-safe depends on whether it will be accessed from multiple threads. This is a property of how the object is used in a program, not what it does. Making an object thread-safe requires using synchronization to coordinate access to its mutable state; failing to do so could result in data corruption and other undesirable consequences.
Whenever more than one thread accesses a given state variable, and one of them might write to it, they all must coordinate their access to it using synchronization. The primary mechanism for synchronization in Java is the synchronized keyword, which provides exclusive locking, but the term "synchronization" also includes the use of volatile variables, explicit locks, and atomic variables.
You should avoid the temptation to think that there are "special" situations in which this rule does not apply. A program that omits needed synchronization might appear to work, passing its tests and performing well for years, but it is still broken and may fail at any moment.
If multiple threads access the same mutable state variable without appropriate synchronization, your program is broken. There are three ways to fix it:

- Don't share the state variable across threads;
- Make the state variable immutable; or
- Use synchronization whenever accessing the state variable.
If you haven't considered concurrent access in your class design, some of these approaches can require significant design modifications, so fixing the problem might not be as trivial as this advice makes it sound. It is far easier to design a class to be thread-safe than to retrofit it for thread safety later.
In a large program, identifying whether multiple threads might access a given variable can be complicated. Fortunately, the same object-oriented techniques that help you write well-organized, maintainable classes, such as encapsulation and data hiding, can also help you create thread-safe classes. The less code that has access to a particular variable, the easier it is to ensure that all of it uses the proper synchronization, and the easier it is to reason about the conditions under which a given variable might be accessed. The Java language doesn't force you to encapsulate state; it is perfectly allowable to store state in public fields (even public static fields) or publish a reference to an otherwise internal object. But the better encapsulated your program state, the easier it is to make your program thread-safe and to help maintainers keep it that way.
When designing thread-safe classes, good object-oriented techniques (encapsulation, immutability, and clear specification of invariants) are your best friends.
There will be times when good object-oriented design techniques are at odds with real-world requirements; it may be necessary in these cases to compromise the rules of good design for the sake of performance or for the sake of backward compatibility with legacy code. Sometimes abstraction and encapsulation are at odds with performance, although not nearly as often as many developers believe, but it is always a good practice first to make your code right, and then make it fast. Even then, pursue optimization only if your performance measurements and requirements tell you that you must, and if those same measurements tell you that your optimizations actually made a difference under realistic conditions.[1]

[1] In concurrent code, this practice should be adhered to even more than usual. Because concurrency bugs are so difficult to reproduce and debug, the benefit of a small performance gain on some infrequently used code path may well be dwarfed by the risk that the program will fail in the field.
If you decide that you simply must break encapsulation, all is not lost. It is still possible to make your program thread-safe; it is just a lot harder. Moreover, the thread safety of your program will be more fragile, increasing not only development cost and risk but maintenance cost and risk as well. Chapter 4 characterizes the conditions under which it is safe to relax encapsulation of state variables.
We've used the terms "thread-safe class" and "thread-safe program" nearly interchangeably thus far. Is a thread-safe program one that is constructed entirely of thread-safe classes? Not necessarily: a program that consists entirely of thread-safe classes may not be thread-safe, and a thread-safe program may contain classes that are not thread-safe. The issues surrounding the composition of thread-safe classes are also taken up in Chapter 4. In any case, the concept of a thread-safe class makes sense only if the class encapsulates its own state. Thread safety may be a term that is applied to code, but it is about state, and it can only be applied to the entire body of code that encapsulates its state, which may be an object or an entire program.
2.1. What is Thread Safety?
Defining thread safety is surprisingly tricky. The more formal attempts are so complicated as to offer little practical guidance or intuitive understanding, and the rest are informal descriptions that can seem downright circular. A quick Google search turns up numerous "definitions" like these:

. . . can be called from multiple program threads without unwanted interactions between the threads.

. . . may be called by more than one thread at a time without requiring any other action on the caller's part.
Given definitions like these, it's no wonder we find thread safety confusing! They sound suspiciously like "a class is thread-safe if it can be used safely from multiple threads." You can't really argue with such a statement, but it doesn't offer much practical help either. How do we tell a thread-safe class from an unsafe one? What do we even mean by "safe"?
At the heart of any reasonable definition of thread safety is the concept of correctness. If our definition of thread safety is fuzzy, it is because we lack a clear definition of correctness.
Correctness means that a class conforms to its specification. A good specification defines invariants constraining an object's state and postconditions describing the effects of its operations. Since we often don't write adequate specifications for our classes, how can we possibly know they are correct? We can't, but that doesn't stop us from using them anyway once we've convinced ourselves that "the code works". This "code confidence" is about as close as many of us get to correctness, so let's just assume that single-threaded correctness is something that "we know it when we see it". Having optimistically defined "correctness" as something that can be recognized, we can now define thread safety in a somewhat less circular way: a class is thread-safe when it continues to behave correctly when accessed from multiple threads.
A class is thread-safe if it behaves correctly when accessed from multiple threads, regardless of the scheduling or interleaving of the execution of those threads by the runtime environment, and with no additional synchronization or other coordination on the part of the calling code.
Since any single-threaded program is also a valid multithreaded program, it cannot be thread-safe if it is not even correct in a single-threaded environment.[2] If an object is correctly implemented, no sequence of operations (calls to public methods and reads or writes of public fields) should be able to violate any of its invariants or postconditions. No set of operations performed sequentially or concurrently on instances of a thread-safe class can cause an instance to be in an invalid state.

[2] If the loose use of "correctness" here bothers you, you may prefer to think of a thread-safe class as one that is no more broken in a concurrent environment than in a single-threaded environment.
Thread-safe classes encapsulate any needed synchronization so that clients need not provide their own.
2.1.1. Example: A Stateless Servlet
In Chapter 1, we listed a number of frameworks that create threads and call your components from those threads, leaving you with the responsibility of making your components thread-safe. Very often, thread-safety requirements stem not from a decision to use threads directly but from a decision to use a facility like the Servlets framework. We're going to develop a simple example, a servlet-based factorization service, and slowly extend it to add features while preserving its thread safety.
Listing 2.1 shows our simple factorization servlet. It unpacks the number to be factored from the servlet request, factors it, and packages the results into the servlet response.
Listing 2.1. A Stateless Servlet.

@ThreadSafe
public class StatelessFactorizer implements Servlet {
    public void service(ServletRequest req, ServletResponse resp) {
        BigInteger i = extractFromRequest(req);
        BigInteger[] factors = factor(i);
        encodeIntoResponse(resp, factors);
    }
}
StatelessFactorizer is, like most servlets, stateless: it has no fields and references no fields from other classes. The transient state for a particular computation exists solely in local variables that are stored on the thread's stack and are accessible only to the executing thread. One thread accessing a StatelessFactorizer cannot influence the result of another thread accessing the same StatelessFactorizer, because the two threads do not share state; it is as if they were accessing different instances. Since the actions of a thread accessing a stateless object cannot affect the correctness of operations in other threads, stateless objects are thread-safe.
Stateless objects are always thread-safe.

The fact that most servlets can be implemented with no state greatly reduces the burden of making servlets thread-safe. It is only when servlets want to remember things from one request to another that the thread safety requirement becomes an issue.
2.2. Atomicity
What happens when we add one element of state to what was a stateless object? Suppose we want to add a "hit counter" that measures the number of requests processed. The obvious approach is to add a long field to the servlet and increment it on each request, as shown in UnsafeCountingFactorizer in Listing 2.2.
Unfortunately, UnsafeCountingFactorizer is not thread-safe, even though it would work just fine in a single-threaded environment. Just like UnsafeSequence on page 6, it is susceptible to lost updates. While the increment operation, ++count, may look like a single action because of its compact syntax, it is not atomic, which means that it does not execute as a single, indivisible operation. Instead, it is shorthand for a sequence of three discrete operations: fetch the current value, add one to it, and write the new value back. This is an example of a read-modify-write operation, in which the resulting state is derived from the previous state.
Listing 2.2. Servlet that Counts Requests without the Necessary Synchronization. Don't Do This.

@NotThreadSafe
public class UnsafeCountingFactorizer implements Servlet {
    private long count = 0;

    public long getCount() { return count; }

    public void service(ServletRequest req, ServletResponse resp) {
        BigInteger i = extractFromRequest(req);
        BigInteger[] factors = factor(i);
        ++count;
        encodeIntoResponse(resp, factors);
    }
}
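To make the three discrete steps visible, here is a sketch of what the increment effectively expands to (the temporary variable is ours, for illustration only):

// Roughly what "++count" performs; another thread's increment can
// interleave between any two of these steps, losing an update.
long temp = count;   // 1. fetch the current value
temp = temp + 1;     // 2. add one to it
count = temp;        // 3. write the new value back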
Figure 1.1 on page 6 shows what can happen if two threads try to increment a counter simultaneously without synchronization. If the counter is initially 9, with some unlucky timing each thread could read the value, see that it is 9, add one to it, and each set the counter to 10. This is clearly not what is supposed to happen; an increment got lost along the way, and the hit counter is now permanently off by one.
You might think that having a slightly inaccurate count of hits in a web-based service is an acceptable loss of accuracy, and sometimes it is. But if the counter is being used to generate sequences or unique object identifiers, returning the same value from multiple invocations could cause serious data integrity problems.[3] The possibility of incorrect results in the presence of unlucky timing is so important in concurrent programming that it has a name: a race condition.

[3] The approach taken by UnsafeSequence and UnsafeCountingFactorizer has other serious problems, including the possibility of stale data (Section 3.1.1).
2.2.1. Race Conditions
UnsafeCountingFactorizer has several race conditions that make its results unreliable. A race condition occurs when the correctness of a computation depends on the relative timing or interleaving of multiple threads by the runtime; in other words, when getting the right answer relies on lucky timing.[4] The most common type of race condition is check-then-act, where a potentially stale observation is used to make a decision on what to do next.

[4] The term race condition is often confused with the related term data race, which arises when synchronization is not used to coordinate all access to a shared nonfinal field. You risk a data race whenever a thread writes a variable that might next be read by another thread or reads a variable that might have last been written by another thread if both threads do not use synchronization; code with data races has no useful defined semantics under the Java Memory Model. Not all race conditions are data races, and not all data races are race conditions, but they both can cause concurrent programs to fail in unpredictable ways. UnsafeCountingFactorizer has both race conditions and data races. See Chapter 16 for more on data races.
We often encounter race conditions in real life. Let's say you planned to meet a friend at noon at the Starbucks on University Avenue. But when you get there, you realize there are two Starbucks on University Avenue, and you're not sure which one you agreed to meet at. At 12:10, you don't see your friend at Starbucks A, so you walk over to Starbucks B to see if he's there, but he isn't there either. There are a few possibilities: your friend is late and not at either Starbucks; your friend arrived at Starbucks A after you left; or your friend was at Starbucks B, but went to look for you, and is now en route to Starbucks A. Let's assume the worst and say it was the last possibility. Now it's 12:15, you've both been to both Starbucks, and you're both wondering if you've been stood up. What do you do now? Go back to the other Starbucks? How many times are you going to go back and forth? Unless you have agreed on a protocol, you could both spend the day walking up and down University Avenue, frustrated and undercaffeinated.
The problem with the "I'll just nip up the street and see if he's at the other one" approach is that while you're walking up the street, your friend might have moved. You look around Starbucks A, observe "he's not here", and go looking for him. And you can do the same for Starbucks B, but not at the same time. It takes a few minutes to walk up the street, and during those few minutes, the state of the system may have changed.
The Starbucks example illustrates a race condition because reaching the desired outcome (meeting your friend) depends on the relative timing of events (when each of you arrives at one Starbucks or the other, how long you wait there before switching, etc.). The observation that he is not at Starbucks A becomes potentially invalid as soon as you walk out the front door; he could have come in through the back door and you wouldn't know. It is this invalidation of observations that characterizes most race conditions: using a potentially stale observation to make a decision or perform a computation. This type of race condition is called check-then-act: you observe something to be true (file X doesn't exist) and then take action based on that observation (create X), but in fact the observation could have become invalid between the time you observed it and the time you acted on it (someone else created X in the meantime), causing a problem (unexpected exception, overwritten data, file corruption).
2.2.2. Example: Race Conditions in Lazy Initialization
A common idiom that uses check-then-act is lazy initialization. The goal of lazy initialization is to defer initializing an object until it is actually needed while at the same time ensuring that it is initialized only once. LazyInitRace in Listing 2.3 illustrates the lazy initialization idiom. The getInstance method first checks whether the ExpensiveObject has already been initialized, in which case it returns the existing instance; otherwise it creates a new instance and returns it after retaining a reference to it so that future invocations can avoid the more expensive code path.
Listing 2.3. Race Condition in Lazy Initialization. Don't Do This.

@NotThreadSafe
public class LazyInitRace {
    private ExpensiveObject instance = null;

    public ExpensiveObject getInstance() {
        if (instance == null)
            instance = new ExpensiveObject();
        return instance;
    }
}
LazyInitRace has race conditions that can undermine its correctness. Say that threads A and B execute getInstance at the same time. A sees that instance is null, and instantiates a new ExpensiveObject. B also checks if instance is null. Whether instance is null at this point depends unpredictably on timing, including the vagaries of scheduling and how long A takes to instantiate the ExpensiveObject and set the instance field. If instance is null when B examines it, the two callers to getInstance may receive two different results, even though getInstance is always supposed to return the same instance.
The hit-counting operation in UnsafeCountingFactorizer has another sort of race condition. Read-modify-write operations, like incrementing a counter, define a transformation of an object's state in terms of its previous state. To increment a counter, you have to know its previous value and make sure no one else changes or uses that value while you are in mid-update.
Like most concurrency errors, race conditions don't always result in failure: some unlucky timing is also required. But race conditions can cause serious problems. If LazyInitRace is used to instantiate an application-wide registry, having it return different instances from multiple invocations could cause registrations to be lost or multiple activities to have inconsistent views of the set of registered objects. If UnsafeSequence is used to generate entity identifiers in a persistence framework, two distinct objects could end up with the same ID, violating identity integrity constraints.
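Anticipating the locking techniques introduced later in this chapter, one way to repair LazyInitRace is to make the entire check-then-act sequence atomic by declaring getInstance synchronized; a sketch (the class name SafeLazyInit is ours):

@ThreadSafe
public class SafeLazyInit {
    private ExpensiveObject instance = null;

    // synchronized makes the check and the initialization a single
    // atomic operation, so only one ExpensiveObject is ever created
    public synchronized ExpensiveObject getInstance() {
        if (instance == null)
            instance = new ExpensiveObject();
        return instance;
    }
}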
2.2.3. Compound Actions
Both LazyInitRace and UnsafeCountingFactorizer contained a sequence of operations that needed to be atomic, or indivisible, relative to other operations on the same state. To avoid race conditions, there must be a way to prevent other threads from using a variable while we're in the middle of modifying it, so we can ensure that other threads can observe or modify the state only before we start or after we finish, but not in the middle.
Operations A and B are atomic with respect to each other if, from the perspective of a thread executing A, when another thread executes B, either all of B has executed or none of it has. An atomic operation is one that is atomic with respect to all operations, including itself, that operate on the same state.

If the increment operation in UnsafeSequence were atomic, the race condition illustrated in Figure 1.1 on page 6 could not occur, and each execution of the increment operation would have the desired effect of incrementing the counter by exactly one. To ensure thread safety, check-then-act operations (like lazy initialization) and read-modify-write operations (like increment) must always be atomic. We refer collectively to check-then-act and read-modify-write sequences as compound actions: sequences of operations that must be executed atomically in order to remain thread-safe. In the next section, we'll consider locking, Java's built-in mechanism for ensuring atomicity. For now, we're going to fix the problem another way, by using an existing thread-safe class, as shown in CountingFactorizer in Listing 2.4.
Listing 2.4. Servlet that Counts Requests Using AtomicLong.

@ThreadSafe
public class CountingFactorizer implements Servlet {
    private final AtomicLong count = new AtomicLong(0);

    public long getCount() { return count.get(); }

    public void service(ServletRequest req, ServletResponse resp) {
        BigInteger i = extractFromRequest(req);
        BigInteger[] factors = factor(i);
        count.incrementAndGet();
        encodeIntoResponse(resp, factors);
    }
}
The java.util.concurrent.atomic package contains atomic variable classes for effecting atomic state transitions on numbers and object references. By replacing the long counter with an AtomicLong, we ensure that all actions that access the counter state are atomic.[5] Because the state of the servlet is the state of the counter and the counter is thread-safe, our servlet is once again thread-safe.

[5] CountingFactorizer calls incrementAndGet to increment the counter, which also returns the incremented value; in this case the return value is ignored.
We were able to add a counter to our factoring servlet and maintain thread safety by using an existing thread-safe class to manage the counter state, AtomicLong. When a single element of state is added to a stateless class, the resulting class will be thread-safe if the state is entirely managed by a thread-safe object. But, as we'll see in the next section, going from one state variable to more than one is not necessarily as simple as going from zero to one.
Where practical, use existing thread-safe objects, like AtomicLong, to manage your class's state. It is simpler to reason about the possible states and state transitions for existing thread-safe objects than it is for arbitrary state variables, and this makes it easier to maintain and verify thread safety.
2.3. Locking
We were able to add one state variable to our servlet while maintaining thread safety by using a thread-safe object to manage the entire state of the servlet. But if we want to add more state to our servlet, can we just add more thread-safe state variables?
Imagine that we want to improve the performance of our servlet by caching the most recently computed result, just in case two consecutive clients request factorization of the same number. (This is unlikely to be an effective caching strategy; we offer a better one in Section 5.6.) To implement this strategy, we need to remember two things: the last number factored, and its factors.
We used AtomicLong to manage the counter state in a thread-safe manner; could we perhaps use its cousin, AtomicReference,[6] to manage the last number and its factors? An attempt at this is shown in UnsafeCachingFactorizer in Listing 2.5.

[6] Just as AtomicLong is a thread-safe holder class for a long integer, AtomicReference is a thread-safe holder class for an object reference. Atomic variables and their benefits are covered in Chapter 15.
Listing 2.5. Servlet that Attempts to Cache its Last Result without Adequate Atomicity. Don't Do This.

@NotThreadSafe
public class UnsafeCachingFactorizer implements Servlet {
    private final AtomicReference<BigInteger> lastNumber
        = new AtomicReference<BigInteger>();
    private final AtomicReference<BigInteger[]> lastFactors
        = new AtomicReference<BigInteger[]>();

    public void service(ServletRequest req, ServletResponse resp) {
        BigInteger i = extractFromRequest(req);
        if (i.equals(lastNumber.get()))
            encodeIntoResponse(resp, lastFactors.get());
        else {
            BigInteger[] factors = factor(i);
            lastNumber.set(i);
            lastFactors.set(factors);
            encodeIntoResponse(resp, factors);
        }
    }
}
Unfortunately, this approach does not work. Even though the atomic references are individually thread-safe, UnsafeCachingFactorizer has race conditions that could make it produce the wrong answer.
The definition of thread safety requires that invariants be preserved regardless of timing or interleaving of operations in multiple threads. One invariant of UnsafeCachingFactorizer is that the product of the factors cached in lastFactors equal the value cached in lastNumber; our servlet is correct only if this invariant always holds. When multiple variables participate in an invariant, they are not independent: the value of one constrains the allowed value(s) of the others. Thus when updating one, you must update the others in the same atomic operation.
With some unlucky timing, UnsafeCachingFactorizer can violate this invariant. Using atomic references, we cannot update both lastNumber and lastFactors simultaneously, even though each call to set is atomic; there is still a window of vulnerability when one has been modified and the other has not, and during that time other threads could see that the invariant does not hold. Similarly, the two values cannot be fetched simultaneously: between the time when thread A fetches the two values, thread B could have changed them, and again A may observe that the invariant does not hold.
To preserve state consistency, update related state variables in a single atomic operation.
2.3.1. Intrinsic Locks
Java provides a built-in locking mechanism for enforcing atomicity: the synchronized block. (There is also another critical aspect to locking and other synchronization mechanisms, visibility, which is covered in Chapter 3.) A synchronized block has two parts: a reference to an object that will serve as the lock, and a block of code to be guarded by that lock. A synchronized method is shorthand for a synchronized block that spans an entire method body, and whose lock is the object on which the method is being invoked. (Static synchronized methods use the Class object for the lock.)

synchronized (lock) {
    // Access or modify shared state guarded by lock
}
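To spell out the shorthand described above, here is a sketch (class and method names are ours) showing the equivalences:

public class Counter {
    private long count;
    private static long instances;

    // A synchronized instance method locks on the receiver (this)...
    public synchronized void increment() { ++count; }

    // ...and is equivalent to wrapping the whole body in a
    // synchronized (this) block:
    public void incrementEquivalent() {
        synchronized (this) {
            ++count;
        }
    }

    // A static synchronized method locks on the Class object instead;
    // here the lock is Counter.class
    public static synchronized void recordInstance() { ++instances; }
}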
Every Java object can implicitly act as a lock for purposes of synchronization; these built-in locks are called intrinsic locks or monitor locks. The lock is automatically acquired by the executing thread before entering a synchronized block and automatically released when control exits the synchronized block, whether by the normal control path or by throwing an exception out of the block. The only way to acquire an intrinsic lock is to enter a synchronized block or method guarded by that lock.
Intrinsic locks in Java act as mutexes (or mutual exclusion locks), which means that at most one thread may own the lock. When thread A attempts to acquire a lock held by thread B, A must wait, or block, until B releases it. If B never releases the lock, A waits forever.
Since only one thread at a time can execute a block of code guarded by a given lock, the synchronized blocks guarded by the same lock execute atomically with respect to one another. In the context of concurrency, atomicity means the same thing as it does in transactional applications: that a group of statements appear to execute as a single, indivisible unit. No thread executing a synchronized block can observe another thread to be in the middle of a synchronized block guarded by the same lock.
The machinery of synchronization makes it easy to restore thread safety to the factoring servlet. Listing 2.6 makes the service method synchronized, so only one thread may enter service at a time. SynchronizedFactorizer is now thread-safe; however, this approach is fairly extreme, since it inhibits multiple clients from using the factoring servlet simultaneously at all, resulting in unacceptably poor responsiveness. This problem, which is a performance problem, not a thread safety problem, is addressed in Section 2.5.
Listing 2.6. Servlet that Caches Last Result, But with Unacceptably Poor Concurrency. Don't Do This.

@ThreadSafe
public class SynchronizedFactorizer implements Servlet {
    @GuardedBy("this") private BigInteger lastNumber;
    @GuardedBy("this") private BigInteger[] lastFactors;

    public synchronized void service(ServletRequest req,
                                     ServletResponse resp) {
        BigInteger i = extractFromRequest(req);
        if (i.equals(lastNumber))
            encodeIntoResponse(resp, lastFactors);
        else {
            BigInteger[] factors = factor(i);
            lastNumber = i;
            lastFactors = factors;
            encodeIntoResponse(resp, factors);
        }
    }
}
2.3.2. Reentrancy
When a thread requests a lock that is already held by another thread, the requesting thread blocks. But because intrinsic locks are reentrant, if a thread tries to acquire a lock that it already holds, the request succeeds. Reentrancy means that locks are acquired on a per-thread rather than per-invocation basis.[7] Reentrancy is implemented by associating with each lock an acquisition count and an owning thread. When the count is zero, the lock is considered unheld. When a thread acquires a previously unheld lock, the JVM records the owner and sets the acquisition count to one. If that same thread acquires the lock again, the count is incremented, and when the owning thread exits the synchronized block, the count is decremented. When the count reaches zero, the lock is released.

[7] This differs from the default locking behavior for pthreads (POSIX threads) mutexes, which are granted on a per-invocation basis.
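The bookkeeping just described can be sketched in ordinary Java. This is purely an illustration of the counting scheme, not how the JVM actually implements intrinsic locks (the class name is ours):

public class ReentrantishLock {
    private Thread owner = null;  // null means unheld
    private int holdCount = 0;    // zero means unheld

    public synchronized void lock() throws InterruptedException {
        Thread current = Thread.currentThread();
        if (owner == current) {   // re-acquisition by the owning thread
            ++holdCount;          // just bump the acquisition count
            return;
        }
        while (owner != null)     // held by another thread: wait
            wait();
        owner = current;          // record the new owner
        holdCount = 1;
    }

    public synchronized void unlock() {
        if (owner != Thread.currentThread())
            throw new IllegalMonitorStateException();
        if (--holdCount == 0) {   // outermost release: lock becomes free
            owner = null;
            notifyAll();
        }
    }
}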
Reentrancy facilitates encapsulation of locking behavior, and thus simplifies the development of object-oriented concurrent code. Without reentrant locks, the very natural-looking code in Listing 2.7, in which a subclass overrides a synchronized method and then calls the superclass method, would deadlock. Because the doSomething methods in Widget and LoggingWidget are both synchronized, each tries to acquire the lock on the Widget before proceeding. But if intrinsic locks were not reentrant, the call to super.doSomething would never be able to acquire the lock because it would be considered already held, and the thread would permanently stall waiting for a lock it can never acquire. Reentrancy saves us from deadlock in situations like this.
Listing 2.7. Code that Would Deadlock if Intrinsic Locks were Not Reentrant.

public class Widget {
    public synchronized void doSomething() {
        ...
    }
}

public class LoggingWidget extends Widget {
    public synchronized void doSomething() {
        System.out.println(toString() + ": calling doSomething");
        super.doSomething();
    }
}
2.4. Guarding State with Locks
Because locks enable serialized[8] access to the code paths they guard, we can use them to construct protocols for guaranteeing exclusive access to shared state. Following these protocols consistently can ensure state consistency.

[8] Serializing access to an object has nothing to do with object serialization (turning an object into a byte stream); serializing access means that threads take turns accessing the object exclusively, rather than doing so concurrently.
Compound actions on shared state, such as incrementing a hit counter (read-modify-write) or lazy initialization (check-then-act), must be made atomic to avoid race conditions. Holding a lock for the entire duration of a compound action can make that compound action atomic. However, just wrapping the compound action with a synchronized block is not sufficient; if synchronization is used to coordinate access to a variable, it is needed everywhere that variable is accessed. Further, when using locks to coordinate access to a variable, the same lock must be used wherever that variable is accessed.
It is a common mistake to assume that synchronization needs to be used only when writing to shared variables; this is simply not true. (The reasons for this will become clearer in Section 3.1.)
For each mutable state variable that may be accessed by more than one thread, all accesses to that variable must be performed with the same lock held. In this case, we say that the variable is guarded by that lock.
In SynchronizedFactorizer in Listing 2.6, lastNumber and lastFactors are guarded by the servlet object's intrinsic lock; this is documented by the @GuardedBy annotation.
There is no inherent relationship between an object's intrinsic lock and its state; an object's fields need not be guarded by its intrinsic lock, though this is a perfectly valid locking convention that is used by many classes. Acquiring the lock associated with an object does not prevent other threads from accessing that object; the only thing that acquiring a lock prevents any other thread from doing is acquiring that same lock. The fact that every object has a built-in lock is just a convenience so that you needn't explicitly create lock objects.[9] It is up to you to construct locking protocols or synchronization policies that let you access shared state safely, and to use them consistently throughout your program.

[9] In retrospect, this design decision was probably a bad one; not only can it be confusing, but it forces JVM implementers to make tradeoffs between object size and locking performance.
Every shared, mutable variable should be guarded by exactly one lock. Make it clear to maintainers which lock that is.
A common locking convention is to encapsulate all mutable state within an object and to protect it from concurrent access by synchronizing any code path that accesses mutable state using the object's intrinsic lock. This pattern is used by many thread-safe classes, such as Vector and other synchronized collection classes. In such cases, all the variables in an object's state are guarded by the object's intrinsic lock. However, there is nothing special about this pattern, and neither the compiler nor the runtime enforces this (or any other) pattern of locking.[10] It is also easy to subvert this locking protocol accidentally by adding a new method or code path and forgetting to use synchronization.

[10] Code auditing tools like FindBugs can identify when a variable is frequently but not always accessed with a lock held, which may indicate a bug.
Not all data needs to be guarded by locks, only mutable data that will be accessed from multiple threads. In Chapter 1, we described how adding a simple asynchronous event such as a TimerTask can create thread safety requirements that ripple throughout your program, especially if your program state is poorly encapsulated. Consider a single-threaded program that processes a large amount of data. Single-threaded programs require no synchronization, because no data is shared across threads. Now imagine you want to add a feature to create periodic snapshots of its progress, so that it does not have to start again from the beginning if it crashes or must be stopped. You might choose to do this with a TimerTask that goes off every ten minutes, saving the program state to a file.
Since the TimerTask will be called from another thread (one managed by Timer), any data involved in the snapshot is now accessed by two threads: the main program thread and the Timer thread. This means that not only must the TimerTask code use synchronization when accessing the program state, but so must any code path in the rest of the program that touches that same data. What used to require no synchronization now requires synchronization throughout the program.
When a variable is guarded by a lock, meaning that every access to that variable is performed with that lock held, you've ensured that only one thread at a time can access that variable. When a class has invariants that involve more than one state variable, there is an additional requirement: each variable participating in the invariant must be guarded by the same lock. This allows you to access or update them in a single atomic operation, preserving the invariant. SynchronizedFactorizer demonstrates this rule: both the cached number and the cached factors are guarded by the servlet object's intrinsic lock.
For every invariant that involves more than one variable, all the variables involved in that invariant must be guarded by the same lock.
If synchronization is the cure for race conditions, why not just declare every method synchronized? It turns out that such indiscriminate application of synchronized might be either too much or too little synchronization. Merely synchronizing every method, as Vector does, is not enough to render compound actions on a Vector atomic:

if (!vector.contains(element))
    vector.add(element);
This attempt at a put-if-absent operation has a race condition, even though both contains and add are atomic. While synchronized methods can make individual operations atomic, additional locking is required when multiple operations are combined into a compound action. (See Section 4.4 for some techniques for safely adding additional atomic operations to thread-safe objects; a sketch of one such fix appears below.) At the same time, synchronizing every method can lead to liveness or performance problems, as we saw in SynchronizedFactorizer.
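A sketch of such a fix (the helper class and method names are ours; this anticipates the client-side locking technique of Section 4.4): because Vector's synchronized methods lock on the Vector itself, client code can acquire the same lock to make the two calls into one atomic compound action:

import java.util.Vector;

public class VectorHelpers {
    public static <E> boolean putIfAbsent(Vector<E> vector, E element) {
        synchronized (vector) {   // the same lock Vector's own methods use
            boolean absent = !vector.contains(element);
            if (absent)
                vector.add(element);
            return absent;
        }
    }
}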
2.5. Liveness and Performance
In UnsafeCachingFactorizer, we introduced some caching into our factoring servlet in the hope of improving performance. Caching required some shared state, which in turn required synchronization to maintain the integrity of that state. But the way we used synchronization in SynchronizedFactorizer makes it perform badly. The synchronization policy for SynchronizedFactorizer is to guard each state variable with the servlet object's intrinsic lock, and that policy was implemented by synchronizing the entirety of the service method. This simple, coarse-grained approach restored safety, but at a high price.
Because service is synchronized, only one thread may execute it at once. This subverts the intended use of the servlet framework, that servlets be able to handle multiple requests simultaneously, and can result in frustrated users if the load is high enough. If the servlet is busy factoring a large number, other clients have to wait until the current request is complete before the servlet can start on the new number. If the system has multiple CPUs, processors may remain idle even if the load is high. In any case, even short-running requests, such as those for which the value is cached, may take an unexpectedly long time because they must wait for previous long-running requests to complete.
Figure 2.1 shows what happens when multiple requests arrive for the synchronized factoring servlet: they queue up and are handled sequentially. We would describe this web application as exhibiting poor concurrency: the number of simultaneous invocations is limited not by the availability of processing resources, but by the structure of the application itself. Fortunately, it is easy to improve the concurrency of the servlet while maintaining thread safety by narrowing the scope of the synchronized block. You should be careful not to make the scope of the synchronized block too small; you would not want to divide an operation that should be atomic into more than one synchronized block. But it is reasonable to try to exclude from synchronized blocks long-running operations that do not affect shared state, so that other threads are not prevented from accessing the shared state while the long-running operation is in progress.
Figure 2.1. Poor Concurrency of SynchronizedFactorizer.
CachedFactorizer in Listing 2.8 restructures the servlet to use two separate synchronized blocks, each limited to a short section of code. One guards the check-then-act sequence that tests whether we can just return the cached result, and the other guards updating both the cached number and the cached factors. As a bonus, we've reintroduced the hit counter and added a "cache hit" counter as well, updating them within the initial synchronized block. Because these counters constitute shared mutable state as well, we must use synchronization everywhere they are accessed. The portions of code that are outside the synchronized blocks operate exclusively on local (stack-based) variables, which are not shared across threads and therefore do not require synchronization.
Listing 2.8. Servlet that Caches its Last Request and Result.

@ThreadSafe
public class CachedFactorizer implements Servlet {
    @GuardedBy("this") private BigInteger lastNumber;
    @GuardedBy("this") private BigInteger[] lastFactors;
    @GuardedBy("this") private long hits;
    @GuardedBy("this") private long cacheHits;

    public synchronized long getHits() { return hits; }
    public synchronized double getCacheHitRatio() {
        return (double) cacheHits / (double) hits;
    }

    public void service(ServletRequest req, ServletResponse resp) {
        BigInteger i = extractFromRequest(req);
        BigInteger[] factors = null;
        synchronized (this) {
            ++hits;
            if (i.equals(lastNumber)) {
                ++cacheHits;
                factors = lastFactors.clone();
            }
        }
        if (factors == null) {
            factors = factor(i);
            synchronized (this) {
                lastNumber = i;
                lastFactors = factors.clone();
            }
        }
        encodeIntoResponse(resp, factors);
    }
}
CachedFactorizer no longer uses AtomicLong for the hit counter, instead reverting to using a long field. It would be safe to use AtomicLong here, but there is less benefit than there was in CountingFactorizer. Atomic variables are useful for effecting atomic operations on a single variable, but since we are already using synchronized blocks to construct atomic operations, using two different synchronization mechanisms would be confusing and would offer no performance or safety benefit.
The restructuring of CachedFactorizer provides a balance between simplicity (synchronizing the entire method) and concurrency (synchronizing the shortest possible code paths). Acquiring and releasing a lock has some overhead, so it is undesirable to break down synchronized blocks too far (such as factoring ++hits into its own synchronized block), even if this would not compromise atomicity. CachedFactorizer holds the lock when accessing state variables and for the duration of compound actions, but releases it before executing the potentially long-running factorization operation. This preserves thread safety without unduly affecting concurrency; the code paths in each of the synchronized blocks are "short enough".
Deciding how big or small to make synchronized blocks may require tradeoffs among competing design forces, including safety (which must not be compromised), simplicity, and performance. Sometimes simplicity and performance are at odds with each other, although as CachedFactorizer illustrates, a reasonable balance can usually be found.
There is frequently a tension between simplicity and performance. When implementing a synchronization policy, resist the temptation to prematurely sacrifice simplicity (potentially compromising safety) for the sake of performance.
Whenever you use locking, you should be aware of what the code in the block is doing and how likely it is to take a long time to execute. Holding a lock for a long time, either because you are doing something compute-intensive or because you execute a potentially blocking operation, introduces the risk of liveness or performance problems.
Avoid holding locks during lengthy computations or operations at risk of not completing quickly, such as network or console I/O.
Chapter 3. Sharing Objects
We stated at the beginning of Chapter 2 that writing correct concurrent programs is primarily about managing access to shared, mutable state. That chapter was about using synchronization to prevent multiple threads from accessing the same data at the same time; this chapter examines techniques for sharing and publishing objects so they can be safely accessed by multiple threads. Together, they lay the foundation for building thread-safe classes and safely structuring concurrent applications using the java.util.concurrent library classes.
We have seen how synchronized blocks and methods can ensure that operations execute atomically, but it is a common misconception that synchronized is only about atomicity or demarcating "critical sections". Synchronization also has another significant, and subtle, aspect: memory visibility. We want not only to prevent one thread from modifying the state of an object when another is using it, but also to ensure that when a thread modifies the state of an object, other threads can actually see the changes that were made. But without synchronization, this may not happen. You can ensure that objects are published safely either by using explicit synchronization or by taking advantage of the synchronization built into library classes.
3.1. Visibility
Visibility is subtle because the things that can go wrong are so counterintuitive. In a single-threaded environment, if you write a value to a variable and later read that variable with no intervening writes, you can expect to get the same value back. This seems only natural. It may be hard to accept at first, but when the reads and writes occur in different threads, this is simply not the case. In general, there is no guarantee that the reading thread will see a value written by another thread on a timely basis, or even at all. In order to ensure visibility of memory writes across threads, you must use synchronization.
NoVisibility in Listing 3.1 illustrates what can go wrong when threads share data without synchronization. Two threads, the main thread and the reader thread, access the shared variables ready and number. The main thread starts the reader thread and then sets number to 42 and ready to true. The reader thread spins until it sees ready is true, and then prints out number. While it may seem obvious that NoVisibility will print 42, it is in fact possible that it will print zero, or never terminate at all! Because it does not use adequate synchronization, there is no guarantee that the values of ready and number written by the main thread will be visible to the reader thread.
Listing 3.1. Sharing Variables without Synchronization. Don't Do This.

public class NoVisibility {
    private static boolean ready;
    private static int number;

    private static class ReaderThread extends Thread {
        public void run() {
            while (!ready)
                Thread.yield();
            System.out.println(number);
        }
    }

    public static void main(String[] args) {
        new ReaderThread().start();
        number = 42;
        ready = true;
    }
}
NoVisibility could loop forever because the value of ready might never become visible to the reader thread. Even more strangely, NoVisibility could print zero because the write to ready might be made visible to the reader thread before the write to number, a phenomenon known as reordering. There is no guarantee that operations in one thread will be performed in the order given by the program, as long as the reordering is not detectable from within that thread, even if the reordering is apparent to other threads.[1] When the main thread writes first to number and then to ready without synchronization, the reader thread could see those writes happen in the opposite order, or not at all.

[1] This may seem like a broken design, but it is meant to allow JVMs to take full advantage of the performance of modern multiprocessor hardware. For example, in the absence of synchronization, the Java Memory Model permits the compiler to reorder operations and cache values in registers, and permits CPUs to reorder operations and cache values in processor-specific caches. For more details, see Chapter 16.
In the absence of synchronization, the compiler, processor, and runtime can do some downright weird things to the order in which operations appear to execute. Attempts to reason about the order in which memory actions "must" happen in insufficiently synchronized multithreaded programs will almost certainly be incorrect.
NoVisibility is about as simple as a concurrent program can get: two threads and two shared variables. And yet it is still all too easy to come to the wrong conclusions about what it does or even whether it will terminate. Reasoning about insufficiently synchronized concurrent programs is prohibitively difficult.
This may all sound a little scary, and it should. Fortunately, there's an easy way to avoid these complex issues: always use the proper synchronization whenever data is shared across threads.
3.1.1. Stale Data
NoVisibility demonstrated one of the ways that insufficiently synchronized programs can cause surprising results: stale data. When the reader thread examines ready, it may see an out-of-date value. Unless synchronization is used every time a variable is accessed, it is possible to see a stale value for that variable. Worse, staleness is not all-or-nothing: a thread can see an up-to-date value of one variable but a stale value of another variable that was written first.
When food is stale, it is usually still edible, just less enjoyable. But stale data can be more dangerous. While an out-of-date hit counter in a web application might not be so bad,[2] stale values can cause serious safety or liveness failures. In NoVisibility, stale values could cause it to print the wrong value or prevent the program from terminating. Things can get even more complicated with stale values of object references, such as the link pointers in a linked list implementation. Stale data can cause serious and confusing failures such as unexpected exceptions, corrupted data structures, inaccurate computations, and infinite loops.

[2] Reading data without synchronization is analogous to using the READ_UNCOMMITTED isolation level in a database, where you are willing to trade accuracy for performance. However, in the case of unsynchronized reads, you are trading away a greater degree of accuracy, since the visible value for a shared variable can be arbitrarily stale.
MutableInteger in Listing 3.2 is not thread-safe because the value field is accessed from both get and set without synchronization. Among other hazards, it is susceptible to stale values: if one thread calls set, other threads calling get may or may not see that update.
We can make MutableInteger thread-safe by synchronizing the getter and setter as shown in SynchronizedInteger in Listing 3.3. Synchronizing only the setter would not be sufficient: threads calling get would still be able to see stale values.
Listing 3.2. Non-thread-safe Mutable Integer Holder.

@NotThreadSafe
public class MutableInteger {
    private int value;

    public int get() { return value; }
    public void set(int value) { this.value = value; }
}
Listing 3.3. Thread-safe Mutable Integer Holder.

@ThreadSafe
public class SynchronizedInteger {
    @GuardedBy("this") private int value;

    public synchronized int get() { return value; }
    public synchronized void set(int value) { this.value = value; }
}
3.1.2. Nonatomic 64-bit Operations
When a thread reads a variable without synchronization, it may see a stale value, but at least it sees a value that was actually placed there by some thread rather than some random value. This safety guarantee is called out-of-thin-air safety.
CuLofLhlnalr safeLy applles Lo all varlables, wlLh one excepLlon: 64blL numerlc varlables (;2)M0& and 02"6) LhaL are
noL declared /20$.40& (see SecLlon 3.1.4). 1he !ava Memory Model requlres feLch and sLore operaLlons Lo be aLomlc,
buL for nonvolaLlle 02"6 and ;2)M0& varlables, Lhe !vM ls permlLLed Lo LreaL a 64blL read or wrlLe as Lwo separaLe 32
blL operaLlons. lf Lhe reads and wrlLes occur ln dlfferenL Lhreads, lL ls Lherefore posslble Lo read a nonvolaLlle 02"6 and

23
32888328783286832498323083229832008314884916847228472184720847198439484393843698436884367843
66843638436484427844268441084409843438421884217842168421383636836338346983468834638343383434
834338344983221832208321983214830398303883037823338233482190821898218882 138ChapLer 3. Sharlng
Cb[ecLs
geL back Lhe hlgh 32 blLs of one value and Lhe low 32 blLs of anoLher.
[3]
1hus, even lf you don'L care abouL sLale values, lL
ls noL safe Lo use shared muLable 02"6 and ;2)M0& varlables ln mulLlLhreaded programs unless Lhey are declared
/20$.40& or guarded by a lock.
[3] When the Java Virtual Machine Specification was written, many widely used processor architectures could not efficiently provide atomic 64-bit arithmetic operations.
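To make the two remedies concrete, here is a minimal sketch; SharedTimestamps and its field names are our own illustration, not one of the numbered listings.

// Illustrative sketch: two safe ways to share a 64-bit field across threads.
public class SharedTimestamps {
    // Option 1: volatile makes reads and writes of the 64-bit value atomic,
    // so a reader can never see half of one write and half of another.
    private volatile long lastSeen;

    // Option 2: guard a nonvolatile long with a lock.
    private long lastUpdated;   // guarded by "this"

    public void setLastSeen(long t) { lastSeen = t; }
    public long getLastSeen() { return lastSeen; }

    public synchronized void setLastUpdated(long t) { lastUpdated = t; }
    public synchronized long getLastUpdated() { return lastUpdated; }
}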
3.1.3. Locking and Visibility

Intrinsic locking can be used to guarantee that one thread sees the effects of another in a predictable manner, as illustrated by Figure 3.1. When thread A executes a synchronized block, and subsequently thread B enters a synchronized block guarded by the same lock, the values of variables that were visible to A prior to releasing the lock are guaranteed to be visible to B upon acquiring the lock. In other words, everything A did in or prior to a synchronized block is visible to B when it executes a synchronized block guarded by the same lock. Without synchronization, there is no such guarantee.

Figure 3.1. Visibility Guarantees for Synchronization.

We can now give the other reason for the rule requiring all threads to synchronize on the same lock when accessing a shared mutable variable: to guarantee that values written by one thread are made visible to other threads. Otherwise, if a thread reads a variable without holding the appropriate lock, it might see a stale value.

Locking is not just about mutual exclusion; it is also about memory visibility. To ensure that all threads see the most up-to-date values of shared mutable variables, the reading and writing threads must synchronize on a common lock.
3.1.4. Volatile Variables

The Java language also provides an alternative, weaker form of synchronization, volatile variables, to ensure that updates to a variable are propagated predictably to other threads. When a field is declared volatile, the compiler and runtime are put on notice that this variable is shared and that operations on it should not be reordered with other memory operations. Volatile variables are not cached in registers or in caches where they are hidden from other processors, so a read of a volatile variable always returns the most recent write by any thread.
A good way to think about volatile variables is to imagine that they behave roughly like the SynchronizedInteger class in Listing 3.3, replacing reads and writes of the volatile variable with calls to get and set.[4] Yet accessing a volatile variable performs no locking and so cannot cause the executing thread to block, making volatile variables a lighter-weight synchronization mechanism than synchronized.[5]
[4] This analogy is not exact; the memory visibility effects of SynchronizedInteger are actually slightly stronger than those of volatile variables. See Chapter 16.

[5] Volatile reads are only slightly more expensive than nonvolatile reads on most current processor architectures.
The visibility effects of volatile variables extend beyond the value of the volatile variable itself. When thread A writes to a volatile variable and subsequently thread B reads that same variable, the values of all variables that were visible to A prior to writing to the volatile variable become visible to B after reading the volatile variable. So from a memory visibility perspective, writing a volatile variable is like exiting a synchronized block and reading a volatile variable is like entering a synchronized block. However, we do not recommend relying too heavily on volatile variables for visibility; code that relies on volatile variables for visibility of arbitrary state is more fragile and harder to understand than code that uses locking.

Use volatile variables only when they simplify implementing and verifying your synchronization policy; avoid using volatile variables when verifying correctness would require subtle reasoning about visibility. Good uses of volatile variables include ensuring the visibility of their own state, that of the object they refer to, or indicating that an important lifecycle event (such as initialization or shutdown) has occurred.

Listing 3.4 illustrates a typical use of volatile variables: checking a status flag to determine when to exit a loop. In this example, our anthropomorphized thread is trying to get to sleep by the time-honored method of counting sheep. For this example to work, the asleep flag must be volatile. Otherwise, the thread might not notice when asleep has been set by another thread.[6] We could instead have used locking to ensure visibility of changes to asleep, but that would have made the code more cumbersome.
[6] Debugging tip: for server applications, be sure to always specify the -server JVM command line switch when invoking the JVM, even for development and testing. The server JVM performs more optimization than the client JVM, such as hoisting variables out of a loop that are not modified in the loop; code that might appear to work in the development environment (client JVM) can break in the deployment environment (server JVM). For example, had we "forgotten" to declare the variable asleep as volatile in Listing 3.4, the server JVM could hoist the test out of the loop (turning it into an infinite loop), but the client JVM would not. An infinite loop that shows up in development is far less costly than one that only shows up in production.

Listing 3.4. Counting Sheep.

volatile boolean asleep;
...
while (!asleep)
    countSomeSheep();
Volatile variables are convenient, but they have limitations. The most common use for volatile variables is as a completion, interruption, or status flag, such as the asleep flag in Listing 3.4. Volatile variables can be used for other kinds of state information, but more care is required when attempting this. For example, the semantics of volatile are not strong enough to make the increment operation (count++) atomic, unless you can guarantee that the variable is written only from a single thread; the sketch following the criteria list below illustrates this pitfall. (Atomic variables do provide atomic read-modify-write support and can often be used as "better volatile variables"; see Chapter 15.)

Locking can guarantee both visibility and atomicity; volatile variables can only guarantee visibility.
You can use volatile variables only when all the following criteria are met:
- Writes to the variable do not depend on its current value, or you can ensure that only a single thread ever updates the value;
- The variable does not participate in invariants with other state variables; and
- Locking is not required for any other reason while the variable is being accessed.
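The following sketch makes the count++ pitfall concrete; HitCounter is our own illustration, not one of the numbered listings, and it uses AtomicInteger as the "better volatile variable" mentioned above.

import java.util.concurrent.atomic.AtomicInteger;

// Illustrative sketch of why volatile alone cannot make increments atomic.
public class HitCounter {
    private volatile int unsafeHits;                      // visibility only
    private final AtomicInteger safeHits = new AtomicInteger();

    public void recordUnsafe() {
        unsafeHits++;   // BROKEN: read-modify-write; concurrent calls can lose updates
    }

    public void recordSafe() {
        safeHits.incrementAndGet();   // atomic read-modify-write
    }

    public int unsafeCount() { return unsafeHits; }
    public int safeCount() { return safeHits.get(); }
}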
3.2. Publication and Escape

Publishing an object means making it available to code outside of its current scope, such as by storing a reference to it where other code can find it, returning it from a nonprivate method, or passing it to a method in another class. In many situations, we want to ensure that objects and their internals are not published. In other situations, we do want to publish an object for general use, but doing so in a thread-safe manner may require synchronization. Publishing internal state variables can compromise encapsulation and make it more difficult to preserve invariants; publishing objects before they are fully constructed can compromise thread safety. An object that is published when it should not have been is said to have escaped. Section 3.5 covers idioms for safe publication; right now, we look at how an object can escape.
The most blatant form of publication is to store a reference in a public static field, where any class and thread could see it, as in Listing 3.5. The initialize method instantiates a new HashSet and publishes it by storing a reference to it into knownSecrets.

Listing 3.5. Publishing an Object.

public static Set<Secret> knownSecrets;

public void initialize() {
    knownSecrets = new HashSet<Secret>();
}

Publishing one object may indirectly publish others. If you add a Secret to the published knownSecrets set, you've also published that Secret, because any code can iterate the Set and obtain a reference to the new Secret. Similarly, returning a reference from a nonprivate method also publishes the returned object. UnsafeStates in Listing 3.6 publishes the supposedly private array of state abbreviations.

Listing 3.6. Allowing Internal Mutable State to Escape. Don't do this.

class UnsafeStates {
    private String[] states = new String[] {
        "AK", "AL" ...
    };
    public String[] getStates() { return states; }
}
Publishing states in this way is problematic because any caller can modify its contents. In this case, the states array has escaped its intended scope, because what was supposed to be private state has been effectively made public.

Publishing an object also publishes any objects referred to by its nonprivate fields. More generally, any object that is reachable from a published object by following some chain of nonprivate field references and method calls has also been published.

From the perspective of a class C, an alien method is one whose behavior is not fully specified by C. This includes methods in other classes as well as overrideable methods (neither private nor final) in C itself. Passing an object to an alien method must also be considered publishing that object. Since you can't know what code will actually be invoked, you don't know that the alien method won't publish the object or retain a reference to it that might later be used from another thread.

Whether another thread actually does something with a published reference doesn't really matter, because the risk of misuse is still present.[7] Once an object escapes, you have to assume that another class or thread may, maliciously or carelessly, misuse it. This is a compelling reason to use encapsulation: it makes it practical to analyze programs for correctness and harder to violate design constraints accidentally.
[7] If someone steals your password and posts it on the alt.free-passwords newsgroup, that information has escaped: whether or not someone has (yet) used those credentials to create mischief, your account has still been compromised. Publishing a reference poses the same sort of risk.

A final mechanism by which an object or its internal state can be published is to publish an inner class instance, as shown in ThisEscape in Listing 3.7. When ThisEscape publishes the EventListener, it implicitly publishes the enclosing ThisEscape instance as well, because inner class instances contain a hidden reference to the enclosing instance.
Listing 3.7. Implicitly Allowing the this Reference to Escape. Don't do this.

public class ThisEscape {
    public ThisEscape(EventSource source) {
        source.registerListener(
            new EventListener() {
                public void onEvent(Event e) {
                    doSomething(e);
                }
            });
    }
}
3.2.1. Safe Construction Practices

ThisEscape illustrates an important special case of escape: when the this reference escapes during construction. When the inner EventListener instance is published, so is the enclosing ThisEscape instance. But an object is in a predictable, consistent state only after its constructor returns, so publishing an object from within its constructor can publish an incompletely constructed object. This is true even if the publication is the last statement in the constructor. If the this reference escapes during construction, the object is considered not properly constructed.[8]

[8] More specifically, the this reference should not escape from the thread until after the constructor returns. The this reference can be stored somewhere by the constructor as long as it is not used by another thread until after construction. SafeListener in Listing 3.8 uses this technique.

Do not allow the this reference to escape during construction.
A common mistake that can let the this reference escape during construction is to start a thread from a constructor. When an object creates a thread from its constructor, it almost always shares its this reference with the new thread, either explicitly (by passing it to the constructor) or implicitly (because the Thread or Runnable is an inner class of the owning object). The new thread might then be able to see the owning object before it is fully constructed. There's nothing wrong with creating a thread in a constructor, but it is best not to start the thread immediately. Instead, expose a start or initialize method that starts the owned thread. (See Chapter 7 for more on service lifecycle issues.) Calling an overrideable instance method (one that is neither private nor final) from the constructor can also allow the this reference to escape.

If you are tempted to register an event listener or start a thread from a constructor, you can avoid the improper construction by using a private constructor and a public factory method, as shown in SafeListener in Listing 3.8.
Listing 3.8. Using a Factory Method to Prevent the this Reference from Escaping During Construction.

public class SafeListener {
    private final EventListener listener;

    private SafeListener() {
        listener = new EventListener() {
            public void onEvent(Event e) {
                doSomething(e);
            }
        };
    }

    public static SafeListener newInstance(EventSource source) {
        SafeListener safe = new SafeListener();
        source.registerListener(safe.listener);
        return safe;
    }
}
3.3. Thread Confinement

Accessing shared, mutable data requires using synchronization; one way to avoid this requirement is to not share. If data is only accessed from a single thread, no synchronization is needed. This technique, thread confinement, is one of the simplest ways to achieve thread safety. When an object is confined to a thread, such usage is automatically thread-safe even if the confined object itself is not [CPJ 2.3.2].
Swing uses thread confinement extensively. The Swing visual components and data model objects are not thread-safe; instead, safety is achieved by confining them to the Swing event dispatch thread. To use Swing properly, code running in threads other than the event thread should not access these objects. (To make this easier, Swing provides the invokeLater mechanism to schedule a Runnable for execution in the event thread.) Many concurrency errors in Swing applications stem from improper use of these confined objects from another thread.
Another common application of thread confinement is the use of pooled JDBC (Java Database Connectivity) Connection objects. The JDBC specification does not require that Connection objects be thread-safe.[9] In typical server applications, a thread acquires a connection from the pool, uses it for processing a single request, and returns it. Since most requests, such as servlet requests or EJB (Enterprise JavaBeans) calls, are processed synchronously by a single thread, and the pool will not dispense the same connection to another thread until it has been returned, this pattern of connection management implicitly confines the Connection to that thread for the duration of the request.

[9] The connection pool implementations provided by application servers are thread-safe; connection pools are necessarily accessed from multiple threads, so a non-thread-safe implementation would not make sense.
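A minimal sketch of this acquire-use-return pattern appears below; the ConnectionPool interface and RequestHandler class are hypothetical stand-ins for a real pool, not part of JDBC itself.

import java.sql.Connection;
import java.sql.SQLException;

// Illustrative sketch of confining a pooled connection to one thread per request.
public class RequestHandler {
    interface ConnectionPool {            // hypothetical pool abstraction
        Connection acquire() throws SQLException;
        void release(Connection conn);
    }

    private final ConnectionPool pool;

    public RequestHandler(ConnectionPool pool) { this.pool = pool; }

    public void handleRequest() throws SQLException {
        Connection conn = pool.acquire(); // confined to this thread for the request
        try {
            // Use conn to process this request only; never store it in a
            // shared field or hand it to another thread.
        } finally {
            pool.release(conn);           // confinement ends when the connection is returned
        }
    }
}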
Just as the language has no mechanism for enforcing that a variable is guarded by a lock, it has no means of confining an object to a thread. Thread confinement is an element of your program's design that must be enforced by its implementation. The language and core libraries provide mechanisms that can help in maintaining thread confinement (local variables and the ThreadLocal class), but even with these, it is still the programmer's responsibility to ensure that thread-confined objects do not escape from their intended thread.
3.3.1. Ad-hoc Thread Confinement

Ad-hoc thread confinement describes when the responsibility for maintaining thread confinement falls entirely on the implementation. Ad-hoc thread confinement can be fragile because none of the language features, such as visibility modifiers or local variables, helps confine the object to the target thread. In fact, references to thread-confined objects such as visual components or data models in GUI applications are often held in public fields.

The decision to use thread confinement is often a consequence of the decision to implement a particular subsystem, such as the GUI, as a single-threaded subsystem. Single-threaded subsystems can sometimes offer a simplicity benefit that outweighs the fragility of ad-hoc thread confinement.[10]

[10] Another reason to make a subsystem single-threaded is deadlock avoidance; this is one of the primary reasons most GUI frameworks are single-threaded. Single-threaded subsystems are covered in Chapter 9.
A special case of thread confinement applies to volatile variables. It is safe to perform read-modify-write operations on shared volatile variables as long as you ensure that the volatile variable is only written from a single thread. In this case, you are confining the modification to a single thread to prevent race conditions, and the visibility guarantees for volatile variables ensure that other threads see the most up-to-date value.
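Here is a small sketch of that single-writer idiom; SensorSampler and its methods are our own illustration, not one of the numbered listings.

// Illustrative sketch: one dedicated thread writes, any thread may read.
public class SensorSampler {
    private volatile double latestReading;   // written by exactly one thread

    // Called only from the dedicated sampler thread, so this
    // read-modify-write cannot race with another writer.
    void sample(double raw) {
        latestReading = 0.9 * latestReading + 0.1 * raw;
    }

    // May be called from any thread; volatile guarantees an up-to-date value.
    public double latest() {
        return latestReading;
    }
}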
Because of its fragility, ad-hoc thread confinement should be used sparingly; if possible, use one of the stronger forms of thread confinement (stack confinement or ThreadLocal) instead.
3.3.2. Stack Confinement

Stack confinement is a special case of thread confinement in which an object can only be reached through local variables. Just as encapsulation can make it easier to preserve invariants, local variables can make it easier to confine objects to a thread. Local variables are intrinsically confined to the executing thread; they exist on the executing thread's stack, which is not accessible to other threads. Stack confinement (also called within-thread or thread-local usage, but not to be confused with the ThreadLocal library class) is simpler to maintain and less fragile than ad-hoc thread confinement.

For primitively typed local variables, such as numPairs in loadTheArk in Listing 3.9, you cannot violate stack confinement even if you tried. There is no way to obtain a reference to a primitive variable, so the language semantics ensure that primitive local variables are always stack confined.
Listing 3.9. Thread Confinement of Local Primitive and Reference Variables.

public int loadTheArk(Collection<Animal> candidates) {
    SortedSet<Animal> animals;
    int numPairs = 0;
    Animal candidate = null;

    // animals confined to method, don't let them escape!
    animals = new TreeSet<Animal>(new SpeciesGenderComparator());
    animals.addAll(candidates);
    for (Animal a : animals) {
        if (candidate == null || !candidate.isPotentialMate(a))
            candidate = a;
        else {
            ark.load(new AnimalPair(candidate, a));
            ++numPairs;
            candidate = null;
        }
    }
    return numPairs;
}
Maintaining stack confinement for object references requires a little more assistance from the programmer to ensure that the referent does not escape. In loadTheArk, we instantiate a TreeSet and store a reference to it in animals. At this point, there is exactly one reference to the Set, held in a local variable and therefore confined to the executing thread. However, if we were to publish a reference to the Set (or any of its internals), the confinement would be violated and the animals would escape.

Using a non-thread-safe object in a within-thread context is still thread-safe. However, be careful: the design requirement that the object be confined to the executing thread, or the awareness that the confined object is not thread-safe, often exists only in the head of the developer when the code is written. If the assumption of within-thread usage is not clearly documented, future maintainers might mistakenly allow the object to escape.
3.3.3. ThreadLocal

A more formal means of maintaining thread confinement is ThreadLocal, which allows you to associate a per-thread value with a value-holding object. ThreadLocal provides get and set accessor methods that maintain a separate copy of the value for each thread that uses it, so a get returns the most recent value passed to set from the currently executing thread.

Thread-local variables are often used to prevent sharing in designs based on mutable Singletons or global variables. For example, a single-threaded application might maintain a global database connection that is initialized at startup to avoid having to pass a Connection to every method. Since JDBC connections may not be thread-safe, a multithreaded application that uses a global connection without additional coordination is not thread-safe either. By using a ThreadLocal to store the JDBC connection, as in ConnectionHolder in Listing 3.10, each thread will have its own connection.
Listing 3.10. Using ThreadLocal to Ensure Thread Confinement.

private static ThreadLocal<Connection> connectionHolder
    = new ThreadLocal<Connection>() {
        public Connection initialValue() {
            return DriverManager.getConnection(DB_URL);
        }
    };

public static Connection getConnection() {
    return connectionHolder.get();
}
This technique can also be used when a frequently used operation requires a temporary object such as a buffer and wants to avoid reallocating the temporary object on each invocation. For example, before Java 5.0, Integer.toString used a ThreadLocal to store the 12-byte buffer used for formatting its result, rather than using a shared static buffer (which would require locking) or allocating a new buffer for each invocation.[11]

[11] This technique is unlikely to be a performance win unless the operation is performed very frequently or the allocation is unusually expensive. In Java 5.0 it was replaced with the more straightforward approach of allocating a new buffer for every invocation, suggesting that for something as mundane as a temporary buffer, it is not a performance win.
When a thread calls ThreadLocal.get for the first time, initialValue is consulted to provide the initial value for that thread. Conceptually, you can think of a ThreadLocal<T> as holding a Map<Thread,T> that stores the thread-specific values, though this is not how it is actually implemented. The thread-specific values are stored in the Thread object itself; when the thread terminates, the thread-specific values can be garbage collected.
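The buffer-reuse technique described above might look roughly like the following sketch; FormatUtil and BUF_SIZE are our own illustrative names, not the JDK's actual implementation, and the formatter assumes a nonnegative argument.

// Illustrative sketch: one formatting buffer per thread via ThreadLocal.
public class FormatUtil {
    private static final int BUF_SIZE = 12;
    private static final ThreadLocal<char[]> buffer =
        new ThreadLocal<char[]>() {
            protected char[] initialValue() {
                return new char[BUF_SIZE];   // allocated once per thread
            }
        };

    public static String formatDigits(int n) {   // assumes n >= 0
        char[] buf = buffer.get();               // reuse this thread's buffer
        int pos = BUF_SIZE;
        do {
            buf[--pos] = (char) ('0' + (n % 10));
            n /= 10;
        } while (n > 0);
        return new String(buf, pos, BUF_SIZE - pos);
    }
}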
If you are porting a single-threaded application to a multithreaded environment, you can preserve thread safety by converting shared global variables into ThreadLocals, if the semantics of the shared globals permits this; an application-wide cache would not be as useful if it were turned into a number of thread-local caches.

ThreadLocal is widely used in implementing application frameworks. For example, J2EE containers associate a transaction context with an executing thread for the duration of an EJB call. This is easily implemented using a static ThreadLocal holding the transaction context: when framework code needs to determine what transaction is currently running, it fetches the transaction context from this ThreadLocal. This is convenient in that it reduces the need to pass execution context information into every method, but couples any code that uses this mechanism to the framework.

It is easy to abuse ThreadLocal by treating its thread confinement property as a license to use global variables or as a means of creating "hidden" method arguments. Like global variables, thread-local variables can detract from reusability and introduce hidden couplings among classes, and should therefore be used with care.
3.4. Immutability

The other end-run around the need to synchronize is to use immutable objects [EJ Item 13]. Nearly all the atomicity and visibility hazards we've described so far, such as seeing stale values, losing updates, or observing an object to be in an inconsistent state, have to do with the vagaries of multiple threads trying to access the same mutable state at the same time. If an object's state cannot be modified, these risks and complexities simply go away.

An immutable object is one whose state cannot be changed after construction. Immutable objects are inherently thread-safe; their invariants are established by the constructor, and if their state cannot be changed, these invariants always hold.

Immutable objects are always thread-safe.

Immutable objects are simple. They can only be in one state, which is carefully controlled by the constructor. One of the most difficult elements of program design is reasoning about the possible states of complex objects. Reasoning about the state of immutable objects, on the other hand, is trivial.

Immutable objects are also safer. Passing a mutable object to untrusted code, or otherwise publishing it where untrusted code could find it, is dangerous: the untrusted code might modify its state, or, worse, retain a reference to it and modify its state later from another thread. On the other hand, immutable objects cannot be subverted in this manner by malicious or buggy code, so they are safe to share and publish freely without the need to make defensive copies [EJ Item 24].

Neither the Java Language Specification nor the Java Memory Model formally defines immutability, but immutability is not equivalent to simply declaring all fields of an object final. An object whose fields are all final may still be mutable, since final fields can hold references to mutable objects.
An object is immutable if:
- Its state cannot be modified after construction;
- All its fields are final;[12] and
- It is properly constructed (the this reference does not escape during construction).

[12] It is technically possible to have an immutable object without all fields being final. String is such a class, but this relies on delicate reasoning about benign data races that requires a deep understanding of the Java Memory Model. (For the curious: String lazily computes the hash code the first time hashCode is called and caches it in a nonfinal field, but this works only because that field can take on only one nondefault value that is the same every time it is computed, because it is derived deterministically from immutable state. Don't try this at home.)
Immutable objects can still use mutable objects internally to manage their state, as illustrated by ThreeStooges in Listing 3.11. While the Set that stores the names is mutable, the design of ThreeStooges makes it impossible to modify that Set after construction. The stooges reference is final, so all object state is reached through a final field. The last requirement, proper construction, is easily met since the constructor does nothing that would cause the this reference to become accessible to code other than the constructor and its caller.
Listing 3.11. Immutable Class Built Out of Mutable Underlying Objects.

@Immutable
public final class ThreeStooges {
    private final Set<String> stooges = new HashSet<String>();

    public ThreeStooges() {
        stooges.add("Moe");
        stooges.add("Larry");
        stooges.add("Curly");
    }

    public boolean isStooge(String name) {
        return stooges.contains(name);
    }
}
Because program state changes all the time, you might be tempted to think that immutable objects are of limited use, but this is not the case. There is a difference between an object being immutable and the reference to it being immutable. Program state stored in immutable objects can still be updated by "replacing" immutable objects with a new instance holding new state; the next section offers an example of this technique.[13]

[13] Many developers fear that this approach will create performance problems, but these fears are usually unwarranted. Allocation is cheaper than you might think, and immutable objects offer additional performance advantages such as reduced need for locking or defensive copies and reduced impact on generational garbage collection.
3.4.1. Final Fields

The final keyword, a more limited version of the const mechanism from C++, supports the construction of immutable objects. Final fields can't be modified (although the objects they refer to can be modified if they are mutable), but they also have special semantics under the Java Memory Model. It is the use of final fields that makes possible the guarantee of initialization safety (see Section 3.5.2) that lets immutable objects be freely accessed and shared without synchronization.

Even if an object is mutable, making some fields final can still simplify reasoning about its state, since limiting the mutability of an object restricts its set of possible states. An object that is "mostly immutable" but has one or two mutable state variables is still simpler than one that has many mutable variables. Declaring fields final also documents to maintainers that these fields are not expected to change.

Just as it is a good practice to make all fields private unless they need greater visibility [EJ Item 12], it is a good practice to make all fields final unless they need to be mutable.
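The distinction between a final reference and an immutable object can be seen in the following sketch; MostlyImmutable is our own illustration, not one of the numbered listings.

import java.util.ArrayList;
import java.util.List;

// Illustrative sketch: all fields are final, yet the object is still mutable.
public class MostlyImmutable {
    private final String id;                       // truly immutable state
    private final List<String> tags =
        new ArrayList<String>();                   // final reference to a mutable object

    public MostlyImmutable(String id) { this.id = id; }

    public String getId() { return id; }

    // This mutator shows the class is not immutable despite its all-final fields.
    public void addTag(String tag) { tags.add(tag); }
}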
3.4.2. Example: Using Volatile to Publish Immutable Objects

In UnsafeCachingFactorizer on page 24, we tried to use two AtomicReferences to store the last number and last factors, but this was not thread-safe because we could not fetch or update the two related values atomically. Using volatile variables for these values would not be thread-safe for the same reason. However, immutable objects can sometimes provide a weak form of atomicity.

The factoring servlet performs two operations that must be atomic: updating the cached result and conditionally fetching the cached factors if the cached number matches the requested number. Whenever a group of related data items must be acted on atomically, consider creating an immutable holder class for them, such as OneValueCache[14] in Listing 3.12.

[14] OneValueCache wouldn't be immutable without the copyOf calls in the constructor and getter. Arrays.copyOf was added as a convenience in Java 6; clone would also work.
Race conditions in accessing or updating multiple related variables can be eliminated by using an immutable object to hold all the variables. With a mutable holder object, you would have to use locking to ensure atomicity; with an immutable one, once a thread acquires a reference to it, it need never worry about another thread modifying its state. If the variables are to be updated, a new holder object is created, but any threads working with the previous holder still see it in a consistent state.
Listing 3.12. Immutable Holder for Caching a Number and its Factors.

@Immutable
class OneValueCache {
    private final BigInteger lastNumber;
    private final BigInteger[] lastFactors;

    public OneValueCache(BigInteger i,
                         BigInteger[] factors) {
        lastNumber = i;
        lastFactors = Arrays.copyOf(factors, factors.length);
    }

    public BigInteger[] getFactors(BigInteger i) {
        if (lastNumber == null || !lastNumber.equals(i))
            return null;
        else
            return Arrays.copyOf(lastFactors, lastFactors.length);
    }
}
VolatileCachedFactorizer in Listing 3.13 uses a OneValueCache to store the cached number and factors. When a thread sets the volatile cache field to reference a new OneValueCache, the new cached data becomes immediately visible to other threads.

The cache-related operations cannot interfere with each other because OneValueCache is immutable and the cache field is accessed only once in each of the relevant code paths. This combination of an immutable holder object for multiple state variables related by an invariant, and a volatile reference used to ensure its timely visibility, allows VolatileCachedFactorizer to be thread-safe even though it does no explicit locking.
3.5. Safe Publication

So far we have focused on ensuring that an object not be published, such as when it is supposed to be confined to a thread or within another object. Of course, sometimes we do want to share objects across threads, and in this case we must do so safely. Unfortunately, simply storing a reference to an object into a public field, as in Listing 3.14, is not enough to publish that object safely.
Listing 3.13. Caching the Last Result Using a Volatile Reference to an Immutable Holder Object.

@ThreadSafe
public class VolatileCachedFactorizer implements Servlet {
    private volatile OneValueCache cache =
        new OneValueCache(null, null);

    public void service(ServletRequest req, ServletResponse resp) {
        BigInteger i = extractFromRequest(req);
        BigInteger[] factors = cache.getFactors(i);
        if (factors == null) {
            factors = factor(i);
            cache = new OneValueCache(i, factors);
        }
        encodeIntoResponse(resp, factors);
    }
}
Listing 3.14. Publishing an Object without Adequate Synchronization. Don't do this.

// Unsafe publication
public Holder holder;

public void initialize() {
    holder = new Holder(42);
}

You may be surprised at how badly this harmless-looking example could fail. Because of visibility problems, the Holder could appear to another thread to be in an inconsistent state, even though its invariants were properly established by its constructor! This improper publication could allow another thread to observe a partially constructed object.
3.5.1. Improper Publication: When Good Objects Go Bad

You cannot rely on the integrity of partially constructed objects. An observing thread could see the object in an inconsistent state, and then later see its state suddenly change, even though it has not been modified since publication.
In fact, if the Holder in Listing 3.15 is published using the unsafe publication idiom in Listing 3.14, and a thread other than the publishing thread were to call assertSanity, it could throw AssertionError![15]

[15] The problem here is not the Holder class itself, but that the Holder is not properly published. However, Holder can be made immune to improper publication by declaring the n field to be final, which would make Holder immutable; see Section 3.5.2.
Listing 3.15. Class at Risk of Failure if Not Properly Published.

public class Holder {
    private int n;

    public Holder(int n) { this.n = n; }

    public void assertSanity() {
        if (n != n)
            throw new AssertionError("This statement is false.");
    }
}
Because synchronization was not used to make the Holder visible to other threads, we say the Holder was not properly published. Two things can go wrong with improperly published objects. Other threads could see a stale value for the holder field, and thus see a null reference or other older value even though a value has been placed in holder. But far worse, other threads could see an up-to-date value for the holder reference, but stale values for the state of the Holder.[16] To make things even less predictable, a thread may see a stale value the first time it reads a field and then a more up-to-date value the next time, which is why assertSanity can throw AssertionError.

[16] While it may seem that field values set in a constructor are the first values written to those fields and therefore that there are no "older" values to see as stale values, the Object constructor first writes the default values to all fields before subclass constructors run. It is therefore possible to see the default value for a field as a stale value.

At the risk of repeating ourselves, some very strange things can happen when data is shared across threads without sufficient synchronization.
3.5.2. Immutable Objects and Initialization Safety

Because immutable objects are so important, the Java Memory Model offers a special guarantee of initialization safety for sharing immutable objects. As we've seen, that an object reference becomes visible to another thread does not necessarily mean that the state of that object is visible to the consuming thread. In order to guarantee a consistent view of the object's state, synchronization is needed.
Immutable objects, on the other hand, can be safely accessed even when synchronization is not used to publish the object reference. For this guarantee of initialization safety to hold, all of the requirements for immutability must be met: unmodifiable state, all fields are final, and proper construction. (If Holder in Listing 3.15 were immutable, assertSanity could not throw AssertionError, even if the Holder was not properly published.)

Immutable objects can be used safely by any thread without additional synchronization, even when synchronization is not used to publish them.

This guarantee extends to the values of all final fields of properly constructed objects; final fields can be safely accessed without additional synchronization. However, if final fields refer to mutable objects, synchronization is still required to access the state of the objects they refer to.
3.5.3. Safe Publication Idioms

Objects that are not immutable must be safely published, which usually entails synchronization by both the publishing and the consuming thread. For the moment, let's focus on ensuring that the consuming thread can see the object in its as-published state; we'll deal with visibility of modifications made after publication soon.

To publish an object safely, both the reference to the object and the object's state must be made visible to other threads at the same time. A properly constructed object can be safely published by:
- Initializing an object reference from a static initializer;
- Storing a reference to it into a volatile field or AtomicReference;
- Storing a reference to it into a final field of a properly constructed object; or
- Storing a reference to it into a field that is properly guarded by a lock.
The internal synchronization in thread-safe collections means that placing an object in a thread-safe collection, such as a Vector or synchronizedList, fulfills the last of these requirements. If thread A places object X in a thread-safe collection and thread B subsequently retrieves it, B is guaranteed to see the state of X as A left it, even though the application code that hands X off in this manner has no explicit synchronization. The thread-safe library collections offer the following safe publication guarantees, even if the Javadoc is less than clear on the subject:
- Placing a key or value in a Hashtable, synchronizedMap, or ConcurrentMap safely publishes it to any thread that retrieves it from the Map (whether directly or via an iterator);
- Placing an element in a Vector, CopyOnWriteArrayList, CopyOnWriteArraySet, synchronizedList, or synchronizedSet safely publishes it to any thread that retrieves it from the collection;
- Placing an element on a BlockingQueue or a ConcurrentLinkedQueue safely publishes it to any thread that retrieves it from the queue.

Other hand-off mechanisms in the class library (such as Future and Exchanger) also constitute safe publication; we will identify these as providing safe publication as they are introduced.
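The hand-off guarantee can be seen in the following sketch; the Message class and thread wiring are our own illustration, not one of the numbered listings.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Illustrative sketch: a BlockingQueue hand-off safely publishes the Message.
public class HandOff {
    static class Message {
        final String text;                 // state written before publication
        Message(String text) { this.text = text; }
    }

    public static void main(String[] args) throws InterruptedException {
        final BlockingQueue<Message> queue = new LinkedBlockingQueue<Message>();

        Thread consumer = new Thread(new Runnable() {
            public void run() {
                try {
                    // take() safely publishes the Message: its text is
                    // guaranteed to be visible exactly as the producer left it.
                    System.out.println(queue.take().text);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        });
        consumer.start();
        queue.put(new Message("hello"));   // hand-off; no explicit locking needed
        consumer.join();
    }
}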
Using a static initializer is often the easiest and safest way to publish objects that can be statically constructed:

public static Holder holder = new Holder(42);

Static initializers are executed by the JVM at class initialization time; because of internal synchronization in the JVM, this mechanism is guaranteed to safely publish any objects initialized in this way [JLS 12.4.2].
3.5.4. Effectively Immutable Objects

Safe publication is sufficient for other threads to safely access objects that are not going to be modified after publication without additional synchronization. The safe publication mechanisms all guarantee that the as-published state of an object is visible to all accessing threads as soon as the reference to it is visible, and if that state is not going to be changed again, this is sufficient to ensure that any access is safe.

Objects that are not technically immutable, but whose state will not be modified after publication, are called effectively immutable. They do not need to meet the strict definition of immutability in Section 3.4; they merely need to be treated by the program as if they were immutable after they are published. Using effectively immutable objects can simplify development and improve performance by reducing the need for synchronization.

Safely published effectively immutable objects can be used safely by any thread without additional synchronization.
For example, Date is mutable,[17] but if you use it as if it were immutable, you may be able to eliminate the locking that would otherwise be required when sharing a Date across threads. Suppose you want to maintain a Map storing the last login time of each user:

[17] This was probably a mistake in the class library design.

public Map<String, Date> lastLogin =
    Collections.synchronizedMap(new HashMap<String, Date>());

If the Date values are not modified after they are placed in the Map, then the synchronization in the synchronizedMap implementation is sufficient to publish the Date values safely, and no additional synchronization is needed when accessing them.
3.5.5. Mutable Objects

If an object may be modified after construction, safe publication ensures only the visibility of the as-published state. Synchronization must be used not only to publish a mutable object, but also every time the object is accessed to ensure visibility of subsequent modifications. To share mutable objects safely, they must be safely published and be either thread-safe or guarded by a lock.

The publication requirements for an object depend on its mutability:
- Immutable objects can be published through any mechanism;
- Effectively immutable objects must be safely published;
- Mutable objects must be safely published, and must be either thread-safe or guarded by a lock.
3.5.6. Sharing Objects Safely

Whenever you acquire a reference to an object, you should know what you are allowed to do with it. Do you need to acquire a lock before using it? Are you allowed to modify its state, or only to read it? Many concurrency errors stem from failing to understand these "rules of engagement" for a shared object. When you publish an object, you should document how the object can be accessed.

The most useful policies for using and sharing objects in a concurrent program are:

Thread-confined. A thread-confined object is owned exclusively by and confined to one thread, and can be modified by its owning thread.

Shared read-only. A shared read-only object can be accessed concurrently by multiple threads without additional synchronization, but cannot be modified by any thread. Shared read-only objects include immutable and effectively immutable objects.

Shared thread-safe. A thread-safe object performs synchronization internally, so multiple threads can freely access it through its public interface without further synchronization.

Guarded. A guarded object can be accessed only with a specific lock held. Guarded objects include those that are encapsulated within other thread-safe objects and published objects that are known to be guarded by a specific lock.
.*) MA">
Chapter 4. Composing Objects

So far, we've covered the low-level basics of thread safety and synchronization. But we don't want to have to analyze each memory access to ensure that our program is thread-safe; we want to be able to take thread-safe components and safely compose them into larger components or programs. This chapter covers patterns for structuring classes that can make it easier to make them thread-safe and to maintain them without accidentally undermining their safety guarantees.

4.1. Designing a Thread-safe Class

While it is possible to write a thread-safe program that stores all its state in public static fields, it is a lot harder to verify its thread safety or to modify it so that it remains thread-safe than one that uses encapsulation appropriately. Encapsulation makes it possible to determine that a class is thread-safe without having to examine the entire program.
The design process for a thread-safe class should include these three basic elements:
- Identify the variables that form the object's state;
- Identify the invariants that constrain the state variables;
- Establish a policy for managing concurrent access to the object's state.

An object's state starts with its fields. If they are all of primitive type, the fields comprise the entire state. Counter in Listing 4.1 has only one field, so the value field comprises its entire state. The state of an object with n primitive fields is just the n-tuple of its field values; the state of a 2D Point is its (x, y) value. If the object has fields that are references to other objects, its state will encompass fields from the referenced objects as well. For example, the state of a LinkedList includes the state of all the link node objects belonging to the list.

The synchronization policy defines how an object coordinates access to its state without violating its invariants or postconditions. It specifies what combination of immutability, thread confinement, and locking is used to maintain thread safety, and which variables are guarded by which locks. To ensure that the class can be analyzed and maintained, document the synchronization policy.
Listing 4.1. Simple Thread-safe Counter Using the Java Monitor Pattern.

@ThreadSafe
public final class Counter {
    @GuardedBy("this") private long value = 0;

    public synchronized long getValue() {
        return value;
    }
    public synchronized long increment() {
        if (value == Long.MAX_VALUE)
            throw new IllegalStateException("counter overflow");
        return ++value;
    }
}
4.1.1. Gathering Synchronization Requirements

Making a class thread-safe means ensuring that its invariants hold under concurrent access; this requires reasoning about its state. Objects and variables have a state space: the range of possible states they can take on. The smaller this state space, the easier it is to reason about. By using final fields wherever practical, you make it simpler to analyze the possible states an object can be in. (In the extreme case, immutable objects can only be in a single state.)

Many classes have invariants that identify certain states as valid or invalid. The value field in Counter is a long. The state space of a long ranges from Long.MIN_VALUE to Long.MAX_VALUE, but Counter places constraints on value: negative values are not allowed.

Similarly, operations may have postconditions that identify certain state transitions as invalid. If the current state of a Counter is 17, the only valid next state is 18. When the next state is derived from the current state, the operation is necessarily a compound action. Not all operations impose state transition constraints; when updating a variable that holds the current temperature, its previous state does not affect the computation.
Constraints placed on states or state transitions by invariants and postconditions create additional synchronization or encapsulation requirements. If certain states are invalid, then the underlying state variables must be encapsulated, otherwise client code could put the object into an invalid state. If an operation has invalid state transitions, it must be made atomic. On the other hand, if the class does not impose any such constraints, we may be able to relax encapsulation or serialization requirements to obtain greater flexibility or better performance.
A class can also have invariants that constrain multiple state variables. A number range class, like NumberRange in Listing 4.10, typically maintains state variables for the lower and upper bounds of the range. These variables must obey the constraint that the lower bound be less than or equal to the upper bound. Multivariable invariants like this one create atomicity requirements: related variables must be fetched or updated in a single atomic operation. You cannot update one, release and reacquire the lock, and then update the others, since this could involve leaving the object in an invalid state when the lock was released. When multiple variables participate in an invariant, the lock that guards them must be held for the duration of any operation that accesses the related variables.

You cannot ensure thread safety without understanding an object's invariants and postconditions. Constraints on the valid values or state transitions for state variables can create atomicity and encapsulation requirements.
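One way to satisfy such a multivariable invariant is to check and write both bounds under a single lock, as in the following sketch; SafeNumberRange is our own illustration, not the book's NumberRange listing.

// Illustrative sketch: the invariant lower <= upper is preserved because
// every check-then-act on the pair happens while holding the same lock.
public class SafeNumberRange {
    // INVARIANT: lower <= upper; both fields guarded by "this"
    private int lower = 0;
    private int upper = 0;

    public synchronized void setLower(int i) {
        if (i > upper)
            throw new IllegalArgumentException("can't set lower above upper");
        lower = i;
    }

    public synchronized void setUpper(int i) {
        if (i < lower)
            throw new IllegalArgumentException("can't set upper below lower");
        upper = i;
    }

    public synchronized boolean contains(int i) {
        return i >= lower && i <= upper;   // both reads under the same lock
    }
}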
4.1.2. State-dependent Operations

Class invariants and method postconditions constrain the valid states and state transitions for an object. Some objects also have methods with state-based preconditions. For example, you cannot remove an item from an empty queue; a queue must be in the "nonempty" state before you can remove an element. Operations with state-based preconditions are called state-dependent [CPJ 3].

In a single-threaded program, if a precondition does not hold, the operation has no choice but to fail. But in a concurrent program, the precondition may become true later due to the action of another thread. Concurrent programs add the possibility of waiting until the precondition becomes true, and then proceeding with the operation.
The built-in mechanisms for efficiently waiting for a condition to become true, wait and notify, are tightly bound to intrinsic locking, and can be difficult to use correctly. To create operations that wait for a precondition to become true before proceeding, it is often easier to use existing library classes, such as blocking queues or semaphores, to provide the desired state-dependent behavior. Blocking library classes such as BlockingQueue, Semaphore, and other synchronizers are covered in Chapter 5; creating state-dependent classes using the low-level mechanisms provided by the platform and class library is covered in Chapter 14.
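As a small illustration (our own, not one of the numbered listings) of letting a library class handle the "nonempty" precondition:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

// Illustrative sketch: take() blocks until the precondition (nonempty) holds.
public class WaitForPrecondition {
    public static void main(String[] args) throws InterruptedException {
        final BlockingQueue<String> queue = new ArrayBlockingQueue<String>(10);

        Thread producer = new Thread(new Runnable() {
            public void run() {
                try {
                    Thread.sleep(100);        // queue stays empty for a while
                    queue.put("work item");   // makes the precondition true
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        });
        producer.start();

        // No wait/notify needed: take() waits for the queue to become nonempty.
        System.out.println("got: " + queue.take());
        producer.join();
    }
}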
4.1.3. State Ownership

We implied in Section 4.1 that an object's state could be a subset of the fields in the object graph rooted at that object. Why might it be a subset? Under what conditions are fields reachable from a given object not part of that object's state?

When defining which variables form an object's state, we want to consider only the data that object owns. Ownership is not embodied explicitly in the language, but is instead an element of class design. If you allocate and populate a HashMap, you are creating multiple objects: the HashMap object, a number of Map.Entry objects used by the implementation of HashMap, and perhaps other internal objects as well. The logical state of a HashMap includes the state of all its Map.Entry and internal objects, even though they are implemented as separate objects.

For better or worse, garbage collection lets us avoid thinking carefully about ownership. When passing an object to a method in C++, you have to think fairly carefully about whether you are transferring ownership, engaging in a short-term loan, or envisioning long-term joint ownership. In Java, all these same ownership models are possible, but the garbage collector reduces the cost of many of the common errors in reference sharing, enabling less-than-precise thinking about ownership.
In many cases, ownership and encapsulation go together: the object encapsulates the state it owns and owns the state it encapsulates. It is the owner of a given state variable that gets to decide on the locking protocol used to maintain the integrity of that variable's state. Ownership implies control, but once you publish a reference to a mutable object, you no longer have exclusive control; at best, you might have "shared ownership". A class usually does not own the objects passed to its methods or constructors, unless the method is designed to explicitly transfer ownership of objects passed in (such as the synchronized collection wrapper factory methods).
Collection classes often exhibit a form of "split ownership", in which the collection owns the state of the collection infrastructure, but client code owns the objects stored in the collection. An example is ServletContext from the servlet framework. ServletContext provides a Map-like object container service to servlets where they can register and retrieve application objects by name with setAttribute and getAttribute. The ServletContext object implemented by the servlet container must be thread-safe, because it will necessarily be accessed by multiple threads. Servlets need not use synchronization when calling setAttribute and getAttribute, but they may have to use synchronization when using the objects stored in the ServletContext. These objects are owned by the application; they are being stored for safekeeping by the servlet container on the application's behalf. Like all shared objects, they must be shared safely; in order to prevent interference from multiple threads accessing the same object concurrently, they should either be thread-safe, effectively immutable, or explicitly guarded by a lock.[1]

[1] Interestingly, the HttpSession object, which performs a similar function in the servlet framework, may have stricter requirements. Because the servlet container may access the objects in the HttpSession so they can be serialized for replication or passivation, they must be thread-safe, because the container will be accessing them as well as the web application. (We say "may have" since replication and passivation is outside of the servlet specification but is a common feature of servlet containers.)
4.2. Instance Confinement

If an object is not thread-safe, several techniques can still let it be used safely in a multithreaded program. You can ensure that it is only accessed from a single thread (thread confinement), or that all access to it is properly guarded by a lock.

Encapsulation simplifies making classes thread-safe by promoting instance confinement, often just called confinement [CPJ 2.3.3]. When an object is encapsulated within another object, all code paths that have access to the encapsulated object are known and can therefore be analyzed more easily than if that object were accessible to the entire program. Combining confinement with an appropriate locking discipline can ensure that otherwise non-thread-safe objects are used in a thread-safe manner.
Encapsulating data within an object confines access to the data to the object's methods, making it easier to ensure that the data is always accessed with the appropriate lock held.

Confined objects must not escape their intended scope. An object may be confined to a class instance (such as a private class member), a lexical scope (such as a local variable), or a thread (such as an object that is passed from method to method within a thread, but not supposed to be shared across threads). Objects don't escape on their own, of course; they need help from the developer, who assists by publishing the object beyond its intended scope.

PersonSet in Listing 4.2 illustrates how confinement and locking can work together to make a class thread-safe even when its component state variables are not. The state of PersonSet is managed by a HashSet, which is not thread-safe. But because mySet is private and not allowed to escape, the HashSet is confined to the PersonSet. The only code paths that can access mySet are addPerson and containsPerson, and each of these acquires the lock on the PersonSet. All its state is guarded by its intrinsic lock, making PersonSet thread-safe.
Listing 4.2. Using Confinement to Ensure Thread Safety.
@ThreadSafe
public class PersonSet {
    @GuardedBy("this")
    private final Set<Person> mySet = new HashSet<Person>();

    public synchronized void addPerson(Person p) {
        mySet.add(p);
    }

    public synchronized boolean containsPerson(Person p) {
        return mySet.contains(p);
    }
}
This example makes no assumptions about the thread-safety of Person, but if it is mutable, additional synchronization will be needed when accessing a Person retrieved from a PersonSet. The most reliable way to do this would be to make Person thread-safe; less reliable would be to guard the Person objects with a lock and ensure that all clients follow the protocol of acquiring the appropriate lock before accessing the Person.
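To make the second, less reliable option concrete, here is a hypothetical sketch (the Person class and its address field are inventions for illustration, not part of the listings): every client must hold the agreed-upon lock, here the Person instance itself, before touching its state.
public class Person {
    // Guarded by the Person instance itself -- by convention only;
    // nothing prevents a careless client from skipping the lock.
    private String address;

    public String getAddress() { return address; }
    public void setAddress(String address) { this.address = address; }
}

// Every client must follow the protocol:
//     synchronized (person) {
//         person.setAddress("12 Elm St.");
//     }
The fragility is apparent: the protocol lives in documentation and convention rather than in the class itself.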
Instance confinement is one of the easiest ways to build thread-safe classes. It also allows flexibility in the choice of locking strategy; PersonSet happened to use its own intrinsic lock to guard its state, but any lock, consistently used, would do just as well. Instance confinement also allows different state variables to be guarded by different locks. (For an example of a class that uses multiple lock objects to guard its state, see ServerStatus on page 236.)
There are many examples of confinement in the platform class libraries, including some classes that exist solely to turn non-thread-safe classes into thread-safe ones. The basic collection classes such as ArrayList and HashMap are not thread-safe, but the class library provides wrapper factory methods (Collections.synchronizedList and friends) so they can be used safely in multithreaded environments. These factories use the Decorator pattern (Gamma et al., 1995) to wrap the collection with a synchronized wrapper object; the wrapper implements each method of the appropriate interface as a synchronized method that forwards the request to the underlying collection object. So long as the wrapper object holds the only reachable reference to the underlying collection (i.e., the underlying collection is confined to the wrapper), the wrapper object is then thread-safe. The Javadoc for these methods warns that all access to the underlying collection must be made through the wrapper.
Of course, it is still possible to violate confinement by publishing a supposedly confined object; if an object is intended to be confined to a specific scope, then letting it escape from that scope is a bug. Confined objects can also escape by publishing other objects such as iterators or inner class instances that may indirectly publish the confined objects.
Confinement makes it easier to build thread-safe classes because a class that confines its state can be analyzed for thread safety without having to examine the whole program.
4.2.1. The Java Monitor Pattern
Following the principle of instance confinement to its logical conclusion leads you to the Java monitor pattern.[2] An object following the Java monitor pattern encapsulates all its mutable state and guards it with the object's own intrinsic lock.
[2] The Java monitor pattern is inspired by Hoare's work on monitors (Hoare, 1974), though there are significant differences between this pattern and a true monitor. The bytecode instructions for entering and exiting a synchronized block are even called monitorenter and monitorexit, and Java's built-in (intrinsic) locks are sometimes called monitor locks or monitors.
D2)".&9 ln LlsLlng 4.1 shows a Lyplcal example of Lhls paLLern. lL encapsulaLes one sLaLe varlable, /$0)&, and all access
Lo LhaL sLaLe varlable ls Lhrough Lhe meLhods of D2)".&9, whlch are all synchronlzed.
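Listing 4.1 itself appears earlier in the book and is not reproduced in this excerpt; a minimal sketch consistent with the description here (a single value field, every access through a synchronized method) would look something like this, where the overflow guard is a plausible detail of our sketch rather than a quotation:
@ThreadSafe
public final class Counter {
    @GuardedBy("this") private long value = 0;

    public synchronized long getValue() {
        return value;
    }

    public synchronized long increment() {
        if (value == Long.MAX_VALUE)
            throw new IllegalStateException("counter overflow");
        return ++value;
    }
}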
The Java monitor pattern is used by many library classes, such as Vector and Hashtable. Sometimes a more sophisticated synchronization policy is needed; Chapter 11 shows how to improve scalability through finer-grained locking strategies. The primary advantage of the Java monitor pattern is its simplicity.
The Java monitor pattern is merely a convention; any lock object could be used to guard an object's state so long as it is used consistently. Listing 4.3 illustrates a class that uses a private lock to guard its state.
Listing 4.3. Guarding State with a Private Lock.
public class PrivateLock {
    private final Object myLock = new Object();
    @GuardedBy("myLock") Widget widget;

    void someMethod() {
        synchronized (myLock) {
            // Access or modify the state of widget
        }
    }
}
There are advantages to using a private lock object instead of an object's intrinsic lock (or any other publicly accessible lock). Making the lock object private encapsulates the lock so that client code cannot acquire it, whereas a publicly accessible lock allows client code to participate in its synchronization policy, correctly or incorrectly. Clients that improperly acquire another object's lock could cause liveness problems, and verifying that a publicly accessible lock is properly used requires examining the entire program rather than a single class.
4.2.2. Example: Tracking Fleet Vehicles
D2)".&9 ln LlsLlng 4.1 ls a conclse, buL Lrlvlal, example of Lhe !ava monlLor paLLern. LeL's bulld a sllghLly less Lrlvlal
example: a "vehlcle Lracker" for dlspaLchlng fleeL vehlcles such as Laxlcabs, pollce cars, or dellvery Lrucks. We'll bulld lL
flrsL uslng Lhe monlLor paLLern, and Lhen see how Lo relax some of Lhe encapsulaLlon requlremenLs whlle reLalnlng
Lhread safeLy.
Each vehicle is identified by a String and has a location represented by (x, y) coordinates. The VehicleTracker classes encapsulate the identity and locations of the known vehicles, making them well-suited as a data model in a model-view-controller GUI application where it might be shared by a view thread and multiple updater threads. The view thread would fetch the names and locations of the vehicles and render them on a display:
F$GR'.94"6e @24".T 02*$.42"# ] /&84*0&#+6&.52*$.42"#UV[
%29 U'.94"6 C&7 h 02*$.42"#+C&7'&.UVV
9&";&9B&84*0&UC&7e 02*$.42"#+6&.UC&7VV[


Similarly, the updater threads would modify vehicle locations with data received from GPS devices or entered manually by a dispatcher through a GUI interface:
void vehicleMoved(VehicleMovedEvent evt) {
    Point loc = evt.getNewLocation();
    vehicles.setLocation(evt.getVehicleId(), loc.x, loc.y);
}
Since the view thread and the updater threads will access the data model concurrently, it must be thread-safe. Listing 4.4 shows an implementation of the vehicle tracker using the Java monitor pattern that uses MutablePoint in Listing 4.5 for representing the vehicle locations.
Even though MutablePoint is not thread-safe, the tracker class is. Neither the map nor any of the mutable points it contains is ever published. When we need to return vehicle locations to callers, the appropriate values are copied using either the MutablePoint copy constructor or deepCopy, which creates a new Map whose values are copies of those in the old Map.[3]

[3] Note that deepCopy can't just wrap the Map with an unmodifiableMap, because that protects only the collection from modification; it does not prevent callers from modifying the mutable objects stored in it. For the same reason, populating the HashMap in deepCopy via a copy constructor wouldn't work either, because only the references to the points would be copied, not the point objects themselves.
This implementation maintains thread safety in part by copying mutable data before returning it to the client. This is usually not a performance issue, but could become one if the set of vehicles is very large.[4] Another consequence of copying the data on each call to getLocations is that the contents of the returned collection do not change even if the underlying locations change. Whether this is good or bad depends on your requirements. It could be a benefit if there are internal consistency requirements on the location set, in which case returning a consistent snapshot is critical, or a drawback if callers require up-to-date information for each vehicle and therefore need to refresh their snapshot more often.
[4] Because deepCopy is called from a synchronized method, the tracker's intrinsic lock is held for the duration of what might be a long-running copy operation, and this could degrade the responsiveness of the user interface when many vehicles are being tracked.
4.3. Delegating Thread Safety
All but the most trivial objects are composite objects. The Java monitor pattern is useful when building classes from scratch or composing classes out of objects that are not thread-safe. But what if the components of our class are already thread-safe? Do we need to add an additional layer of thread safety? The answer is . . . "it depends". In some cases a composite made of thread-safe components is thread-safe (Listings 4.7 and 4.9), and in others it is merely a good start (4.10).
ln D2)".4"6<$*.294:&9 on page 23, we added an 1.234*52"6 Lo an oLherwlse sLaLeless ob[ecL, and Lhe resulLlng
composlLe ob[ecL was sLlll Lhreadsafe. Slnce Lhe sLaLe of D2)".4"6<$*.294:&9 ls Lhe sLaLe of Lhe Lhreadsafe
1.234*52"6, and slnce D2)".4"6<$*.294:&9 lmposes no addlLlonal valldlLy consLralnLs on Lhe sLaLe of Lhe counLer, lL ls
easy Lo see LhaL D2)".4"6<$*.294:&9 ls Lhreadsafe. We could say LhaL D2)".4"6<$*.294:&9 delegaLes lLs Lhread
safeLy responslblllLles Lo Lhe 1.234*52"6: D2)".4"6<$*.294:&9 ls Lhreadsafe because 1.234*52"6 ls.
[3]

[5] If count were not final, the thread safety analysis of CountingFactorizer would be more complicated. If CountingFactorizer could modify count to reference a different AtomicLong, we would then have to ensure that this update was visible to all threads that might access the count, and that there were no race conditions regarding the value of the count reference. This is another good reason to use final fields wherever practical.

Listing 4.4. Monitor-based Vehicle Tracker Implementation.
@ThreadSafe
public class MonitorVehicleTracker {
    @GuardedBy("this")
    private final Map<String, MutablePoint> locations;

    public MonitorVehicleTracker(
            Map<String, MutablePoint> locations) {
        this.locations = deepCopy(locations);
    }

    public synchronized Map<String, MutablePoint> getLocations() {
        return deepCopy(locations);
    }

    public synchronized MutablePoint getLocation(String id) {
        MutablePoint loc = locations.get(id);
        return loc == null ? null : new MutablePoint(loc);
    }

    public synchronized void setLocation(String id, int x, int y) {
        MutablePoint loc = locations.get(id);
        if (loc == null)
            throw new IllegalArgumentException("No such ID: " + id);
        loc.x = x;
        loc.y = y;
    }

    private static Map<String, MutablePoint> deepCopy(
            Map<String, MutablePoint> m) {
        Map<String, MutablePoint> result =
            new HashMap<String, MutablePoint>();
        for (String id : m.keySet())
            result.put(id, new MutablePoint(m.get(id)));
        return Collections.unmodifiableMap(result);
    }
}

public class MutablePoint { /* Listing 4.5 */ }
Listing 4.5. Mutable Point Class Similar to java.awt.Point.

@NotThreadSafe
public class MutablePoint {
    public int x, y;

    public MutablePoint() { x = 0; y = 0; }
    public MutablePoint(MutablePoint p) {
        this.x = p.x;
        this.y = p.y;
    }
}
4.3.1. Example: Vehicle Tracker Using Delegation
As a more substantial example of delegation, let's construct a version of the vehicle tracker that delegates to a thread-safe class. We store the locations in a Map, so we start with a thread-safe Map implementation, ConcurrentHashMap. We also store the location using an immutable Point class instead of MutablePoint, shown in Listing 4.6.
Listing 4.6. Immutable Point class used by DelegatingVehicleTracker.
@Immutable
public class Point {
    public final int x, y;

    public Point(int x, int y) {
        this.x = x;
        this.y = y;
    }
}
@24". ls Lhreadsafe because lL ls lmmuLable. lmmuLable values can be freely shared and publlshed, so we no longer
need Lo copy Lhe locaLlons when reLurnlng Lhem.
A&0&6$.4"6B&84*0&=9$*C&9 ln LlsLlng 4.7 does noL use any expllclL synchronlzaLlon, all access Lo sLaLe ls managed by
D2"*)99&".E$#8F$G, and all Lhe keys and values of Lhe F$G are lmmuLable.

If we had used the original MutablePoint class instead of Point, we would be breaking encapsulation by letting getLocations publish a reference to mutable state that is not thread-safe. Notice that we've changed the behavior of the vehicle tracker class slightly; while the monitor version returned a snapshot of the locations, the delegating version returns an unmodifiable but "live" view of the vehicle locations. This means that if thread A calls getLocations and thread B later modifies the location of some of the points, those changes are reflected in the Map returned to thread A. As we remarked earlier, this can be a benefit (more up-to-date data) or a liability (potentially inconsistent view of the fleet), depending on your requirements.
If an unchanging view of the fleet is required, getLocations could instead return a shallow copy of the locations map. Since the contents of the Map are immutable, only the structure of the Map, not the contents, must be copied, as shown in Listing 4.8 (which returns a plain HashMap, since getLocations did not promise to return a thread-safe Map).
Listing 4.7. Delegating Thread Safety to a ConcurrentHashMap.
@ThreadSafe
public class DelegatingVehicleTracker {
    private final ConcurrentMap<String, Point> locations;
    private final Map<String, Point> unmodifiableMap;

    public DelegatingVehicleTracker(Map<String, Point> points) {
        locations = new ConcurrentHashMap<String, Point>(points);
        unmodifiableMap = Collections.unmodifiableMap(locations);
    }

    public Map<String, Point> getLocations() {
        return unmodifiableMap;
    }

    public Point getLocation(String id) {
        return locations.get(id);
    }

    public void setLocation(String id, int x, int y) {
        if (locations.replace(id, new Point(x, y)) == null)
            throw new IllegalArgumentException(
                "invalid vehicle name: " + id);
    }
}
Listing 4.8. Returning a Static Copy of the Location Set Instead of a "Live" One.
G)M04* F$GR'.94"6e @24".T 6&.52*$.42"#UV W
9&.)9" D200&*.42"#+)"32;4%4$M0&F$GU
"&? E$#8F$GR'.94"6e @24".TU02*$.42"#VV[
\
4.3.2. Independent State Variables
The delegation examples so far delegate to a single, thread-safe state variable. We can also delegate thread safety to more than one underlying state variable as long as those underlying state variables are independent, meaning that the composite class does not impose any invariants involving the multiple state variables.
B4#)$0D23G2"&". ln LlsLlng 4.9 ls a graphlcal componenL LhaL allows cllenLs Lo reglsLer llsLeners for mouse and
keysLroke evenLs. lL malnLalns a llsL of reglsLered llsLeners of each Lype, so LhaL when an evenL occurs Lhe approprlaLe
llsLeners can be lnvoked. 8uL Lhere ls no relaLlonshlp beLween Lhe seL of mouse llsLeners and key llsLeners, Lhe Lwo are
lndependenL, and Lherefore B4#)$0D23G2"&". can delegaLe lLs Lhread safeLy obllgaLlons Lo Lwo underlylng Lhreadsafe
llsLs.
B4#)$0D23G2"&". uses a D2G7J"P94.&199$754#. Lo sLore each llsLener llsL, Lhls ls a Lhreadsafe 54#. lmplemenLaLlon
parLlcularly sulLed for managlng llsLener llsLs (see SecLlon 3.2.3). Lach 54#. ls Lhreadsafe, and because Lhere are no
consLralnLs coupllng Lhe sLaLe of one Lo Lhe sLaLe of Lhe oLher, B4#)$0D23G2"&". can delegaLe lLs Lhread safeLy
responslblllLles Lo Lhe underlylng 32)#&54#.&"&9# and C&754#.&"&9# ob[ecLs.

Listing 4.9. Delegating Thread Safety to Multiple Underlying State Variables.
public class VisualComponent {
    private final List<KeyListener> keyListeners
        = new CopyOnWriteArrayList<KeyListener>();
    private final List<MouseListener> mouseListeners
        = new CopyOnWriteArrayList<MouseListener>();

    public void addKeyListener(KeyListener listener) {
        keyListeners.add(listener);
    }

    public void addMouseListener(MouseListener listener) {
        mouseListeners.add(listener);
    }

    public void removeKeyListener(KeyListener listener) {
        keyListeners.remove(listener);
    }

    public void removeMouseListener(MouseListener listener) {
        mouseListeners.remove(listener);
    }
}
4.3.3. When Delegation Fails
Most composite classes are not as simple as VisualComponent: they have invariants that relate their component state variables. NumberRange in Listing 4.10 uses two AtomicIntegers to manage its state, but imposes an additional constraint that the first number be less than or equal to the second.
NumberRange is not thread-safe; it does not preserve the invariant that constrains lower and upper. The setLower and setUpper methods attempt to respect this invariant, but do so poorly. Both setLower and setUpper are check-then-act sequences, but they do not use sufficient locking to make them atomic. If the number range holds (0, 10), and one thread calls setLower(5) while another thread calls setUpper(4), with some unlucky timing both will pass the checks in the setters and both modifications will be applied. The result is that the range now holds (5, 4), an invalid state. So while the underlying AtomicIntegers are thread-safe, the composite class is not. Because the underlying state variables lower and upper are not independent, NumberRange cannot simply delegate thread safety to its thread-safe state variables.
,)3M&9N$"6& could be made Lhreadsafe by uslng locklng Lo malnLaln lLs lnvarlanLs, such as guardlng 02?&9 and )GG&9
wlLh a common lock. lL musL also avold publlshlng 02?&9 and )GG&9 Lo prevenL cllenLs from subverLlng lLs lnvarlanLs.
If a class has compound actions, as NumberRange does, delegation alone is again not a suitable approach for thread safety. In these cases, the class must provide its own locking to ensure that compound actions are atomic, unless the entire compound action can also be delegated to the underlying state variables.
If a class is composed of multiple independent thread-safe state variables and has no operations that have any invalid state transitions, then it can delegate thread safety to the underlying state variables.


Listing 4.10. Number Range Class that does Not Sufficiently Protect Its Invariants. Don't do this.

public class NumberRange {
    // INVARIANT: lower <= upper
    private final AtomicInteger lower = new AtomicInteger(0);
    private final AtomicInteger upper = new AtomicInteger(0);

    public void setLower(int i) {
        // Warning -- unsafe check-then-act
        if (i > upper.get())
            throw new IllegalArgumentException(
                "can't set lower to " + i + " > upper");
        lower.set(i);
    }

    public void setUpper(int i) {
        // Warning -- unsafe check-then-act
        if (i < lower.get())
            throw new IllegalArgumentException(
                "can't set upper to " + i + " < lower");
        upper.set(i);
    }

    public boolean isInRange(int i) {
        return (i >= lower.get() && i <= upper.get());
    }
}
The problem that prevented NumberRange from being thread-safe even though its state components were thread-safe is very similar to one of the rules about volatile variables described in Section 3.1.4: a variable is suitable for being declared volatile only if it does not participate in invariants involving other state variables.
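A sketch of the locking fix described above (ours, not a numbered listing from the text): guard both setters and the reader with one common lock, here the intrinsic lock, so the check and the act happen atomically. Once every access is guarded by the same lock, the atomicity of AtomicInteger is redundant, so plain int fields suffice.
@ThreadSafe
public class SafeNumberRange {
    // INVARIANT: lower <= upper; both fields guarded by "this"
    @GuardedBy("this") private int lower = 0;
    @GuardedBy("this") private int upper = 0;

    public synchronized void setLower(int i) {
        if (i > upper)
            throw new IllegalArgumentException(
                "can't set lower to " + i + " > upper");
        lower = i;
    }

    public synchronized void setUpper(int i) {
        if (i < lower)
            throw new IllegalArgumentException(
                "can't set upper to " + i + " < lower");
        upper = i;
    }

    public synchronized boolean isInRange(int i) {
        return (i >= lower && i <= upper);
    }
}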
4.3.4. Publishing Underlying State Variables
When you delegate thread safety to an object's underlying state variables, under what conditions can you publish those variables so that other classes can modify them as well? Again, the answer depends on what invariants your class imposes on those variables. While the underlying value field in Counter could take on any integer value, Counter constrains it to take on only positive values, and the increment operation constrains the set of valid next states given any current state. If you were to make the value field public, clients could change it to an invalid value, so publishing it would render the class incorrect. On the other hand, if a variable represents the current temperature or the ID of the last user to log on, then having another class modify this value at any time probably would not violate any invariants, so publishing this variable might be acceptable. (It still may not be a good idea, since publishing mutable variables constrains future development and opportunities for subclassing, but it would not necessarily render the class not thread-safe.)
If a state variable is thread-safe, does not participate in any invariants that constrain its value, and has no prohibited state transitions for any of its operations, then it can safely be published.
For example, it would be safe to publish mouseListeners or keyListeners in VisualComponent. Because VisualComponent does not impose any constraints on the valid states of its listener lists, these fields could be made public or otherwise published without compromising thread safety.
4.3.5. Example: Vehicle Tracker that Publishes Its State
Let's construct another version of the vehicle tracker that publishes its underlying mutable state. Again, we need to modify the interface a little bit to accommodate this change, this time using mutable but thread-safe points.

Listing 4.11. Thread-safe Mutable Point Class.
@ThreadSafe
public class SafePoint {
    @GuardedBy("this") private int x, y;

    private SafePoint(int[] a) { this(a[0], a[1]); }

    public SafePoint(SafePoint p) { this(p.get()); }

    public SafePoint(int x, int y) {
        this.x = x;
        this.y = y;
    }

    public synchronized int[] get() {
        return new int[] { x, y };
    }

    public synchronized void set(int x, int y) {
        this.x = x;
        this.y = y;
    }
}
'$%&@24". ln LlsLlng 4.11 provldes a geLLer LhaL reLrleves boLh Lhe x and y values aL once by reLurnlng a LwoelemenL
array.
[6]
lf we provlded separaLe geLLers for x and y, Lhen Lhe values could change beLween Lhe Llme one coordlnaLe ls
reLrleved and Lhe oLher, resulLlng ln a caller seelng an lnconslsLenL value: an (x, y) locaLlon where Lhe vehlcle never was.
uslng '$%&@24"., we can consLrucL a vehlcle Lracker LhaL publlshes Lhe underlylng muLable sLaLe wlLhouL undermlnlng
Lhread safeLy, as shown ln Lhe @)M04#84"6B&84*0&=9$*C&9 class ln LlsLlng 4.12.
[6] The private constructor exists to avoid the race condition that would occur if the copy constructor were implemented as this(p.x, p.y); this is an example of the private constructor capture idiom (Bloch and Gafter, 2005).
@)M04#84"6B&84*0&=9$*C&9 derlves lLs Lhread safeLy from delegaLlon Lo an underlylng D2"*)99&".E$#8F$G, buL Lhls
Llme Lhe conLenLs of Lhe F$G are Lhreadsafe muLable polnLs raLher Lhan lmmuLable ones. 1he 6&.52*$.42" meLhod
reLurns an unmodlflable copy of Lhe underlylng F$G. Callers cannoL add or remove vehlcles, buL could change Lhe
locaLlon of one of Lhe vehlcles by muLaLlng Lhe '$%&@24". values ln Lhe reLurned F$G. Agaln, Lhe "llve" naLure of Lhe F$G
may be a beneflL or a drawback, dependlng on Lhe requlremenLs. @)M04#84"6B&84*0&=9$*C&9 ls Lhreadsafe, buL would
noL be so lf lL lmposed any addlLlonal consLralnLs on Lhe valld values for vehlcle locaLlons. lf lL needed Lo be able Lo
"veLo" changes Lo vehlcle locaLlons or Lo Lake acLlon when a locaLlon changes, Lhe approach Laken by
@)M04#84"6B&84*0&=9$*C&9 would noL be approprlaLe.
Listing 4.12. Vehicle Tracker that Safely Publishes Underlying State.
@ThreadSafe
public class PublishingVehicleTracker {
    private final Map<String, SafePoint> locations;
    private final Map<String, SafePoint> unmodifiableMap;

    public PublishingVehicleTracker(
            Map<String, SafePoint> locations) {
        this.locations
            = new ConcurrentHashMap<String, SafePoint>(locations);
        this.unmodifiableMap
            = Collections.unmodifiableMap(this.locations);
    }

    public Map<String, SafePoint> getLocations() {
        return unmodifiableMap;
    }

    public SafePoint getLocation(String id) {
        return locations.get(id);
    }

    public void setLocation(String id, int x, int y) {
        if (!locations.containsKey(id))
            throw new IllegalArgumentException(
                "invalid vehicle name: " + id);
        locations.get(id).set(x, y);
    }
}


4.4. Adding Functionality to Existing Thread-safe Classes
The Java class library contains many useful "building block" classes. Reusing existing classes is often preferable to creating new ones: reuse can reduce development effort, development risk (because the existing components are already tested), and maintenance cost. Sometimes a thread-safe class that supports all of the operations we want already exists, but often the best we can find is a class that supports almost all the operations we want, and then we need to add a new operation to it without undermining its thread safety.
As an example, let's say we need a thread-safe List with an atomic put-if-absent operation. The synchronized List implementations nearly do the job, since they provide the contains and add methods from which we can construct a put-if-absent operation.
The concept of put-if-absent is straightforward enough: check to see if an element is in the collection before adding it, and do not add it if it is already there. (Your "check-then-act" warning bells should be going off now.) The requirement that the class be thread-safe implicitly adds another requirement: that operations like put-if-absent be atomic. Any reasonable interpretation suggests that, if you take a List that does not contain object X, and add X twice with put-if-absent, the resulting collection contains only one copy of X. But, if put-if-absent were not atomic, with some unlucky timing two threads could both see that X was not present and both add X, resulting in two copies of X.
The safest way to add a new atomic operation is to modify the original class to support the desired operation, but this is not always possible because you may not have access to the source code or may not be free to modify it. If you can modify the original class, you need to understand the implementation's synchronization policy so that you can enhance it in a manner consistent with its original design. Adding the new method directly to the class means that all the code that implements the synchronization policy for that class is still contained in one source file, facilitating easier comprehension and maintenance.
Another approach is to extend the class, assuming it was designed for extension. BetterVector in Listing 4.13 extends Vector to add a putIfAbsent method. Extending Vector is straightforward enough, but not all classes expose enough of their state to subclasses to admit this approach.
Extension is more fragile than adding code directly to a class, because the implementation of the synchronization policy is now distributed over multiple, separately maintained source files. If the underlying class were to change its synchronization policy by choosing a different lock to guard its state variables, the subclass would subtly and silently break, because it no longer used the right lock to control concurrent access to the base class's state. (The synchronization policy of Vector is fixed by its specification, so BetterVector would not suffer from this problem.)
Listing 4.13. Extending Vector to have a Put-if-absent Method.
@ThreadSafe
public class BetterVector<E> extends Vector<E> {
    public synchronized boolean putIfAbsent(E x) {
        boolean absent = !contains(x);
        if (absent)
            add(x);
        return absent;
    }
}
4.4.1. Client-side Locking
For an ArrayList wrapped with a Collections.synchronizedList wrapper, neither of these approaches (adding a method to the original class or extending the class) works, because the client code does not even know the class of the List object returned from the synchronized wrapper factories. A third strategy is to extend the functionality of the class without extending the class itself by placing extension code in a "helper" class.
Listing 4.14 shows a failed attempt to create a helper class with an atomic put-if-absent operation for operating on a thread-safe List.

Listing 4.14. Non-thread-safe Attempt to Implement Put-if-absent. Don't do this.

@NotThreadSafe
public class ListHelper<E> {
    public List<E> list =
        Collections.synchronizedList(new ArrayList<E>());
    ...
    public synchronized boolean putIfAbsent(E x) {
        boolean absent = !list.contains(x);
        if (absent)
            list.add(x);
        return absent;
    }
}
Why wouldn't this work? After all, putIfAbsent is synchronized, right? The problem is that it synchronizes on the wrong lock. Whatever lock the List uses to guard its state, it sure isn't the lock on the ListHelper. ListHelper provides only the illusion of synchronization; the various list operations, while all synchronized, use different locks, which means that putIfAbsent is not atomic relative to other operations on the List. So there is no guarantee that another thread won't modify the list while putIfAbsent is executing.
To make this approach work, we have to use the same lock that the List uses, by using client-side locking or external locking. Client-side locking entails guarding client code that uses some object X with the lock X uses to guard its own state. In order to use client-side locking, you must know what lock X uses.
The documentation for Vector and the synchronized wrapper classes states, albeit obliquely, that they support client-side locking, by using the intrinsic lock for the Vector or the wrapper collection (not the wrapped collection). Listing 4.15 shows a putIfAbsent operation on a thread-safe List that correctly uses client-side locking.
Listing 4.15. Implementing Put-if-absent with Client-side Locking.
@ThreadSafe
public class ListHelper<E> {
    public List<E> list =
        Collections.synchronizedList(new ArrayList<E>());
    ...
    public boolean putIfAbsent(E x) {
        synchronized (list) {
            boolean absent = !list.contains(x);
            if (absent)
                list.add(x);
            return absent;
        }
    }
}
If extending a class to add another atomic operation is fragile because it distributes the locking code for a class over multiple classes in an object hierarchy, client-side locking is even more fragile because it entails putting locking code for class C into classes that are totally unrelated to C. Exercise care when using client-side locking on classes that do not commit to their locking strategy.
Client-side locking has a lot in common with class extension; they both couple the behavior of the derived class to the implementation of the base class. Just as extension violates encapsulation of implementation [EJ Item 14], client-side locking violates encapsulation of synchronization policy.
4.4.2. Composition
There is a less fragile alternative for adding an atomic operation to an existing class: composition. ImprovedList in Listing 4.16 implements the List operations by delegating them to an underlying List instance, and adds an atomic putIfAbsent method. (Like Collections.synchronizedList and other collection wrappers, ImprovedList assumes that once a list is passed to its constructor, the client will not use the underlying list directly again, accessing it only through the ImprovedList.)
ImprovedList adds an additional level of locking using its own intrinsic lock. It does not care whether the underlying List is thread-safe, because it provides its own consistent locking that provides thread safety even if the List is not thread-safe or changes its locking implementation. While the extra layer of synchronization may add some small performance penalty,[7] the implementation in ImprovedList is less fragile than attempting to mimic the locking
strategy of another object. In effect, we've used the Java monitor pattern to encapsulate an existing List, and this is guaranteed to provide thread safety so long as our class holds the only outstanding reference to the underlying List.
Listing 4.16. Implementing Put-if-absent Using Composition.
@ThreadSafe
public class ImprovedList<T> implements List<T> {
    private final List<T> list;

    public ImprovedList(List<T> list) { this.list = list; }

    public synchronized boolean putIfAbsent(T x) {
        boolean contains = list.contains(x);
        if (!contains)          // add only when not already present
            list.add(x);
        return !contains;
    }

    public synchronized void clear() { list.clear(); }
    // ... similarly delegate other List methods
}
[7] The penalty will be small because the synchronization on the underlying List is guaranteed to be uncontended and therefore fast; see Chapter 11.
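A short usage sketch (ours): hand the backing list to the constructor and never touch it directly afterward, which is exactly the hand-off ImprovedList assumes.
ImprovedList<String> names =
    new ImprovedList<String>(new ArrayList<String>());
names.putIfAbsent("moe");
boolean added = names.putIfAbsent("moe");  // false; still only one copy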
4.5. Documenting Synchronization Policies
Documentation is one of the most powerful (and, sadly, most underutilized) tools for managing thread safety. Users look to the documentation to find out if a class is thread-safe, and maintainers look to the documentation to understand the implementation strategy so they can maintain it without inadvertently compromising safety. Unfortunately, both of these constituencies usually find less information in the documentation than they'd like.
Document a class's thread safety guarantees for its clients; document its synchronization policy for its maintainers.
Each use of synchronized, volatile, or any thread-safe class reflects a synchronization policy defining a strategy for ensuring the integrity of data in the face of concurrent access. That policy is an element of your program's design, and should be documented. Of course, the best time to document design decisions is at design time. Weeks or months later, the details may be a blur; so write it down before you forget.
Crafting a synchronization policy requires a number of decisions: which variables to make volatile, which variables to guard with locks, which lock(s) guard which variables, which variables to make immutable or confine to a thread, which operations must be atomic, etc. Some of these are strictly implementation details and should be documented for the sake of future maintainers, but some affect the publicly observable locking behavior of your class and should be documented as part of its specification.
At the very least, document the thread safety guarantees made by a class. Is it thread-safe? Does it make callbacks with a lock held? Are there any specific locks that affect its behavior? Don't force clients to make risky guesses. If you don't want to commit to supporting client-side locking, that's fine, but say so. If you want clients to be able to create new atomic operations on your class, as we did in Section 4.4, you need to document which locks they should acquire to do so safely. If you use locks to guard state, document this for future maintainers, because it's so easy; the @GuardedBy annotation will do the trick. If you use more subtle means to maintain thread safety, document them because they may not be obvious to maintainers.
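For illustration, a sketch of what that minimal documentation can look like using the JCiP annotations that appear throughout this text (the HitCounter class itself is invented for this example):
import net.jcip.annotations.GuardedBy;
import net.jcip.annotations.ThreadSafe;

@ThreadSafe
public class HitCounter {
    // Synchronization policy, recorded for maintainers: all mutable
    // state is guarded by the private lock object, so clients cannot
    // participate in (or disrupt) the policy.
    private final Object lock = new Object();
    @GuardedBy("lock") private long hits;

    public void hit() {
        synchronized (lock) { ++hits; }
    }

    public long getHits() {
        synchronized (lock) { return hits; }
    }
}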
The current state of affairs in thread safety documentation, even in the platform library classes, is not encouraging. How many times have you looked at the Javadoc for a class and wondered whether it was thread-safe?[8] Most classes don't offer any clue either way. Many official Java technology specifications, such as servlets and JDBC, woefully underdocument their thread safety promises and requirements.
[8] If you've never wondered this, we admire your optimism.
While prudence suggests that we not assume behaviors that aren't part of the specification, we have work to get done, and we are often faced with a choice of bad assumptions. Should we assume an object is thread-safe because it seems that it ought to be? Should we assume that access to an object can be made thread-safe by acquiring its lock first? (This risky technique works only if we control all the code that accesses that object; otherwise, it provides only the illusion of thread safety.) Neither choice is very satisfying.

To make matters worse, our intuition may often be wrong on which classes are "probably thread-safe" and which are not. As an example, java.text.SimpleDateFormat isn't thread-safe, but the Javadoc neglected to mention this until JDK 1.4. That this particular class isn't thread-safe comes as a surprise to many developers. How many programs mistakenly create a shared instance of a non-thread-safe object and use it from multiple threads, unaware that this might cause erroneous results under heavy load?
The problem with SimpleDateFormat could be avoided by not assuming a class is thread-safe if it doesn't say so. On the other hand, it is impossible to develop a servlet-based application without making some pretty questionable assumptions about the thread safety of container-provided objects like HttpSession. Don't make your customers or colleagues have to make guesses like this.
4.5.1. Interpreting Vague Documentation
Many Java technology specifications are silent, or at least unforthcoming, about thread safety guarantees and requirements for interfaces such as ServletContext, HttpSession, or DataSource.[9] Since these interfaces are implemented by your container or database vendor, you often can't look at the code to see what it does. Besides, you don't want to rely on the implementation details of one particular JDBC driver; you want to be compliant with the standard so your code works properly with any JDBC driver. But the words "thread" and "concurrent" do not appear at all in the JDBC specification, and appear frustratingly rarely in the servlet specification. So what do you do?
[9] We find it particularly frustrating that these omissions persist despite multiple major revisions of the specifications.
You are going to have to guess. One way to improve the quality of your guess is to interpret the specification from the perspective of someone who will implement it (such as a container or database vendor), as opposed to someone who will merely use it. Servlets are always called from a container-managed thread, and it is safe to assume that if there is more than one such thread, the container knows this. The servlet container makes available certain objects that provide service to multiple servlets, such as HttpSession or ServletContext. So the servlet container should expect to have these objects accessed concurrently, since it has created multiple threads and called methods like Servlet.service from them that could reasonably be expected to access the ServletContext.
Since it is impossible to imagine a single-threaded context in which these objects would be useful, one has to assume that they have been made thread-safe, even though the specification does not explicitly require this. Besides, if they required client-side locking, on what lock should the client code synchronize? The documentation doesn't say, and it seems absurd to guess. This "reasonable assumption" is further bolstered by the examples in the specification and official tutorials that show how to access ServletContext or HttpSession and do not use any client-side synchronization.
On the other hand, the objects placed in the ServletContext or HttpSession with setAttribute are owned by the web application, not the servlet container. The servlet specification does not suggest any mechanism for coordinating concurrent access to shared attributes. So attributes stored by the container on behalf of the web application should be thread-safe or effectively immutable. If all the container did was store these attributes on behalf of the web application, another option would be to ensure that they are consistently guarded by a lock when accessed from servlet application code. But because the container may want to serialize objects in the HttpSession for replication or passivation purposes, and the servlet container can't possibly know your locking protocol, you should make them thread-safe.
One can make a similar inference about the JDBC DataSource interface, which represents a pool of reusable database connections. A DataSource provides service to an application, and it doesn't make much sense in the context of a single-threaded application. It is hard to imagine a use case that doesn't involve calling getConnection from multiple threads. And, as with servlets, the examples in the JDBC specification do not suggest the need for any client-side locking in the many code examples using DataSource. So, even though the specification doesn't promise that DataSource is thread-safe or require container vendors to provide a thread-safe implementation, by the same "it would be absurd if it weren't" argument, we have no choice but to assume that DataSource.getConnection does not require additional client-side locking.
On the other hand, we would not make the same argument about the JDBC Connection objects dispensed by the DataSource, since these are not necessarily intended to be shared by other activities until they are returned to the pool. So if an activity that obtains a JDBC Connection spans multiple threads, it must take responsibility for ensuring that access to the Connection is properly guarded by synchronization. (In most applications, activities that use a JDBC Connection are implemented so as to confine the Connection to a specific thread anyway.)


Chapter 5. Building Blocks
The last chapter explored several techniques for constructing thread-safe classes, including delegating thread safety to existing thread-safe classes. Where practical, delegation is one of the most effective strategies for creating thread-safe classes: just let existing thread-safe classes manage all the state.
The platform libraries include a rich set of concurrent building blocks, such as thread-safe collections and a variety of synchronizers that can coordinate the control flow of cooperating threads. This chapter covers the most useful concurrent building blocks, especially those introduced in Java 5.0 and Java 6, and some patterns for using them to structure concurrent applications.
5.1. Synchronized Collections
The synchronized collection classes include Vector and Hashtable, part of the original JDK, as well as their cousins added in JDK 1.2, the synchronized wrapper classes created by the Collections.synchronizedXxx factory methods. These classes achieve thread safety by encapsulating their state and synchronizing every public method so that only one thread at a time can access the collection state.
5.1.1. Problems with Synchronized Collections
The synchronized collections are thread-safe, but you may sometimes need to use additional client-side locking to guard compound actions. Common compound actions on collections include iteration (repeatedly fetch elements until the collection is exhausted), navigation (find the next element after this one according to some order), and conditional operations such as put-if-absent (check if a Map has a mapping for key K, and if not, add the mapping (K, V)). With a synchronized collection, these compound actions are still technically thread-safe even without client-side locking, but they may not behave as you might expect when other threads can concurrently modify the collection.
Listing 5.1 shows two methods that operate on a Vector, getLast and deleteLast, both of which are check-then-act sequences. Each calls size to determine the size of the array and uses the resulting value to retrieve or remove the last element.
Listing 5.1. Compound Actions on a Vector that may Produce Confusing Results.


public static Object getLast(Vector list) {
    int lastIndex = list.size() - 1;
    return list.get(lastIndex);
}

public static void deleteLast(Vector list) {
    int lastIndex = list.size() - 1;
    list.remove(lastIndex);
}
These methods seem harmless, and in a sense they are; they can't corrupt the Vector, no matter how many threads call them simultaneously. But the caller of these methods might have a different opinion. If thread A calls getLast on a Vector with ten elements, thread B calls deleteLast on the same Vector, and the operations are interleaved as shown in Figure 5.1, getLast throws ArrayIndexOutOfBoundsException. Between the call to size and the subsequent call to get in getLast, the Vector shrank and the index computed in the first step is no longer valid. This is perfectly consistent with the specification of Vector; it throws an exception if asked for a nonexistent element. But this is not what a caller expects getLast to do, even in the face of concurrent modification, unless perhaps the Vector was empty to begin with.
Figure 5.1. Interleaving of getLast and deleteLast that throws ArrayIndexOutOfBoundsException.


Because the synchronized collections commit to a synchronization policy that supports client-side locking,[1] it is possible to create new operations that are atomic with respect to other collection operations as long as we know which lock to use. The synchronized collection classes guard each method with the lock on the synchronized collection object itself. By acquiring the collection lock we can make getLast and deleteLast atomic, ensuring that the size of the Vector does not change between calling size and get, as shown in Listing 5.2.
[1] This is documented only obliquely in the Java 5.0 Javadoc, as an example of the correct iteration idiom.
The risk that the size of the list might change between a call to size and the corresponding call to get is also present when we iterate through the elements of a Vector as shown in Listing 5.3.
This iteration idiom relies on a leap of faith: that other threads will not modify the Vector between the calls to size and get. In a single-threaded environment, this assumption is perfectly valid, but when other threads may concurrently modify the Vector it can lead to trouble. Just as with getLast, if another thread deletes an element while you are iterating through the Vector and the operations are interleaved unluckily, this iteration idiom throws ArrayIndexOutOfBoundsException.
Listing 5.2. Compound Actions on Vector Using Client-side Locking.
public static Object getLast(Vector list) {
    synchronized (list) {
        int lastIndex = list.size() - 1;
        return list.get(lastIndex);
    }
}

public static void deleteLast(Vector list) {
    synchronized (list) {
        int lastIndex = list.size() - 1;
        list.remove(lastIndex);
    }
}
Listing 5.3. Iteration that may Throw ArrayIndexOutOfBoundsException.
for (int i = 0; i < vector.size(); i++)
    doSomething(vector.get(i));
Even though the iteration in Listing 5.3 can throw an exception, this doesn't mean Vector isn't thread-safe. The state of the Vector is still valid and the exception is in fact in conformance with its specification. However, the fact that something as mundane as fetching the last element or iterating can throw an exception is clearly undesirable.
The problem of unreliable iteration can again be addressed by client-side locking, at some additional cost to scalability. By holding the Vector lock for the duration of iteration, as shown in Listing 5.4, we prevent other threads from modifying the Vector while we are iterating it. Unfortunately, we also prevent other threads from accessing it at all during this time, impairing concurrency.
Listing 5.4. Iteration with Client-side Locking.
#7"*892"4:&; U/&*.29V W
%29 U4". 4 ] Z[ 4 R /&*.29+#4:&UV[ 4__V
;2'23&.84"6U/&*.29+6&.U4VV[
\
5.1.2. Iterators and ConcurrentModificationException
We use Vector for the sake of clarity in many of our examples, even though it is considered a "legacy" collection class. But the more "modern" collection classes do not eliminate the problem of compound actions. The standard way to iterate a Collection is with an Iterator, either explicitly or through the for-each loop syntax introduced in Java 5.0, but using iterators does not obviate the need to lock the collection during iteration if other threads can concurrently modify it. The iterators returned by the synchronized collections are not designed to deal with concurrent modification, and they are fail-fast, meaning that if they detect that the collection has changed since iteration began, they throw the unchecked ConcurrentModificationException.
These fail-fast iterators are not designed to be foolproof; they are designed to catch concurrency errors on a "good-faith-effort" basis and thus act only as early-warning indicators for concurrency problems. They are implemented by associating a modification count with the collection: if the modification count changes during iteration, hasNext or next throws ConcurrentModificationException. However, this check is done without synchronization, so there is a risk of seeing a stale value of the modification count and therefore that the iterator does not realize a modification has been made. This was a deliberate design trade-off to reduce the performance impact of the concurrent modification detection code.[2]


[2] ConcurrentModificationException can arise in single-threaded code as well; this happens when objects are removed from the collection directly rather than through Iterator.remove.
Listing 5.5 illustrates iterating a collection with the for-each loop syntax. Internally, javac generates code that uses an Iterator, repeatedly calling hasNext and next to iterate the List. Just as with iterating the Vector, the way to prevent ConcurrentModificationException is to hold the collection lock for the duration of the iteration.
Listing 5.5. Iterating a List with an Iterator.


List<Widget> widgetList
    = Collections.synchronizedList(new ArrayList<Widget>());
...
// May throw ConcurrentModificationException
for (Widget w : widgetList)
    doSomething(w);
There are several reasons, however, why locking a collection during iteration may be undesirable. Other threads that need to access the collection will block until the iteration is complete; if the collection is large or the task performed for each element is lengthy, they could wait a long time. Also, if the collection is locked as in Listing 5.4, doSomething is being called with a lock held, which is a risk factor for deadlock (see Chapter 10). Even in the absence of starvation or deadlock risk, locking collections for significant periods of time hurts application scalability. The longer a lock is held, the more likely it is to be contended, and if many threads are blocked waiting for a lock, throughput and CPU utilization can suffer (see Chapter 11).
An alternative to locking the collection during iteration is to clone the collection and iterate the copy instead. Since the clone is thread-confined, no other thread can modify it during iteration, eliminating the possibility of ConcurrentModificationException. (The collection still must be locked during the clone operation itself.) Cloning the collection has an obvious performance cost; whether this is a favorable trade-off depends on many factors including the size of the collection, how much work is done for each element, the relative frequency of iteration compared to other collection operations, and responsiveness and throughput requirements.
5.1.3. Hidden Iterators
While locking can prevent iterators from throwing ConcurrentModificationException, you have to remember to use locking everywhere a shared collection might be iterated. This is trickier than it sounds, as iterators are sometimes hidden, as in HiddenIterator in Listing 5.6. There is no explicit iteration in HiddenIterator, but the code in bold (the println call) entails iteration just the same. The string concatenation gets turned by the compiler into a call to StringBuilder.append(Object), which in turn invokes the collection's toString method, and the implementation of toString in the standard collections iterates the collection and calls toString on each element to produce a nicely formatted representation of the collection's contents.
1he $;;=&"=84"6# meLhod could Lhrow D2"*)99&".F2;4%4*$.42"L-*&G.42", because Lhe collecLlon ls belng lLeraLed
by .2'.94"6 ln Lhe process of preparlng Lhe debugglng message. Cf course, Lhe real problem ls LhaL E4;;&"I.&9$.29 ls
noL Lhreadsafe, Lhe E4;;&"I.&9$.29 lock should be acqulred before uslng #&. ln Lhe G94".0" call, buL debugglng and
logglng code commonly neglecL Lo do Lhls.
The real lesson here is that the greater the distance between the state and the synchronization that guards it, the more likely that someone will forget to use proper synchronization when accessing that state. If HiddenIterator wrapped the HashSet with a synchronizedSet, encapsulating the synchronization, this sort of error would not occur.
Just as encapsulating an object's state makes it easier to preserve its invariants, encapsulating its synchronization makes it easier to enforce its synchronization policy.
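A sketch of that fix (ours): make the synchronized wrapper itself the set, so every access, including the hidden iteration performed by toString during string concatenation, runs under the wrapper's own lock.
private final Set<Integer> set =
    Collections.synchronizedSet(new HashSet<Integer>());

public void add(Integer i)    { set.add(i); }
public void remove(Integer i) { set.remove(i); }
// "DEBUG: ..." + set now iterates under the wrapper's lock,
// because the wrapper's toString is itself synchronized.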


Listing 5.6. Iteration Hidden within String Concatenation. Don't do this.

public class HiddenIterator {
    @GuardedBy("this")
    private final Set<Integer> set = new HashSet<Integer>();

    public synchronized void add(Integer i) { set.add(i); }
    public synchronized void remove(Integer i) { set.remove(i); }

    public void addTenThings() {
        Random r = new Random();
        for (int i = 0; i < 10; i++)
            add(r.nextInt());
        System.out.println("DEBUG: added ten elements to " + set);
    }
}
Iteration is also indirectly invoked by the collection's hashCode and equals methods, which may be called if the collection is used as an element or key of another collection. Similarly, the containsAll, removeAll, and retainAll methods, as well as the constructors that take collections as arguments, also iterate the collection. All of these indirect uses of iteration can cause ConcurrentModificationException.
5.2. Concurrent Collections
Java 5.0 improves on the synchronized collections by providing several concurrent collection classes. Synchronized collections achieve their thread safety by serializing all access to the collection's state. The cost of this approach is poor concurrency; when multiple threads contend for the collection-wide lock, throughput suffers.
The concurrent collections, on the other hand, are designed for concurrent access from multiple threads. Java 5.0 adds ConcurrentHashMap, a replacement for synchronized hash-based Map implementations, and CopyOnWriteArrayList, a replacement for synchronized List implementations for cases where traversal is the dominant operation. The new ConcurrentMap interface adds support for common compound actions such as put-if-absent, replace, and conditional remove.
Replacing synchronized collections with concurrent collections can offer dramatic scalability improvements with little risk.
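As a brief sketch (ours) of the compound actions the ConcurrentMap interface supports:
ConcurrentMap<String, Integer> scores =
    new ConcurrentHashMap<String, Integer>();

scores.putIfAbsent("moe", 1);   // add only if no mapping exists
scores.replace("moe", 1, 2);    // replace only if currently mapped to 1
scores.remove("moe", 2);        // conditional remove: only if mapped to 2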
Java 5.0 also adds two new collection types, Queue and BlockingQueue. A Queue is intended to hold a set of elements temporarily while they await processing. Several implementations are provided, including ConcurrentLinkedQueue, a traditional FIFO queue, and PriorityQueue, a (non-concurrent) priority-ordered queue. Queue operations do not block; if the queue is empty, the retrieval operation returns null. While you can simulate the behavior of a Queue with a List (in fact, LinkedList also implements Queue), the Queue classes were added because eliminating the random-access requirements of List admits more efficient concurrent implementations.
K02*C4"6O)&)& exLends O)&)& Lo add blocklng lnserLlon and reLrleval operaLlons. lf Lhe queue ls empLy, a reLrleval
blocks unLll an elemenL ls avallable, and lf Lhe queue ls full (for bounded queues) an lnserLlon blocks unLll Lhere ls space
avallable. 8locklng queues are exLremely useful ln producerconsumer deslgns, and are covered ln greaLer deLall ln
SecLlon 3.3.
!usL as D2"*)99&".E$#8F$G ls a concurrenL replacemenL for a synchronlzed hashbased F$G, !ava 6 adds
D2"*)99&".'C4G54#.F$G and D2"*)99&".'C4G54#.'&., whlch are concurrenL replacemenLs for a synchronlzed
'29.&;F$G or '29.&;'&. (such as =9&&F$G or =9&&'&. wrapped wlLh #7"*892"4:&;F$G).
5.2.1. ConcurrentHashMap
The synchronized collections classes hold a lock for the duration of each operation. Some operations, such as HashMap.get or List.contains, may involve more work than is initially obvious: traversing a hash bucket or list to find a specific object entails calling equals (which itself may involve a fair amount of computation) on a number of candidate objects. In a hash-based collection, if hashCode does not spread out hash values well, elements may be unevenly distributed among buckets; in the degenerate case, a poor hash function will turn a hash table into a linked list. Traversing a long list and calling equals on some or all of the elements can take a long time, and during that time no other thread can access the collection.
ConcurrentHashMap is a hash-based Map like HashMap, but it uses an entirely different locking strategy that offers better concurrency and scalability. Instead of synchronizing every method on a common lock, restricting access to a single thread at a time, it uses a finer-grained locking mechanism called lock striping (see Section 11.4.3) to allow a greater degree of shared access. Arbitrarily many reading threads can access the map concurrently, readers can access the map concurrently with writers, and a limited number of writers can modify the map concurrently. The result is far higher throughput under concurrent access, with little performance penalty for single-threaded access.

ConcurrentHashMap, along with the other concurrent collections, further improves on the synchronized collection classes by providing iterators that do not throw ConcurrentModificationException, thus eliminating the need to lock the collection during iteration. The iterators returned by ConcurrentHashMap are weakly consistent instead of fail-fast. A weakly consistent iterator can tolerate concurrent modification, traverses elements as they existed when the iterator was constructed, and may (but is not guaranteed to) reflect modifications to the collection after the construction of the iterator.
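For instance, in a small sketch of ours (not one of the book's listings), iterating a ConcurrentHashMap while another thread modifies it does not throw ConcurrentModificationException; the concurrently added entries may or may not be seen by the iterator:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class WeaklyConsistentDemo {
    public static void main(String[] args) {
        final Map<Integer, String> map = new ConcurrentHashMap<Integer, String>();
        for (int i = 0; i < 5; i++)
            map.put(i, "value" + i);

        new Thread() {
            public void run() {
                for (int i = 5; i < 10; i++)
                    map.put(i, "value" + i);   // concurrent writes during iteration
            }
        }.start();

        // No ConcurrentModificationException; entries added above may or may not appear.
        for (Map.Entry<Integer, String> e : map.entrySet())
            System.out.println(e.getKey() + "=" + e.getValue());
    }
}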
As with all improvements, there are still a few trade-offs. The semantics of methods that operate on the entire Map, such as size and isEmpty, have been slightly weakened to reflect the concurrent nature of the collection. Since the result of size could be out of date by the time it is computed, it is really only an estimate, so size is allowed to return an approximation instead of an exact count. While at first this may seem disturbing, in reality methods like size and isEmpty are far less useful in concurrent environments because these quantities are moving targets. So the requirements for these operations were weakened to enable performance optimizations for the most important operations, primarily get, put, containsKey, and remove.

The one feature offered by the synchronized Map implementations but not by ConcurrentHashMap is the ability to lock the map for exclusive access. With Hashtable and synchronizedMap, acquiring the Map lock prevents any other thread from accessing it. This might be necessary in unusual cases such as adding several mappings atomically, or iterating the Map several times and needing to see the same elements in the same order. On the whole, though, this is a reasonable trade-off: concurrent collections should be expected to change their contents continuously.

Because it has so many advantages and so few disadvantages compared to Hashtable or synchronizedMap, replacing synchronized Map implementations with ConcurrentHashMap in most cases results only in better scalability. Only if your application needs to lock the map for exclusive access[3] is ConcurrentHashMap not an appropriate drop-in replacement.
[3] Or if you are relying on the synchronization side effects of the synchronized Map implementations.
5.2.2. Additional Atomic Map Operations
Since a ConcurrentHashMap cannot be locked for exclusive access, we cannot use client-side locking to create new atomic operations such as put-if-absent, as we did for Vector in Section 4.4.1. Instead, a number of common compound operations such as put-if-absent, remove-if-equal, and replace-if-equal are implemented as atomic operations and specified by the ConcurrentMap interface, shown in Listing 5.7. If you find yourself adding such functionality to an existing synchronized Map implementation, it is probably a sign that you should consider using a ConcurrentMap instead.
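As a minimal sketch of why these atomic operations matter (our own example; the Cache class and its canonicalize methods are hypothetical names, not from the book's listings), the fragment below contrasts a racy check-then-act sequence with the atomic putIfAbsent:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hypothetical example: interning values so all threads share one canonical instance.
public class Cache {
    private final ConcurrentMap<String, String> map =
        new ConcurrentHashMap<String, String>();

    // BROKEN if atomicity matters: another thread can insert between get and put.
    public String canonicalizeRacy(String s) {
        String existing = map.get(s);
        if (existing == null) {
            map.put(s, s);          // may silently overwrite another thread's entry
            existing = s;
        }
        return existing;
    }

    // Atomic: putIfAbsent returns the previous value, or null if none was present.
    public String canonicalize(String s) {
        String existing = map.putIfAbsent(s, s);
        return (existing == null) ? s : existing;
    }
}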
5.2.3. CopyOnWriteArrayList
CopyOnWriteArrayList is a concurrent replacement for a synchronized List that offers better concurrency in some common situations and eliminates the need to lock or copy the collection during iteration. (Similarly, CopyOnWriteArraySet is a concurrent replacement for a synchronized Set.)

The copy-on-write collections derive their thread safety from the fact that as long as an effectively immutable object is properly published, no further synchronization is required when accessing it. They implement mutability by creating and republishing a new copy of the collection every time it is modified. Iterators for the copy-on-write collections retain a reference to the backing array that was current at the start of iteration, and since this will never change, they need to synchronize only briefly to ensure visibility of the array contents. As a result, multiple threads can iterate the collection without interference from one another or from threads wanting to modify the collection. The iterators returned by the copy-on-write collections do not throw ConcurrentModificationException and return the elements exactly as they were at the time the iterator was created, regardless of subsequent modifications.
Listing 5.7. ConcurrentMap Interface.

public interface ConcurrentMap<K,V> extends Map<K,V> {
    // Insert into map only if no value is mapped from K
    V putIfAbsent(K key, V value);

    // Remove only if K is mapped to V
    boolean remove(K key, V value);

    // Replace value only if K is mapped to oldValue
    boolean replace(K key, V oldValue, V newValue);

    // Replace value only if K is mapped to some value
    V replace(K key, V newValue);
}
Obviously, there is some cost to copying the backing array every time the collection is modified, especially if the collection is large; the copy-on-write collections are reasonable to use only when iteration is far more common than modification. This criterion exactly describes many event-notification systems: delivering a notification requires iterating the list of registered listeners and calling each one of them, and in most cases registering or unregistering an event listener is far less common than receiving an event notification. (See [CPJ 2.4.4] for more information on copy-on-write.)
5.3. Blocking Queues and the Producer-consumer Pattern
Blocking queues provide blocking put and take methods as well as the timed equivalents offer and poll. If the queue is full, put blocks until space becomes available; if the queue is empty, take blocks until an element is available. Queues can be bounded or unbounded; unbounded queues are never full, so a put on an unbounded queue never blocks.
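As a small illustrative sketch of ours (assuming an ArrayBlockingQueue with capacity 10; the names are not from the book), the blocking and timed forms look like this side by side:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

public class QueueOps {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new ArrayBlockingQueue<String>(10);

        queue.put("task");                 // blocks while the queue is full
        String t = queue.take();           // blocks while the queue is empty

        boolean accepted =
            queue.offer("task", 50, TimeUnit.MILLISECONDS);   // gives up after 50 ms
        String maybe =
            queue.poll(50, TimeUnit.MILLISECONDS);            // null on timeout
        System.out.println(t + " " + accepted + " " + maybe);
    }
}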
Blocking queues support the producer-consumer design pattern. A producer-consumer design separates the identification of work to be done from the execution of that work by placing work items on a "to do" list for later processing, rather than processing them immediately as they are identified. The producer-consumer pattern simplifies development because it removes code dependencies between producer and consumer classes, and simplifies workload management by decoupling activities that may produce or consume data at different or variable rates.

In a producer-consumer design built around a blocking queue, producers place data onto the queue as it becomes available, and consumers retrieve data from the queue when they are ready to take the appropriate action. Producers don't need to know anything about the identity or number of consumers, or even whether they are the only producer; all they have to do is place data items on the queue. Similarly, consumers need not know who the producers are or where the work came from. BlockingQueue simplifies the implementation of producer-consumer designs with any number of producers and consumers. One of the most common producer-consumer designs is a thread pool coupled with a work queue; this pattern is embodied in the Executor task execution framework that is the subject of Chapters 6 and 8.

The familiar division of labor for two people washing the dishes is an example of a producer-consumer design: one person washes the dishes and places them in the dish rack, and the other person retrieves the dishes from the rack and dries them. In this scenario, the dish rack acts as a blocking queue; if there are no dishes in the rack, the consumer waits until there are dishes to dry, and if the rack fills up, the producer has to stop washing until there is more space. This analogy extends to multiple producers (though there may be contention for the sink) and multiple consumers; each worker interacts only with the dish rack. No one needs to know how many producers or consumers there are, or who produced a given item of work.

The labels "producer" and "consumer" are relative; an activity that acts as a consumer in one context may act as a producer in another. Drying the dishes "consumes" clean wet dishes and "produces" clean dry dishes. A third person wanting to help might put away the dry dishes, in which case the drier is both a consumer and a producer, and there are now two shared work queues (each of which may block the drier from proceeding).

Blocking queues simplify the coding of consumers, since take blocks until data is available. If the producers don't generate work fast enough to keep the consumers busy, the consumers just wait until more work is available. Sometimes this is perfectly acceptable (as in a server application when no client is requesting service), and sometimes it indicates that the ratio of producer threads to consumer threads should be adjusted to achieve better utilization (as in a web crawler or other application in which there is effectively infinite work to do).

If the producers consistently generate work faster than the consumers can process it, eventually the application will run out of memory because work items will queue up without bound. Again, the blocking nature of put greatly simplifies coding of producers; if we use a bounded queue, then when the queue fills up the producers block, giving the consumers time to catch up because a blocked producer cannot generate more work.
Blocking queues also provide an offer method, which returns a failure status if the item cannot be enqueued. This enables you to create more flexible policies for dealing with overload, such as shedding load, serializing excess work items and writing them to disk, reducing the number of producer threads, or throttling producers in some other manner.

Bounded queues are a powerful resource management tool for building reliable applications: they make your program more robust to overload by throttling activities that threaten to produce more work than can be handled.
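A minimal sketch of one such overload policy, load shedding with offer (our own example; the handler and counter names are assumptions, not from the book):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.atomic.AtomicLong;

public class SheddingProducer {
    private final BlockingQueue<Runnable> workQueue =
        new ArrayBlockingQueue<Runnable>(1000);
    private final AtomicLong dropped = new AtomicLong();

    // Enqueue if there is room; otherwise drop the task and record the overload.
    public void submit(Runnable task) {
        if (!workQueue.offer(task))
            dropped.incrementAndGet();   // shed load instead of blocking or failing loudly
    }
}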
While the producer-consumer pattern enables producer and consumer code to be decoupled from each other, their behavior is still coupled indirectly through the shared work queue. It is tempting to assume that the consumers will always keep up, so that you need not place any bounds on the size of work queues, but this is a prescription for rearchitecting your system later. Build resource management into your design early using blocking queues; it is a lot easier to do this up front than to retrofit it later. Blocking queues make this easy for a number of situations, but if blocking queues don't fit easily into your design, you can create other blocking data structures using Semaphore (see Section 5.5.3).
The class library contains several implementations of BlockingQueue. LinkedBlockingQueue and ArrayBlockingQueue are FIFO queues, analogous to LinkedList and ArrayList but with better concurrent performance than a synchronized List. PriorityBlockingQueue is a priority-ordered queue, which is useful when you want to process elements in an order other than FIFO. Just like other sorted collections, PriorityBlockingQueue can compare elements according to their natural order (if they implement Comparable) or using a Comparator.

The last BlockingQueue implementation, SynchronousQueue, is not really a queue at all, in that it maintains no storage space for queued elements. Instead, it maintains a list of queued threads waiting to enqueue or dequeue an element. In the dish-washing analogy, this would be like having no dish rack, but instead handing the washed dishes directly to the next available dryer. While this may seem a strange way to implement a queue, it reduces the latency associated with moving data from producer to consumer because the work can be handed off directly. (In a traditional queue, the enqueue and dequeue operations must complete sequentially before a unit of work can be handed off.) The direct handoff also feeds back more information about the state of the task to the producer; when the handoff is accepted, it knows a consumer has taken responsibility for it, rather than simply letting it sit on a queue somewhere, much like the difference between handing a document to a colleague and merely putting it in her mailbox and hoping she gets it soon. Since a SynchronousQueue has no storage capacity, put and take will block unless another thread is already waiting to participate in the handoff. Synchronous queues are generally suitable only when there are enough consumers that there nearly always will be one ready to take the handoff.
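A tiny runnable sketch of such a handoff (our own example, not one of the book's listings): the producer's put does not return until a consumer's take accepts the item.

import java.util.concurrent.SynchronousQueue;

public class HandoffDemo {
    public static void main(String[] args) throws InterruptedException {
        final SynchronousQueue<String> handoff = new SynchronousQueue<String>();

        Thread consumer = new Thread() {
            public void run() {
                try {
                    System.out.println("got: " + handoff.take());
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        };
        consumer.start();

        handoff.put("washed dish");   // blocks until the consumer takes it
    }
}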
5.3.1. Example: Desktop Search
One type of program that is amenable to decomposition into producers and consumers is an agent that scans local drives for documents and indexes them for later searching, similar to Google Desktop or the Windows Indexing service. FileCrawler in Listing 5.8 shows a producer task that searches a file hierarchy for files meeting an indexing criterion and puts their names on the work queue; Indexer in Listing 5.8 shows the consumer task that takes file names from the queue and indexes them.

The producer-consumer pattern offers a thread-friendly means of decomposing the desktop search problem into simpler components. Factoring file-crawling and indexing into separate activities results in code that is more readable and reusable than with a monolithic activity that does both; each of the activities has only a single task to do, and the blocking queue handles all the flow control, so the code for each is simpler and clearer.

The producer-consumer pattern also enables several performance benefits. Producers and consumers can execute concurrently; if one is I/O-bound and the other is CPU-bound, executing them concurrently yields better overall throughput than executing them sequentially. If the producer and consumer activities are parallelizable to different degrees, tightly coupling them reduces parallelizability to that of the less parallelizable activity.

Listing 5.9 starts several crawlers and indexers, each in their own thread. As written, the consumer threads never exit, which prevents the program from terminating; we examine several techniques for addressing this problem in Chapter 7. While this example uses explicitly managed threads, many producer-consumer designs can be expressed using the Executor task execution framework, which itself uses the producer-consumer pattern.
5.3.2. Serial Thread Confinement
The blocking queue implementations in java.util.concurrent all contain sufficient internal synchronization to safely publish objects from a producer thread to the consumer thread.
For mutable objects, producer-consumer designs and blocking queues facilitate serial thread confinement for handing off ownership of objects from producers to consumers. A thread-confined object is owned exclusively by a single thread, but that ownership can be "transferred" by publishing it safely where only one other thread will gain access to it and ensuring that the publishing thread does not access it after the handoff. The safe publication ensures that the object's state is visible to the new owner, and since the original owner will not touch it again, it is now confined to the new thread. The new owner may modify it freely since it has exclusive access.

Object pools exploit serial thread confinement, "lending" an object to a requesting thread. As long as the pool contains sufficient internal synchronization to publish the pooled object safely, and as long as the clients do not themselves publish the pooled object or use it after returning it to the pool, ownership can be transferred safely from thread to thread.

One could also use other publication mechanisms for transferring ownership of a mutable object, but it is necessary to ensure that only one thread receives the object being handed off. Blocking queues make this easy; with a little more work, it could also be done with the atomic remove method of ConcurrentMap or the compareAndSet method of AtomicReference.
Listing 5.8. Producer and Consumer Tasks in a Desktop Search Application.

public class FileCrawler implements Runnable {
    private final BlockingQueue<File> fileQueue;
    private final FileFilter fileFilter;
    private final File root;
    ...
    public void run() {
        try {
            crawl(root);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }

    private void crawl(File root) throws InterruptedException {
        File[] entries = root.listFiles(fileFilter);
        if (entries != null) {
            for (File entry : entries)
                if (entry.isDirectory())
                    crawl(entry);
                else if (!alreadyIndexed(entry))
                    fileQueue.put(entry);
        }
    }
}

public class Indexer implements Runnable {
    private final BlockingQueue<File> queue;

    public Indexer(BlockingQueue<File> queue) {
        this.queue = queue;
    }

    public void run() {
        try {
            while (true)
                indexFile(queue.take());
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}
Listing 5.9. Starting the Desktop Search.

public static void startIndexing(File[] roots) {
    BlockingQueue<File> queue = new LinkedBlockingQueue<File>(BOUND);
    FileFilter filter = new FileFilter() {
        public boolean accept(File file) { return true; }
    };

    for (File root : roots)
        new Thread(new FileCrawler(queue, filter, root)).start();

    for (int i = 0; i < N_CONSUMERS; i++)
        new Thread(new Indexer(queue)).start();
}
5.3.3. Deques and Work Stealing
Java 6 also adds another two collection types, Deque (pronounced "deck") and BlockingDeque, that extend Queue and BlockingQueue. A Deque is a double-ended queue that allows efficient insertion and removal from both the head and the tail. Implementations include ArrayDeque and LinkedBlockingDeque.

Just as blocking queues lend themselves to the producer-consumer pattern, deques lend themselves to a related pattern called work stealing. A producer-consumer design has one shared work queue for all consumers; in a work stealing design, every consumer has its own deque. If a consumer exhausts the work in its own deque, it can steal work from the tail of someone else's deque. Work stealing can be more scalable than a traditional producer-consumer design because workers don't contend for a shared work queue; most of the time they access only their own deque, reducing contention. When a worker has to access another's queue, it does so from the tail rather than the head, further reducing contention.

Work stealing is well suited to problems in which consumers are also producers: when performing a unit of work is likely to result in the identification of more work. For example, processing a page in a web crawler usually results in the identification of new pages to be crawled. Similarly, many graph-exploring algorithms, such as marking the heap during garbage collection, can be efficiently parallelized using work stealing. When a worker identifies a new unit of work, it places it at the end of its own deque (or alternatively, in a work sharing design, on that of another worker); when its deque is empty, it looks for work at the end of someone else's deque, ensuring that each worker stays busy.
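As a rough sketch of the per-worker deque structure just described (our own simplified illustration, assuming Java 6's Deque; real work-stealing implementations add blocking, termination detection, and randomized victim selection):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

public class StealingWorker implements Runnable {
    private final Deque<Runnable> myDeque = new ArrayDeque<Runnable>();
    private final List<StealingWorker> allWorkers;

    public StealingWorker(List<StealingWorker> allWorkers) {
        this.allWorkers = allWorkers;
    }

    // New work (possibly discovered while running a task) goes on our own head.
    public synchronized void push(Runnable task) { myDeque.addFirst(task); }

    private synchronized Runnable popOwn()    { return myDeque.pollFirst(); } // LIFO locally
    private synchronized Runnable stealTail() { return myDeque.pollLast();  } // victims lose the tail

    public void run() {
        while (true) {
            Runnable task = popOwn();
            if (task == null) {
                for (StealingWorker victim : allWorkers) {       // naive victim scan
                    if (victim != this && (task = victim.stealTail()) != null)
                        break;
                }
            }
            if (task == null)
                return;          // simplistic termination: no work anywhere right now
            task.run();
        }
    }
}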
5.4. Blocking and Interruptible Methods
Threads may block, or pause, for several reasons: waiting for I/O completion, waiting to acquire a lock, waiting to wake up from Thread.sleep, or waiting for the result of a computation in another thread. When a thread blocks, it is usually suspended and placed in one of the blocked thread states (BLOCKED, WAITING, or TIMED_WAITING). The distinction between a blocking operation and an ordinary operation that merely takes a long time to finish is that a blocked thread must wait for an event that is beyond its control before it can proceed: the I/O completes, the lock becomes available, or the external computation finishes. When that external event occurs, the thread is placed back in the RUNNABLE state and becomes eligible again for scheduling.

The put and take methods of BlockingQueue throw the checked InterruptedException, as do a number of other library methods such as Thread.sleep. When a method can throw InterruptedException, it is telling you that it is a blocking method, and further that if it is interrupted, it will make an effort to stop blocking early.

Thread provides the interrupt method for interrupting a thread and for querying whether a thread has been interrupted. Each thread has a boolean property that represents its interrupted status; interrupting a thread sets this status.

Interruption is a cooperative mechanism. One thread cannot force another to stop what it is doing and do something else; when thread A interrupts thread B, A is merely requesting that B stop what it is doing when it gets to a convenient stopping point, if it feels like it. While there is nothing in the API or language specification that demands any specific application-level semantics for interruption, the most sensible use for interruption is to cancel an activity. Blocking methods that are responsive to interruption make it easier to cancel long-running activities on a timely basis.
When your code calls a method that throws InterruptedException, then your method is a blocking method too, and must have a plan for responding to interruption. For library code, there are basically two choices:

Propagate the InterruptedException. This is often the most sensible policy if you can get away with it: just propagate the InterruptedException to your caller. This could involve not catching InterruptedException, or catching it and throwing it again after performing some brief activity-specific cleanup.
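For instance (a sketch of ours, not one of the book's listings), a method that simply declares the exception propagates it without any handling code at all:

import java.util.concurrent.BlockingQueue;

public class QueueConsumer {
    private final BlockingQueue<String> queue;

    public QueueConsumer(BlockingQueue<String> queue) { this.queue = queue; }

    // Blocking method: declares InterruptedException and lets it propagate to the caller.
    public String nextMessage() throws InterruptedException {
        return queue.take();
    }
}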
Restore the interrupt. Sometimes you cannot throw InterruptedException, for instance when your code is part of a Runnable. In these situations, you must catch InterruptedException and restore the interrupted status by calling interrupt on the current thread, so that code higher up the call stack can see that an interrupt was issued, as demonstrated in Listing 5.10.

You can get much more sophisticated with interruption, but these two approaches should work in the vast majority of situations. But there is one thing you should not do with InterruptedException: catch it and do nothing in response. This deprives code higher up on the call stack of the opportunity to act on the interruption, because the evidence that the thread was interrupted is lost. The only situation in which it is acceptable to swallow an interrupt is when you are extending Thread and therefore control all the code higher up on the call stack. Cancellation and interruption are covered in greater detail in Chapter 7.
Listing 5.10. Restoring the Interrupted Status so as Not to Swallow the Interrupt.

public class TaskRunnable implements Runnable {
    BlockingQueue<Task> queue;
    ...
    public void run() {
        try {
            processTask(queue.take());
        } catch (InterruptedException e) {
            // restore interrupted status
            Thread.currentThread().interrupt();
        }
    }
}
5.5. Synchronizers
Blocking queues are unique among the collections classes: not only do they act as containers for objects, but they can also coordinate the control flow of producer and consumer threads because take and put block until the queue enters the desired state (not empty or not full).

A synchronizer is any object that coordinates the control flow of threads based on its state. Blocking queues can act as synchronizers; other types of synchronizers include semaphores, barriers, and latches. There are a number of synchronizer classes in the platform library; if these do not meet your needs, you can also create your own using the mechanisms described in Chapter 14.

All synchronizers share certain structural properties: they encapsulate state that determines whether threads arriving at the synchronizer should be allowed to pass or forced to wait, provide methods to manipulate that state, and provide methods to wait efficiently for the synchronizer to enter the desired state.
5.5.1. Latches
A latch is a synchronizer that can delay the progress of threads until it reaches its terminal state [CPJ 3.4.2]. A latch acts as a gate: until the latch reaches the terminal state the gate is closed and no thread can pass, and in the terminal state the gate opens, allowing all threads to pass. Once the latch reaches the terminal state, it cannot change state again, so it remains open forever. Latches can be used to ensure that certain activities do not proceed until other one-time activities complete, such as:

Ensuring that a computation does not proceed until resources it needs have been initialized. A simple binary (two-state) latch could be used to indicate "Resource R has been initialized", and any activity that requires R would wait first on this latch.

Ensuring that a service does not start until other services on which it depends have started. Each service would have an associated binary latch; starting service S would involve first waiting on the latches for other services on which S depends, and then releasing the S latch after startup completes so any services that depend on S can then proceed.

Waiting until all the parties involved in an activity, for instance the players in a multi-player game, are ready to proceed. In this case, the latch reaches the terminal state after all the players are ready.

CountDownLatch is a flexible latch implementation that can be used in any of these situations; it allows one or more threads to wait for a set of events to occur. The latch state consists of a counter initialized to a positive number, representing the number of events to wait for. The countDown method decrements the counter, indicating that an event has occurred, and the await methods wait for the counter to reach zero, which happens when all the events have occurred. If the counter is nonzero on entry, await blocks until the counter reaches zero, the waiting thread is interrupted, or the wait times out.
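A small sketch of the first use above, a binary latch gating work on resource initialization (our own example, with assumed names):

import java.util.concurrent.CountDownLatch;

public class ResourceGate {
    // Binary latch: a single countDown flips the gate open forever.
    private final CountDownLatch initialized = new CountDownLatch(1);

    public void initialize() {
        // ... load configuration, open connections, etc. (assumed work) ...
        initialized.countDown();          // signal "Resource R has been initialized"
    }

    public void useResource() throws InterruptedException {
        initialized.await();              // blocks until initialize() has completed
        // ... safe to use the resource here ...
    }
}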
TestHarness in Listing 5.11 illustrates two common uses for latches. TestHarness creates a number of threads that run a given task concurrently. It uses two latches, a "starting gate" and an "ending gate". The starting gate is initialized with a count of one; the ending gate is initialized with a count equal to the number of worker threads. The first thing each worker thread does is wait on the starting gate; this ensures that none of them starts working until they all are ready to start. The last thing each does is count down on the ending gate; this allows the master thread to wait efficiently until the last of the worker threads has finished, so it can calculate the elapsed time.

Why did we bother with the latches in TestHarness instead of just starting the threads immediately after they are created? Presumably, we wanted to measure how long it takes to run a task n times concurrently. If we simply created and started the threads, the threads started earlier would have a "head start" on the later threads, and the degree of contention would vary over time as the number of active threads increased or decreased. Using a starting gate allows the master thread to release all the worker threads at once, and the ending gate allows the master thread to wait for the last thread to finish rather than waiting sequentially for each thread to finish.
5.5.2. FutureTask
FutureTask also acts like a latch. (FutureTask implements Future, which describes an abstract result-bearing computation [CPJ 4.3.3].) A computation represented by a FutureTask is implemented with a Callable, the result-bearing equivalent of Runnable, and can be in one of three states: waiting to run, running, or completed. Completion subsumes all the ways a computation can complete, including normal completion, cancellation, and exception. Once a FutureTask enters the completed state, it stays in that state forever.

The behavior of Future.get depends on the state of the task. If it is completed, get returns the result immediately, and otherwise blocks until the task transitions to the completed state and then returns the result or throws an exception. FutureTask conveys the result from the thread executing the computation to the thread(s) retrieving the result; the specification of FutureTask guarantees that this transfer constitutes a safe publication of the result.
Listing 5.11. Using CountDownLatch for Starting and Stopping Threads in Timing Tests.

public class TestHarness {
    public long timeTasks(int nThreads, final Runnable task)
            throws InterruptedException {
        final CountDownLatch startGate = new CountDownLatch(1);
        final CountDownLatch endGate = new CountDownLatch(nThreads);

        for (int i = 0; i < nThreads; i++) {
            Thread t = new Thread() {
                public void run() {
                    try {
                        startGate.await();
                        try {
                            task.run();
                        } finally {
                            endGate.countDown();
                        }
                    } catch (InterruptedException ignored) { }
                }
            };
            t.start();
        }

        long start = System.nanoTime();
        startGate.countDown();
        endGate.await();
        long end = System.nanoTime();
        return end - start;
    }
}
FutureTask is used by the Executor framework to represent asynchronous tasks, and can also be used to represent any potentially lengthy computation that can be started before the results are needed. Preloader in Listing 5.12 uses FutureTask to perform an expensive computation whose results are needed later; by starting the computation early, you reduce the time you would have to wait later when you actually need the results.
Listing 5.12. Using FutureTask to Preload Data that is Needed Later.

public class Preloader {
    private final FutureTask<ProductInfo> future =
        new FutureTask<ProductInfo>(new Callable<ProductInfo>() {
            public ProductInfo call() throws DataLoadException {
                return loadProductInfo();
            }
        });
    private final Thread thread = new Thread(future);

    public void start() { thread.start(); }

    public ProductInfo get()
            throws DataLoadException, InterruptedException {
        try {
            return future.get();
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof DataLoadException)
                throw (DataLoadException) cause;
            else
                throw launderThrowable(cause);
        }
    }
}
Preloader creates a FutureTask that describes the task of loading product information from a database and a thread in which the computation will be performed. It provides a start method to start the thread, since it is inadvisable to start a thread from a constructor or static initializer. When the program later needs the ProductInfo, it can call get, which returns the loaded data if it is ready, or waits for the load to complete if not.

Tasks described by Callable can throw checked and unchecked exceptions, and any code can throw an Error. Whatever the task code may throw, it is wrapped in an ExecutionException and rethrown from Future.get. This complicates code that calls get, not only because it must deal with the possibility of ExecutionException (and the unchecked CancellationException), but also because the cause of the ExecutionException is returned as a Throwable, which is inconvenient to deal with.

When get throws an ExecutionException in Preloader, the cause will fall into one of three categories: a checked exception thrown by the Callable, a RuntimeException, or an Error. We must handle each of these cases separately, but we will use the launderThrowable utility method in Listing 5.13 to encapsulate some of the messier exception-handling logic. Before calling launderThrowable, Preloader tests for the known checked exceptions and rethrows them. That leaves only unchecked exceptions, which Preloader handles by calling launderThrowable and throwing the result. If the Throwable passed to launderThrowable is an Error, launderThrowable rethrows it directly; if it is not a RuntimeException, it throws an IllegalStateException to indicate a logic error. That leaves only RuntimeException, which launderThrowable returns to its caller, and which the caller generally rethrows.
Listing 5.13. Coercing an Unchecked Throwable to a RuntimeException.

/** If the Throwable is an Error, throw it; if it is a
 *  RuntimeException return it, otherwise throw IllegalStateException
 */
public static RuntimeException launderThrowable(Throwable t) {
    if (t instanceof RuntimeException)
        return (RuntimeException) t;
    else if (t instanceof Error)
        throw (Error) t;
    else
        throw new IllegalStateException("Not unchecked", t);
}
5.5.3. Semaphores
Counting semaphores are used to control the number of activities that can access a certain resource or perform a given action at the same time [CPJ 3.4.1]. Counting semaphores can be used to implement resource pools or to impose a bound on a collection.

A Semaphore manages a set of virtual permits; the initial number of permits is passed to the Semaphore constructor. Activities can acquire permits (as long as some remain) and release permits when they are done with them. If no permit is available, acquire blocks until one is (or until interrupted or the operation times out). The release method returns a permit to the semaphore.[4] A degenerate case of a counting semaphore is a binary semaphore, a Semaphore with an initial count of one. A binary semaphore can be used as a mutex with non-reentrant locking semantics; whoever holds the sole permit holds the mutex.
[4] The implementation has no actual permit objects, and Semaphore does not associate dispensed permits with threads, so a permit acquired in one thread can be released from another thread. You can think of acquire as consuming a permit and release as creating one; a Semaphore is not limited to the number of permits it was created with.
Semaphores are useful for implementing resource pools such as database connection pools. While it is easy to construct a fixed-sized pool that fails if you request a resource from an empty pool, what you really want is to block if the pool is empty and unblock when it becomes nonempty again. If you initialize a Semaphore to the pool size, acquire a permit before trying to fetch a resource from the pool, and release the permit after putting a resource back in the pool, acquire blocks until the pool becomes nonempty. This technique is used in the bounded buffer class in Chapter 12. (An easier way to construct a blocking object pool would be to use a BlockingQueue to hold the pooled resources.)
Similarly, you can use a Semaphore to turn any collection into a blocking bounded collection, as illustrated by BoundedHashSet in Listing 5.14. The semaphore is initialized to the desired maximum size of the collection. The add operation acquires a permit before adding the item into the underlying collection. If the underlying add operation does not actually add anything, it releases the permit immediately. Similarly, a successful remove operation releases a permit, enabling more elements to be added. The underlying Set implementation knows nothing about the bound; this is handled by BoundedHashSet.
5.5.4. Barriers
We have seen how latches can facilitate starting a group of related activities or waiting for a group of related activities to complete. Latches are single-use objects; once a latch enters the terminal state, it cannot be reset.

Barriers are similar to latches in that they block a group of threads until some event has occurred [CPJ 4.4.3]. The key difference is that with a barrier, all the threads must come together at a barrier point at the same time in order to proceed. Latches are for waiting for events; barriers are for waiting for other threads. A barrier implements the protocol some families use to rendezvous during a day at the mall: "Everyone meet at McDonald's at 6:00; once you get there, stay there until everyone shows up, and then we'll figure out what we're doing next."

CyclicBarrier allows a fixed number of parties to rendezvous repeatedly at a barrier point and is useful in parallel iterative algorithms that break down a problem into a fixed number of independent subproblems. Threads call await when they reach the barrier point, and await blocks until all the threads have reached the barrier point. If all threads meet at the barrier point, the barrier has been successfully passed, in which case all threads are released and the barrier is reset so it can be used again. If a call to await times out or a thread blocked in await is interrupted, then the barrier is considered broken and all outstanding calls to await terminate with BrokenBarrierException. If the barrier is successfully passed, await returns a unique arrival index for each thread, which can be used to "elect" a leader that takes some special action in the next iteration. CyclicBarrier also lets you pass a barrier action to the constructor; this is a Runnable that is executed (in one of the subtask threads) when the barrier is successfully passed but before the blocked threads are released.
Listing 5.14. Using Semaphore to Bound a Collection.

public class BoundedHashSet<T> {
    private final Set<T> set;
    private final Semaphore sem;

    public BoundedHashSet(int bound) {
        this.set = Collections.synchronizedSet(new HashSet<T>());
        sem = new Semaphore(bound);
    }

    public boolean add(T o) throws InterruptedException {
        sem.acquire();
        boolean wasAdded = false;
        try {
            wasAdded = set.add(o);
            return wasAdded;
        }
        finally {
            if (!wasAdded)
                sem.release();
        }
    }

    public boolean remove(Object o) {
        boolean wasRemoved = set.remove(o);
        if (wasRemoved)
            sem.release();
        return wasRemoved;
    }
}
Barriers are often used in simulations, where the work to calculate one step can be done in parallel but all the work associated with a given step must complete before advancing to the next step. For example, in n-body particle simulations, each step calculates an update to the position of each particle based on the locations and other attributes of the other particles. Waiting on a barrier between each update ensures that all updates for step k have completed before moving on to step k+1.

CellularAutomata in Listing 5.15 demonstrates using a barrier to compute a cellular automata simulation, such as Conway's Life game (Gardner, 1970). When parallelizing a simulation, it is generally impractical to assign a separate thread to each element (in the case of Life, a cell); this would require too many threads, and the overhead of coordinating them would dwarf the computation. Instead, it makes sense to partition the problem into a number of subparts, let each thread solve a subpart, and then merge the results. CellularAutomata partitions the board into Ncpu parts, where Ncpu is the number of CPUs available, and assigns each part to a thread.[5] At each step, the worker threads calculate new values for all the cells in their part of the board. When all worker threads have reached the barrier, the barrier action commits the new values to the data model. After the barrier action runs, the worker threads are released to compute the next step of the calculation, which includes consulting an isDone method to determine whether further iterations are required.

[5] For computational problems like this that do no I/O and access no shared data, Ncpu or Ncpu+1 threads yield optimal throughput; more threads do not help, and may in fact degrade performance as the threads compete for CPU and memory resources.
Another form of barrier is Exchanger, a two-party barrier in which the parties exchange data at the barrier point [CPJ 3.4.3]. Exchangers are useful when the parties perform asymmetric activities, for example when one thread fills a buffer with data and the other thread consumes the data from the buffer; these threads could use an Exchanger to meet and exchange a full buffer for an empty one. When two threads exchange objects via an Exchanger, the exchange constitutes a safe publication of both objects to the other party.

The timing of the exchange depends on the responsiveness requirements of the application. The simplest approach is that the filling task exchanges when the buffer is full, and the emptying task exchanges when the buffer is empty; this minimizes the number of exchanges but can delay processing of some data if the arrival rate of new data is unpredictable. Another approach would be that the filler exchanges when the buffer is full, but also when the buffer is partially filled and a certain amount of time has elapsed.
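A minimal sketch of that buffer-swapping idiom (ours; the buffer type, threshold, and fill/drain methods are stand-ins, not from the book):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Exchanger;

public class FillAndEmpty {
    private final Exchanger<List<Integer>> exchanger =
        new Exchanger<List<Integer>>();

    class Filler implements Runnable {
        public void run() {
            List<Integer> buffer = new ArrayList<Integer>();
            try {
                while (true) {
                    buffer.add(produceValue());              // assumed data source
                    if (buffer.size() >= 100)
                        buffer = exchanger.exchange(buffer); // swap full for empty
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }

    class Emptier implements Runnable {
        public void run() {
            List<Integer> buffer = new ArrayList<Integer>();
            try {
                while (true) {
                    for (Integer v : buffer)
                        consumeValue(v);                     // assumed data sink
                    buffer.clear();
                    buffer = exchanger.exchange(buffer);     // swap empty for full
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }

    private int produceValue() { return 42; }                // placeholder
    private void consumeValue(int v) { }                     // placeholder
}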
5.6. Building an Efficient, Scalable Result Cache
Nearly every server application uses some form of caching. Reusing the results of a previous computation can reduce latency and increase throughput, at the cost of some additional memory usage.
Listing 5.15. Coordinating Computation in a Cellular Automaton with CyclicBarrier.
public class CellularAutomata {
    private final Board mainBoard;
    private final CyclicBarrier barrier;
    private final Worker[] workers;

    public CellularAutomata(Board board) {
        this.mainBoard = board;
        int count = Runtime.getRuntime().availableProcessors();
        this.barrier = new CyclicBarrier(count,
            new Runnable() {
                public void run() {
                    mainBoard.commitNewValues();
                }});
        this.workers = new Worker[count];
        for (int i = 0; i < count; i++)
            workers[i] = new Worker(mainBoard.getSubBoard(count, i));
    }

    private class Worker implements Runnable {
        private final Board board;

        public Worker(Board board) { this.board = board; }
        public void run() {
            while (!board.hasConverged()) {
                for (int x = 0; x < board.getMaxX(); x++)
                    for (int y = 0; y < board.getMaxY(); y++)
                        board.setNewValue(x, y, computeValue(x, y));
                try {
                    barrier.await();
                } catch (InterruptedException ex) {
                    return;
                } catch (BrokenBarrierException ex) {
                    return;
                }
            }
        }
    }

    public void start() {
        for (int i = 0; i < workers.length; i++)
            new Thread(workers[i]).start();
        mainBoard.waitForConvergence();
    }
}
Like many other frequently reinvented wheels, caching often looks simpler than it is. A naive cache implementation is likely to turn a performance bottleneck into a scalability bottleneck, even if it does improve single-threaded performance. In this section we develop an efficient and scalable result cache for a computationally expensive function. Let's start with the obvious approach, a simple HashMap, and then look at some of its concurrency disadvantages and how to fix them.

The Computable<A,V> interface in Listing 5.16 describes a function with input of type A and result of type V. ExpensiveFunction, which implements Computable, takes a long time to compute its result; we'd like to create a Computable wrapper that remembers the results of previous computations and encapsulates the caching process. (This technique is known as memoization.)
Listing 5.16. Initial Cache Attempt Using HashMap and Synchronization.

public interface Computable<A, V> {
    V compute(A arg) throws InterruptedException;
}

public class ExpensiveFunction
        implements Computable<String, BigInteger> {
    public BigInteger compute(String arg) {
        // after deep thought...
        return new BigInteger(arg);
    }
}

public class Memoizer1<A, V> implements Computable<A, V> {
    @GuardedBy("this")
    private final Map<A, V> cache = new HashMap<A, V>();
    private final Computable<A, V> c;

    public Memoizer1(Computable<A, V> c) {
        this.c = c;
    }

    public synchronized V compute(A arg) throws InterruptedException {
        V result = cache.get(arg);
        if (result == null) {
            result = c.compute(arg);
            cache.put(arg, result);
        }
        return result;
    }
}

Memoizer1 in Listing 5.16 shows a first attempt: using a HashMap to store the results of previous computations. The compute method first checks whether the desired result is already cached, and returns the precomputed value if it is. Otherwise, the result is computed and cached in the HashMap before returning.

HashMap is not thread-safe, so to ensure that two threads do not access the HashMap at the same time, Memoizer1 takes the conservative approach of synchronizing the entire compute method. This ensures thread safety but has an obvious scalability problem: only one thread at a time can execute compute at all. If another thread is busy computing a result, other threads calling compute may be blocked for a long time. If multiple threads are queued up waiting to compute values not already computed, compute may actually take longer than it would have without memoization. Figure 5.2 illustrates what could happen when several threads attempt to use a function memoized with this approach. This is not the sort of performance improvement we had hoped to achieve through caching.
Figure 5.2. Poor Concurrency of Memoizer1.
Memoizer2 in Listing 5.17 improves on the awful concurrent behavior of Memoizer1 by replacing the HashMap with a ConcurrentHashMap. Since ConcurrentHashMap is thread-safe, there is no need to synchronize when accessing the backing Map, thus eliminating the serialization induced by synchronizing compute in Memoizer1.
Memoizer2 certainly has better concurrent behavior than Memoizer1: multiple threads can actually use it concurrently. But it still has some defects as a cache: there is a window of vulnerability in which two threads calling compute at the same time could end up computing the same value. In the case of memoization, this is merely inefficient; the purpose of a cache is to prevent the same data from being calculated multiple times. For a more general-purpose caching mechanism, it is far worse; for an object cache that is supposed to provide once-and-only-once initialization, this vulnerability would also pose a safety risk.

The problem with Memoizer2 is that if one thread starts an expensive computation, other threads are not aware that the computation is in progress and so may start the same computation, as illustrated in Figure 5.3. We'd like to somehow represent the notion that "thread X is currently computing f(27)", so that if another thread arrives looking for f(27), it knows that the most efficient way to find it is to head over to Thread X's house, hang out there until X is finished, and then ask "Hey, what did you get for f(27)?"
Figure 5.3. Two Threads Computing the Same Value When Using Memoizer2.
Listing 5.17. Replacing HashMap with ConcurrentHashMap.

public class Memoizer2<A, V> implements Computable<A, V> {
    private final Map<A, V> cache = new ConcurrentHashMap<A, V>();
    private final Computable<A, V> c;

    public Memoizer2(Computable<A, V> c) { this.c = c; }

    public V compute(A arg) throws InterruptedException {
        V result = cache.get(arg);
        if (result == null) {
            result = c.compute(arg);
            cache.put(arg, result);
        }
        return result;
    }
}
We've already seen a class that does almost exactly this: FutureTask. FutureTask represents a computational process that may or may not already have completed. FutureTask.get returns the result of the computation immediately if it is available; otherwise it blocks until the result has been computed and then returns it.

Memoizer3 in Listing 5.18 redefines the backing Map for the value cache as a ConcurrentHashMap<A,Future<V>> instead of a ConcurrentHashMap<A,V>. Memoizer3 first checks to see if the appropriate calculation has been started (as opposed to finished, as in Memoizer2). If not, it creates a FutureTask, registers it in the Map, and starts the computation; otherwise it waits for the result of the existing computation. The result might be available immediately or might be in the process of being computed, but this is transparent to the caller of Future.get.

The Memoizer3 implementation is almost perfect: it exhibits very good concurrency (mostly derived from the excellent concurrency of ConcurrentHashMap), the result is returned efficiently if it is already known, and if the computation is in progress by another thread, newly arriving threads wait patiently for the result. It has only one defect: there is still a small window of vulnerability in which two threads might compute the same value. This window is far smaller than in Memoizer2, but because the if block in compute is still a nonatomic check-then-act sequence, it is possible for two threads to call compute with the same value at roughly the same time, both see that the cache does not contain the desired value, and both start the computation. This unlucky timing is illustrated in Figure 5.4.
Figure 5.4. Unlucky Timing that could Cause Memoizer3 to Calculate the Same Value Twice.
Listing 5.18. Memoizing Wrapper Using FutureTask.

public class Memoizer3<A, V> implements Computable<A, V> {
    private final Map<A, Future<V>> cache
        = new ConcurrentHashMap<A, Future<V>>();
    private final Computable<A, V> c;

    public Memoizer3(Computable<A, V> c) { this.c = c; }

    public V compute(final A arg) throws InterruptedException {
        Future<V> f = cache.get(arg);
        if (f == null) {
            Callable<V> eval = new Callable<V>() {
                public V call() throws InterruptedException {
                    return c.compute(arg);
                }
            };
            FutureTask<V> ft = new FutureTask<V>(eval);
            f = ft;
            cache.put(arg, ft);
            ft.run(); // call to c.compute happens here
        }
        try {
            return f.get();
        } catch (ExecutionException e) {
            throw launderThrowable(e.getCause());
        }
    }
}
Memoizer3 is vulnerable to this problem because a compound action (put-if-absent) is performed on the backing map that cannot be made atomic using locking. Memoizer in Listing 5.19 takes advantage of the atomic putIfAbsent method of ConcurrentMap, closing the window of vulnerability in Memoizer3.

Caching a Future instead of a value creates the possibility of cache pollution: if a computation is cancelled or fails, future attempts to compute the result will also indicate cancellation or failure. To avoid this, Memoizer removes the Future from the cache if it detects that the computation was cancelled; it might also be desirable to remove the Future upon detecting a RuntimeException if the computation might succeed on a future attempt. Memoizer also does not address cache expiration, but this could be accomplished by using a subclass of FutureTask that associates an expiration time with each result and periodically scanning the cache for expired entries. (Similarly, it does not address cache eviction, where old entries are removed to make room for new ones so that the cache does not consume too much memory.)
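A hedged sketch of the expiration idea just mentioned (our own; the class and field names are assumptions, not part of the book's Memoizer):

import java.util.concurrent.Callable;
import java.util.concurrent.FutureTask;

// Hypothetical FutureTask subclass that records an expiration time for its result.
public class ExpiringFutureTask<V> extends FutureTask<V> {
    private final long expiresAtMillis;

    public ExpiringFutureTask(Callable<V> callable, long ttlMillis) {
        super(callable);
        this.expiresAtMillis = System.currentTimeMillis() + ttlMillis;
    }

    public boolean isExpired() {
        return System.currentTimeMillis() > expiresAtMillis;
    }
}

// A periodic sweeper thread could then scan the cache and remove entries whose
// ExpiringFutureTask.isExpired() returns true.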
With our concurrent cache implementation complete, we can now add real caching to the factorizing servlet from Chapter 2, as promised. Factorizer in Listing 5.20 uses Memoizer to cache previously computed values efficiently and scalably.
Listing 5.19. Final Implementation of Memoizer.

public class Memoizer<A, V> implements Computable<A, V> {
    private final ConcurrentMap<A, Future<V>> cache
        = new ConcurrentHashMap<A, Future<V>>();
    private final Computable<A, V> c;

    public Memoizer(Computable<A, V> c) { this.c = c; }

    public V compute(final A arg) throws InterruptedException {
        while (true) {
            Future<V> f = cache.get(arg);
            if (f == null) {
                Callable<V> eval = new Callable<V>() {
                    public V call() throws InterruptedException {
                        return c.compute(arg);
                    }
                };
                FutureTask<V> ft = new FutureTask<V>(eval);
                f = cache.putIfAbsent(arg, ft);
                if (f == null) { f = ft; ft.run(); }
            }
            try {
                return f.get();
            } catch (CancellationException e) {
                cache.remove(arg, f);
            } catch (ExecutionException e) {
                throw launderThrowable(e.getCause());
            }
        }
    }
}
Listing 5.20. Factorizing Servlet that Caches Results Using Memoizer.

@ThreadSafe
public class Factorizer implements Servlet {
    private final Computable<BigInteger, BigInteger[]> c =
        new Computable<BigInteger, BigInteger[]>() {
            public BigInteger[] compute(BigInteger arg) {
                return factor(arg);
            }
        };
    private final Computable<BigInteger, BigInteger[]> cache
        = new Memoizer<BigInteger, BigInteger[]>(c);

    public void service(ServletRequest req,
                        ServletResponse resp) {
        try {
            BigInteger i = extractFromRequest(req);
            encodeIntoResponse(resp, cache.compute(i));
        } catch (InterruptedException e) {
            encodeError(resp, "factorization interrupted");
        }
    }
}
Summary of Part I
We've covered a lot of material so far! The following "concurrency cheat sheet" summarizes the main concepts and rules presented in Part I.

It's the mutable state, stupid.[1]

All concurrency issues boil down to coordinating access to mutable state. The less mutable state, the easier it is to ensure thread safety.

Make fields final unless they need to be mutable.

Immutable objects are automatically thread-safe.

Immutable objects simplify concurrent programming tremendously. They are simpler and safer, and can be shared freely without locking or defensive copying.

Encapsulation makes it practical to manage the complexity.

You could write a thread-safe program with all data stored in global variables, but why would you want to? Encapsulating data within objects makes it easier to preserve their invariants; encapsulating synchronization within objects makes it easier to comply with their synchronization policy.

Guard each mutable variable with a lock.

Guard all variables in an invariant with the same lock.

Hold locks for the duration of compound actions.

A program that accesses a mutable variable from multiple threads without synchronization is a broken program.

Don't rely on clever reasoning about why you don't need to synchronize.

Include thread safety in the design process, or explicitly document that your class is not thread-safe.

Document your synchronization policy.
[1] During the 1992 U.S. presidential election, electoral strategist James Carville hung a sign in Bill Clinton's campaign headquarters reading "The
economy, stupid", to keep the campaign on message.

-"). ,,T C.)('.()/&4 $%&'())*&. F>>?/'"./%&3
=>.?@(, :. 1ask LxecuLlon
=>.?@(, ;. CancellaLlon and ShuLdown
=>.?@(, <. Applylng 1hread ools
=>.?@(, 9. Cul AppllcaLlons


$:">.*) WA J"3< =1*'(./%&
MosL concurrenL appllcaLlons are organlzed around Lhe execuLlon of Lasks: absLracL, dlscreLe unlLs of work. ulvldlng Lhe
work of an appllcaLlon lnLo Lasks slmpllfles program organlzaLlon, faclllLaLes error recovery by provldlng naLural
LransacLlon boundarles, and promoLes concurrency by provldlng a naLural sLrucLure for parallellzlng work.
WA@A =1*'(./&4 J"3<3 /& J:)*"03
1he flrsL sLep ln organlzlng a program around Lask execuLlon ls ldenLlfylng senslble Lask boundarles. ldeally, Lasks are
lndependenL acLlvlLles: work LhaL doesn'L depend on Lhe sLaLe, resulL, or slde effecLs of oLher Lasks. lndependence
faclllLaLes concurrency, as lndependenL Lasks can be execuLed ln parallel lf Lhere are adequaLe processlng resources. lor
greaLer flexlblllLy ln schedullng and load balanclng Lasks, each Lask should also represenL a small fracLlon of your
appllcaLlon's processlng capaclLy.
Server appllcaLlons should exhlblL boLh good LhroughpuL and good responslveness under normal load. AppllcaLlon
provlders wanL appllcaLlons Lo supporL as many users as posslble, so as Lo reduce provlslonlng cosLs per user, users
wanL Lo geL Lhelr response qulckly. lurLher, appllcaLlons should exhlblL graceful degradaLlon as Lhey become
overloaded, raLher Lhan slmply falllng over under heavy load. Chooslng good Lask boundarles, coupled wlLh a senslble
Lask execuLlon pollcy (see SecLlon 6.2.2), can help achleve Lhese goals.
MosL server appllcaLlons offer a naLural cholce of Lask boundary: lndlvldual cllenL requesLs. Web servers, mall servers,
flle servers, L!8 conLalners, and daLabase servers all accepL requesLs vla neLwork connecLlons from remoLe cllenLs. uslng
lndlvldual requesLs as Lask boundarles usually offers boLh lndependence and approprlaLe Lask slzlng. lor example, Lhe
resulL of submlLLlng a message Lo a mall server ls noL affecLed by Lhe oLher messages belng processed aL Lhe same Llme,
and handllng a slngle message usually requlres a very small percenLage of Lhe server's LoLal capaclLy.
WA@A@A =1*'(./&4 J"3<3 C*Q(*&./"??+
1here are a number of posslble pollcles for schedullng Lasks wlLhln an appllcaLlon, some of whlch explolL Lhe poLenLlal
for concurrency beLLer Lhan oLhers. 1he slmplesL ls Lo execuLe Lasks sequenLlally ln a slngle Lhread. '4"60&=89&$;P&Ma
'&9/&9 ln LlsLlng 6.1 processes lLs Lasks P11 requesLs arrlvlng on porL 80 sequenLlally. 1he deLalls of Lhe requesL
processlng aren'L lmporLanL, we're lnLeresLed ln characLerlzlng Lhe concurrency of varlous schedullng pollcles.
Listing 6.1. Sequential Web Server.

*0$## '4"60&=89&$;P&M'&9/&9 W
G)M04* #.$.4* /24; 3$4"U'.94"6fg $96#V .892?# IJL-*&G.42" W
'&9/&9'2*C&. #2*C&. ] "&? '&9/&9'2*C&.UtZV[
?840& U.9)&V W
'2*C&. *2""&*.42" ] #2*C&.+$**&G.UV[
8$";0&N&()&#.U*2""&*.42"V[
\
\
\
'4"60&=89&$;&;P&M'&9/&9 ls slmple and LheoreLlcally correcL, buL would perform poorly ln producLlon because lL can
handle only one requesL aL a Llme. 1he maln Lhread alLernaLes beLween accepLlng connecLlons and processlng Lhe
assoclaLed requesL. Whlle Lhe server ls handllng a requesL, new connecLlons musL walL unLll lL flnlshes Lhe currenL
requesL and calls $**&G. agaln. 1hls mlghL work lf requesL processlng were so fasL LhaL 8$";0&N&()&#. effecLlvely
reLurned lmmedlaLely, buL Lhls doesn'L descrlbe any web server ln Lhe real world.
rocesslng a web requesL lnvolves a mlx of compuLaLlon and l/C. 1he server musL perform sockeL l/C Lo read Lhe
requesL and wrlLe Lhe response, whlch can block due Lo neLwork congesLlon or connecLlvlLy problems. lL may also
perform flle l/C or make daLabase requesLs, whlch can also block. ln a slngleLhreaded server, blocklng noL only delays
compleLlng Lhe currenL requesL, buL prevenLs pendlng requesLs from belng processed aL all. lf one requesL blocks for an
unusually long Llme, users mlghL Lhlnk Lhe server ls unavallable because lL appears unresponslve. AL Lhe same Llme,
resource uLlllzaLlon ls poor, slnce Lhe Cu slLs ldle whlle Lhe slngle Lhread walLs for lLs l/C Lo compleLe.
ln server appllcaLlons, sequenLlal processlng rarely provldes elLher good LhroughpuL or good responslveness. 1here are
excepLlons such as when Lasks are few and longllved, or when Lhe server serves a slngle cllenL LhaL makes only a slngle
requesL aL a Llme buL mosL server appllcaLlons do noL work Lhls way.
[1]


[1] In some situations, sequential processing may offer a simplicity or safety advantage; most GUI frameworks process tasks sequentially using a
single thread. We return to the sequential model in Chapter 9.
6.1.2. Explicitly Creating Threads for Tasks
A more responsive approach is to create a new thread for servicing each request, as shown in
ThreadPerTaskWebServer in Listing 6.2.
Listing 6.2. Web Server that Starts a New Thread for Each Request.

class ThreadPerTaskWebServer {
    public static void main(String[] args) throws IOException {
        ServerSocket socket = new ServerSocket(80);
        while (true) {
            final Socket connection = socket.accept();
            Runnable task = new Runnable() {
                public void run() {
                    handleRequest(connection);
                }
            };
            new Thread(task).start();
        }
    }
}
ThreadPerTaskWebServer is similar in structure to the single-threaded version: the main thread still alternates
between accepting an incoming connection and dispatching the request. The difference is that for each connection, the
main loop creates a new thread to process the request instead of processing it within the main thread. This has three
main consequences:
Task processing is offloaded from the main thread, enabling the main loop to resume waiting for the next
incoming connection more quickly. This enables new connections to be accepted before previous requests
complete, improving responsiveness.
Tasks can be processed in parallel, enabling multiple requests to be serviced simultaneously. This may improve
throughput if there are multiple processors, or if tasks need to block for any reason such as I/O completion, lock
acquisition, or resource availability.
Task-handling code must be thread-safe, because it may be invoked concurrently for multiple tasks.
Under light to moderate load, the thread-per-task approach is an improvement over sequential execution. As long as
the request arrival rate does not exceed the server's capacity to handle requests, this approach offers better
responsiveness and throughput.
6.1.3. Disadvantages of Unbounded Thread Creation
For production use, however, the thread-per-task approach has some practical drawbacks, especially when a large
number of threads may be created:
Thread lifecycle overhead. Thread creation and teardown are not free. The actual overhead varies across platforms, but
thread creation takes time, introducing latency into request processing, and requires some processing activity by the
JVM and OS. If requests are frequent and lightweight, as in most server applications, creating a new thread for each
request can consume significant computing resources.
Resource consumption. Active threads consume system resources, especially memory. When there are more runnable
threads than available processors, threads sit idle. Having many idle threads can tie up a lot of memory, putting
pressure on the garbage collector, and having many threads competing for the CPUs can impose other performance
costs as well. If you have enough threads to keep all the CPUs busy, creating more threads won't help and may even
hurt.
Stability. There is a limit on how many threads can be created. The limit varies by platform and is affected by factors
including JVM invocation parameters, the requested stack size in the Thread constructor, and limits on threads placed
by the underlying operating system. [2] When you hit this limit, the most likely result is an OutOfMemoryError. Trying to
recover from such an error is very risky; it is far easier to structure your program to avoid hitting this limit.
[2] On 32-bit machines, a major limiting factor is address space for thread stacks. Each thread maintains two execution stacks, one for Java code
and one for native code. Typical JVM defaults yield a combined stack size of around half a megabyte. (You can change this with the -Xss JVM flag
or through the Thread constructor.) If you divide the per-thread stack size into 2^32, you get a limit of a few thousands or tens of thousands of
threads: for example, 4 GB of address space divided by half-megabyte stacks gives roughly 8,000 threads. Other factors, such as OS limitations, may impose stricter limits.

Up to a certain point, more threads can improve throughput, but beyond that point creating more threads just slows
down your application, and creating one thread too many can cause your entire application to crash horribly. The way
to stay out of danger is to place some bound on how many threads your application creates, and to test your application
thoroughly to ensure that, even when this bound is reached, it does not run out of resources.
The problem with the thread-per-task approach is that nothing places any limit on the number of threads created
except the rate at which remote users can throw HTTP requests at it. Like other concurrency hazards, unbounded
thread creation may appear to work just fine during prototyping and development, with problems surfacing only when
the application is deployed and under heavy load. So a malicious user, or enough ordinary users, can make your web
server crash if the traffic load ever reaches a certain threshold. For a server application that is supposed to provide high
availability and graceful degradation under load, this is a serious failing.
6.2. The Executor Framework
Tasks are logical units of work, and threads are a mechanism by which tasks can run asynchronously. We've examined
two policies for executing tasks using threads: execute tasks sequentially in a single thread, and execute each task in its
own thread. Both have serious limitations: the sequential approach suffers from poor responsiveness and throughput,
and the thread-per-task approach suffers from poor resource management.
In Chapter 5, we saw how to use bounded queues to prevent an overloaded application from running out of memory.
Thread pools offer the same benefit for thread management, and java.util.concurrent provides a flexible thread
pool implementation as part of the Executor framework. The primary abstraction for task execution in the Java class
libraries is not Thread, but Executor, shown in Listing 6.3.
Listing 6.3. Executor Interface.
public interface Executor {
    void execute(Runnable command);
}
Executor may be a simple interface, but it forms the basis for a flexible and powerful framework for asynchronous task
execution that supports a wide variety of task execution policies. It provides a standard means of decoupling task
submission from task execution, describing tasks with Runnable. The Executor implementations also provide lifecycle
support and hooks for adding statistics gathering, application management, and monitoring.
Executor is based on the producer-consumer pattern, where activities that submit tasks are the producers (producing
units of work to be done) and the threads that execute tasks are the consumers (consuming those units of work). Using
an Executor is usually the easiest path to implementing a producer-consumer design in your application.
6.2.1. Example: Web Server Using Executor
Building a web server with an Executor is easy. TaskExecutionWebServer in Listing 6.4 replaces the hard-coded thread
creation with an Executor. In this case, we use one of the standard Executor implementations, a fixed-size thread pool
with 100 threads.
In TaskExecutionWebServer, submission of the request-handling task is decoupled from its execution using an
Executor, and its behavior can be changed merely by substituting a different Executor implementation. Changing
Executor implementations or configuration is far less invasive than changing the way tasks are submitted; Executor
configuration is generally a one-time event and can easily be exposed for deployment-time configuration, whereas task
submission code tends to be strewn throughout the program and harder to expose.

Listing 6.4. Web Server Using a Thread Pool.
class TaskExecutionWebServer {
    private static final int NTHREADS = 100;
    private static final Executor exec
        = Executors.newFixedThreadPool(NTHREADS);

    public static void main(String[] args) throws IOException {
        ServerSocket socket = new ServerSocket(80);
        while (true) {
            final Socket connection = socket.accept();
            Runnable task = new Runnable() {
                public void run() {
                    handleRequest(connection);
                }
            };
            exec.execute(task);
        }
    }
}
We can easily modify TaskExecutionWebServer to behave like ThreadPerTaskWebServer by substituting an Executor
that creates a new thread for each request. Writing such an Executor is trivial, as shown in ThreadPerTaskExecutor in
Listing 6.5.
Listing 6.5. Executor that Starts a New Thread for Each Task.
public class ThreadPerTaskExecutor implements Executor {
    public void execute(Runnable r) {
        new Thread(r).start();
    }
}
Similarly, it is also easy to write an Executor that would make TaskExecutionWebServer behave like the single-
threaded version, executing each task synchronously before returning from execute, as shown in
WithinThreadExecutor in Listing 6.6.
6.2.2. Execution Policies
The value of decoupling submission from execution is that it lets you easily specify, and subsequently change without
great difficulty, the execution policy for a given class of tasks. An execution policy specifies the "what, where, when, and
how" of task execution, including:
Listing 6.6. Executor that Executes Tasks Synchronously in the Calling Thread.
public class WithinThreadExecutor implements Executor {
    public void execute(Runnable r) {
        r.run();
    }
}
In what thread will tasks be executed?
In what order should tasks be executed (FIFO, LIFO, priority order)?
How many tasks may execute concurrently?
How many tasks may be queued pending execution?
If a task has to be rejected because the system is overloaded, which task should be selected as the victim, and
how should the application be notified?
What actions should be taken before or after executing a task?
Execution policies are a resource management tool, and the optimal policy depends on the available computing
resources and your quality-of-service requirements. By limiting the number of concurrent tasks, you can ensure that the
application does not fail due to resource exhaustion or suffer performance problems due to contention for scarce
resources. [3] Separating the specification of execution policy from task submission makes it practical to select an
execution policy at deployment time that is matched to the available hardware.
[3] This is analogous to one of the roles of a transaction monitor in an enterprise application: it can throttle the rate at which transactions are
allowed to proceed so as not to exhaust or overstress limited resources.
Whenever you see code of the form:
new Thread(runnable).start()


and you think you might at some point want a more flexible execution policy, seriously consider replacing it with the use
of an Executor, as in the sketch below.
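A minimal before-and-after sketch of that replacement (the class and field names are illustrative, and newCachedThreadPool is just one possible policy):

import java.util.concurrent.Executor;
import java.util.concurrent.Executors;

class TaskLauncher {
    // Instead of scattering "new Thread(runnable).start()" calls around
    // the program, route submissions through an Executor; the execution
    // policy now lives in one place and can be swapped later.
    private final Executor exec = Executors.newCachedThreadPool();

    void launch(Runnable runnable) {
        exec.execute(runnable);
    }
}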
6.2.3. Thread Pools
A thread pool, as its name suggests, manages a homogeneous pool of worker threads. A thread pool is tightly bound to
a work queue holding tasks waiting to be executed. Worker threads have a simple life: request the next task from the
work queue, execute it, and go back to waiting for another task.
Executing tasks in pool threads has a number of advantages over the thread-per-task approach. Reusing an existing
thread instead of creating a new one amortizes thread creation and teardown costs over multiple requests. As an added
bonus, since the worker thread often already exists at the time the request arrives, the latency associated with thread
creation does not delay task execution, thus improving responsiveness. By properly tuning the size of the thread pool,
you can have enough threads to keep the processors busy while not having so many that your application runs out of
memory or thrashes due to competition among threads for resources.
The class library provides a flexible thread pool implementation along with some useful predefined configurations. You
can create a thread pool by calling one of the static factory methods in Executors:
"&?<4-&;=89&$;@220. A flxedslze Lhread pool creaLes Lhreads as Lasks are submlLLed, up Lo Lhe maxlmum pool slze,
and Lhen aLLempLs Lo keep Lhe pool slze consLanL (addlng new Lhreads lf a Lhread dles due Lo an unexpecLed
L-*&G.42").
"&?D$*8&;=89&$;@220. A cached Lhread pool has more flexlblllLy Lo reap ldle Lhreads when Lhe currenL slze of Lhe pool
exceeds Lhe demand for processlng, and Lo add new Lhreads when demand lncreases, buL places no bounds on Lhe slze
of Lhe pool.
"&?'4"60&=89&$;L-&*).29. A slngleLhreaded execuLor creaLes a slngle worker Lhread Lo process Lasks, replaclng lL lf lL
dles unexpecLedly. 1asks are guaranLeed Lo be processed sequenLlally accordlng Lo Lhe order lmposed by Lhe Lask queue
(lllC, LllC, prlorlLy order).
[4]

[4] Single-threaded executors also provide sufficient internal synchronization to guarantee that any memory writes made by tasks are visible to
subsequent tasks; this means that objects can be safely confined to the "task thread" even though that thread may be replaced with another from
time to time.
newScheduledThreadPool. A fixed-size thread pool that supports delayed and periodic task execution, similar to Timer.
(See Section 6.2.5.)
1he "&?<4-&;=89&$;@220 and "&?D$*8&;=89&$;@220 facLorles reLurn lnsLances of Lhe generalpurpose
=89&$;@220L-&*).29, whlch can also be used dlrecLly Lo consLrucL more speclallzed execuLors. We dlscuss Lhread pool
conflguraLlon opLlons ln depLh ln ChapLer 8.
1he web server ln =$#CL-&*).42"P&M'&9/&9 uses an L-&*).29 wlLh a bounded pool of worker Lhreads. SubmlLLlng a
Lask wlLh &-&*).& adds Lhe Lask Lo Lhe work queue, and Lhe worker Lhreads repeaLedly dequeue Lasks from Lhe work
queue and execuLe Lhem.
SwlLchlng from a LhreadperLask pollcy Lo a poolbased pollcy has a blg effecL on appllcaLlon sLablllLy: Lhe web server
wlll no longer fall under heavy load.
[3]
lL also degrades more gracefully, slnce lL does noL creaLe Lhousands of Lhreads
LhaL compeLe for llmlLed Cu and memory resources. And uslng an L-&*).29 opens Lhe door Lo all sorLs of addlLlonal
opporLunlLles for Lunlng, managemenL, monlLorlng, logglng, error reporLlng, and oLher posslblllLles LhaL would have
been far more dlfflculL Lo add wlLhouL a Lask execuLlon framework.
[5] While the server may not fail due to the creation of too many threads, if the task arrival rate exceeds the task service rate for long enough it is
still possible (just harder) to run out of memory because of the growing queue of Runnables awaiting execution. This can be addressed within
the Executor framework by using a bounded work queue; see Section 8.3.2 and the sketch below.
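A minimal sketch of that bounded-queue remedy, using only the standard ThreadPoolExecutor constructor (the pool and queue sizes here are illustrative; Section 8.3.2 treats this properly):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class BoundedQueuePool {
    // Illustrative configuration: at most 100 threads and at most 1000
    // queued tasks; the saturation policy (Section 8.3.3) governs what
    // happens to submissions when the queue is full.
    static ThreadPoolExecutor newBoundedPool() {
        return new ThreadPoolExecutor(
                100, 100,                  // core and maximum pool sizes
                0L, TimeUnit.MILLISECONDS, // keep-alive for excess threads
                new ArrayBlockingQueue<Runnable>(1000));
    }
}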
6.2.4. Executor Lifecycle
We've seen how to create an Executor but not how to shut one down. An Executor implementation is likely to create
threads for processing tasks. But the JVM can't exit until all the (non-daemon) threads have terminated, so failing to
shut down an Executor could prevent the JVM from exiting.
Because an Executor processes tasks asynchronously, at any given time the state of previously submitted tasks is not
immediately obvious. Some may have completed, some may be currently running, and others may be queued awaiting
execution. In shutting down an application, there is a spectrum from graceful shutdown (finish what you've started but
don't accept any new work) to abrupt shutdown (turn off the power to the machine room), and various points in
between.

Since Executors provide a service to applications, they should be able to be shut down as well, both gracefully
and abruptly, and feed back information to the application about the status of tasks that were affected by the shutdown.
To address the issue of execution service lifecycle, the ExecutorService interface extends Executor, adding a number
of methods for lifecycle management (as well as some convenience methods for task submission). The lifecycle
management methods of ExecutorService are shown in Listing 6.7.
Listing 6.7. Lifecycle Methods in ExecutorService.
G)M04* 4".&9%$*& L-&*).29'&9/4*& &-.&";# L-&*).29 W
/24; #8).;2?"UV[
54#.RN)""$M0&T #8).;2?",2?UV[
M220&$" 4#'8).;2?"UV[
M220&$" 4#=&934"$.&;UV[
M220&$" $?$4.=&934"$.42"U02"6 .43&2).e =43&!"4. )"4.V
.892?# I".&99)G.&;L-*&G.42"[
XX +++ $;;4.42"$0 *2"/&"4&"*& 3&.82;# %29 .$#C #)M34##42"
\
The lifecycle implied by ExecutorService has three states: running, shutting down, and terminated.
ExecutorServices are initially created in the running state. The shutdown method initiates a graceful shutdown: no
new tasks are accepted but previously submitted tasks are allowed to complete, including those that have not yet
begun execution. The shutdownNow method initiates an abrupt shutdown: it attempts to cancel outstanding tasks and
does not start any tasks that are queued but not begun.
Tasks submitted to an ExecutorService after it has been shut down are handled by the rejected execution handler (see
Section 8.3.3), which might silently discard the task or might cause execute to throw the unchecked
RejectedExecutionException. Once all tasks have completed, the ExecutorService transitions to the terminated
state. You can wait for an ExecutorService to reach the terminated state with awaitTermination, or poll for whether
it has yet terminated with isTerminated. It is common to follow shutdown immediately by awaitTermination, creating
the effect of synchronously shutting down the ExecutorService. (Executor shutdown and task cancellation are
covered in more detail in Chapter 7.)
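That shutdown-then-await idiom might look like the following minimal sketch (the 60-second bound is an illustrative choice, not a recommendation):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

class ShutdownIdiom {
    // Initiate a graceful shutdown, then wait a bounded time for it to
    // finish; fall back to an abrupt shutdown if it does not.
    static void shutdownAndAwait(ExecutorService exec)
            throws InterruptedException {
        exec.shutdown();                       // no new tasks accepted
        if (!exec.awaitTermination(60, TimeUnit.SECONDS))
            exec.shutdownNow();                // cancel outstanding tasks
    }
}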
LifecycleWebServer in Listing 6.8 extends our web server with lifecycle support. It can be shut down in two ways:
programmatically by calling stop, and through a client request by sending the web server a specially formatted HTTP
request.
Listing 6.8. Web Server with Shutdown Support.
class LifecycleWebServer {
    private final ExecutorService exec = ...;

    public void start() throws IOException {
        ServerSocket socket = new ServerSocket(80);
        while (!exec.isShutdown()) {
            try {
                final Socket conn = socket.accept();
                exec.execute(new Runnable() {
                    public void run() { handleRequest(conn); }
                });
            } catch (RejectedExecutionException e) {
                if (!exec.isShutdown())
                    log("task submission rejected", e);
            }
        }
    }

    public void stop() { exec.shutdown(); }

    void handleRequest(Socket connection) {
        Request req = readRequest(connection);
        if (isShutdownRequest(req))
            stop();
        else
            dispatchRequest(req);
    }
}
6.2.5. Delayed and Periodic Tasks
The Timer facility manages the execution of deferred ("run this task in 100 ms") and periodic ("run this task every 10
ms") tasks. However, Timer has some drawbacks, and ScheduledThreadPoolExecutor should be thought of as its
replacement. [6] You can construct a ScheduledThreadPoolExecutor through its constructor or through the
newScheduledThreadPool factory.

[6] Timer does have support for scheduling based on absolute, not relative time, so that tasks can be sensitive to changes in the system clock;
ScheduledThreadPoolExecutor supports only relative time.
A Timer creates only a single thread for executing timer tasks. If a timer task takes too long to run, the timing accuracy
of other TimerTasks can suffer. If a recurring TimerTask is scheduled to run every 10 ms and another TimerTask takes
40 ms to run, the recurring task either (depending on whether it was scheduled at fixed rate or fixed delay) gets called
four times in rapid succession after the long-running task completes, or "misses" four invocations completely. Scheduled
thread pools address this limitation by letting you provide multiple threads for executing deferred and periodic tasks.
Another problem with Timer is that it behaves poorly if a TimerTask throws an unchecked exception. The Timer thread
doesn't catch the exception, so an unchecked exception thrown from a TimerTask terminates the timer thread. Timer
also doesn't resurrect the thread in this situation; instead, it erroneously assumes the entire Timer was cancelled. In this
case, TimerTasks that are already scheduled but not yet executed are never run, and new tasks cannot be scheduled.
(This problem, called "thread leakage", is described in Section 7.3, along with techniques for avoiding it.)
OutOfTime in Listing 6.9 illustrates how a Timer can become confused in this manner and, as confusion loves company,
how the Timer shares its confusion with the next hapless caller that tries to submit a TimerTask. You might expect the
program to run for six seconds and exit, but what actually happens is that it terminates after one second with an
IllegalStateException whose message text is "Timer already cancelled". ScheduledThreadPoolExecutor deals
properly with ill-behaved tasks; there is little reason to use Timer in Java 5.0 or later.
If you need to build your own scheduling service, you may still be able to take advantage of the library by using a
DelayQueue, a BlockingQueue implementation that provides the scheduling functionality of
ScheduledThreadPoolExecutor. A DelayQueue manages a collection of Delayed objects. A Delayed has a delay time
associated with it: DelayQueue lets you take an element only if its delay has expired. Objects are returned from a
DelayQueue ordered by the time associated with their delay.
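A minimal sketch of such a Delayed element (the DelayedWork name and its fields are illustrative assumptions):

import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

// Illustrative Delayed element: a DelayQueue will hand it out via take
// only after its trigger time has passed.
class DelayedWork implements Delayed {
    private final long triggerTimeNanos;
    private final Runnable work;

    DelayedWork(Runnable work, long delay, TimeUnit unit) {
        this.work = work;
        this.triggerTimeNanos = System.nanoTime() + unit.toNanos(delay);
    }

    public long getDelay(TimeUnit unit) {
        return unit.convert(triggerTimeNanos - System.nanoTime(),
                            TimeUnit.NANOSECONDS);
    }

    public int compareTo(Delayed other) {
        long diff = getDelay(TimeUnit.NANOSECONDS)
                  - other.getDelay(TimeUnit.NANOSECONDS);
        return (diff < 0) ? -1 : (diff > 0) ? 1 : 0;
    }

    public void run() { work.run(); }
}

A consumer thread could then loop around queue.take().run() on a DelayQueue<DelayedWork> to execute work items as their delays expire.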
6.3. Finding Exploitable Parallelism
The Executor framework makes it easy to specify an execution policy, but in order to use an Executor, you have to be
able to describe your task as a Runnable. In most server applications, there is an obvious task boundary: a single client
request. But sometimes good task boundaries are not quite so obvious, as in many desktop applications. There may also
be exploitable parallelism within a single client request in server applications, as is sometimes the case in database
servers. (For a further discussion of the competing design forces in choosing task boundaries, see [CPJ 4.4.1.1].)
Listing 6.9. Class Illustrating Confusing Timer Behavior.

public class OutOfTime {
    public static void main(String[] args) throws Exception {
        Timer timer = new Timer();
        timer.schedule(new ThrowTask(), 1);
        SECONDS.sleep(1);
        timer.schedule(new ThrowTask(), 1);
        SECONDS.sleep(5);
    }

    static class ThrowTask extends TimerTask {
        public void run() { throw new RuntimeException(); }
    }
}
In this section we develop several versions of a component that admit varying degrees of concurrency. Our sample
component is the page-rendering portion of a browser application, which takes a page of HTML and renders it into an
image buffer. To keep it simple, we assume that the HTML consists only of marked-up text interspersed with image
elements with pre-specified dimensions and URLs.
6.3.1. Example: Sequential Page Renderer
The simplest approach is to process the HTML document sequentially. As text markup is encountered, render it into the
image buffer; as image references are encountered, fetch the image over the network and draw it into the image buffer
as well. This is easy to implement and requires touching each element of the input only once (it doesn't even require
buffering the document), but is likely to annoy the user, who may have to wait a long time before all the text is
rendered.

A less annoying but still sequential approach involves rendering the text elements first, leaving rectangular placeholders
for the images, and after completing the initial pass on the document, going back and downloading the images and
drawing them into the associated placeholder. This approach is shown in SingleThreadRenderer in Listing 6.10.
Downloading an image mostly involves waiting for I/O to complete, and during this time the CPU does little work. So the
sequential approach may underutilize the CPU, and also makes the user wait longer than necessary to see the finished
page. We can achieve better utilization and responsiveness by breaking the problem into independent tasks that can
execute concurrently.
Listing 6.10. Rendering Page Elements Sequentially.

G)M04* *0$## '4"60&=89&$;N&";&9&9 W
/24; 9&";&9@$6&UD8$9'&()&"*& #2)9*&V W
9&";&9=&-.U#2)9*&V[
54#.RI3$6&A$.$T 43$6&A$.$ ] "&? 199$754#.RI3$6&A$.$TUV[
%29 UI3$6&I"%2 43$6&I"%2 h #*$"<29I3$6&I"%2U#2)9*&VV
43$6&A$.$+$;;U43$6&I"%2+;2?"02$;I3$6&UVV[
%29 UI3$6&A$.$ ;$.$ h 43$6&A$.$V
9&";&9I3$6&U;$.$V[
\
\
6.3.2. Result-bearing Tasks: Callable and Future
The Executor framework uses Runnable as its basic task representation. Runnable is a fairly limiting abstraction; run
cannot return a value or throw checked exceptions, although it can have side effects such as writing to a log file or
placing a result in a shared data structure.
Many tasks are effectively deferred computations: executing a database query, fetching a resource over the network,
or computing a complicated function. For these types of tasks, Callable is a better abstraction: it expects that the main
entry point, call, will return a value and anticipates that it might throw an exception. [7] Executors includes several
utility methods for wrapping other types of tasks, including Runnable and java.security.PrivilegedAction, with a
Callable.
[7] To express a non-value-returning task with Callable, use Callable<Void>.
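For example, a minimal sketch of such a no-result task (the CleanupTask name is hypothetical):

import java.util.concurrent.Callable;

// A task with no result: declare Callable<Void> and return null.
class CleanupTask implements Callable<Void> {
    public Void call() throws Exception {
        // ... perform the side-effecting work here ...
        return null;
    }
}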
N)""$M0& and D$00$M0& descrlbe absLracL compuLaLlonal Lasks. 1asks are usually flnlLe: Lhey have a clear sLarLlng polnL
and Lhey evenLually LermlnaLe. 1he llfecycle of a Lask execuLed by an L-&*).29 has four phases: creaLed, submlLLed,
sLarLed, and compleLed. Slnce Lasks can Lake a long Llme Lo run, we also wanL Lo be able Lo cancel a Lask. ln Lhe
L-&*).29 framework, Lasks LhaL have been submlLLed buL noL yeL sLarLed can always be cancelled, and Lasks LhaL have
sLarLed can someLlmes be cancelled lf Lhey are responslve Lo lnLerrupLlon. Cancelllng a Lask LhaL has already compleLed
has no effecL. (CancellaLlon ls covered ln greaLer deLall ln ChapLer 7.)
<).)9& represenLs Lhe llfecycle of a Lask and provldes meLhods Lo LesL wheLher Lhe Lask has compleLed or been
cancelled, reLrleve lLs resulL, and cancel Lhe Lask. D$00$M0& and <).)9& are shown ln LlsLlng 6.11. lmpllclL ln Lhe
speclflcaLlon of <).)9& ls LhaL Lask llfecycle can only move forwards, noL backwards [usL llke Lhe L-&*).29'&9/4*&
llfecycle. Cnce a Lask ls compleLed, lL sLays ln LhaL sLaLe forever.
1he behavlor of 6&. varles dependlng on Lhe Lask sLaLe (noL yeL sLarLed, runnlng, compleLed). lL reLurns lmmedlaLely or
Lhrows an L-*&G.42" lf Lhe Lask has already compleLed, buL lf noL lL blocks unLll Lhe Lask compleLes. lf Lhe Lask
compleLes by Lhrowlng an excepLlon, 6&. reLhrows lL wrapped ln an L-&*).42"L-*&G.42", lf lL was cancelled, 6&.
Lhrows D$"*&00$.42"L-*&G.42". lf 6&. Lhrows L-&*).42"L-*&G.42", Lhe underlylng excepLlon can be reLrleved wlLh
6&.D$)#&.

Listing 6.11. Callable and Future Interfaces.
G)M04* 4".&9%$*& D$00$M0&RBT W
B *$00UV .892?# L-*&G.42"[
\

G)M04* 4".&9%$*& <).)9&RBT W
M220&$" *$"*&0UM220&$" 3$7I".&99)G.I%N)""4"6V[
M220&$" 4#D$"*&00&;UV[
M220&$" 4#A2"&UV[
B 6&.UV .892?# I".&99)G.&;L-*&G.42"e L-&*).42"L-*&G.42"e
D$"*&00$.42"L-*&G.42"[
B 6&.U02"6 .43&2).e =43&!"4. )"4.V
.892?# I".&99)G.&;L-*&G.42"e L-&*).42"L-*&G.42"e
D$"*&00$.42"L-*&G.42"e =43&2).L-*&G.42"[
\
There are several ways to create a Future to describe a task. The submit methods in ExecutorService all return a
Future, so that you can submit a Runnable or a Callable to an executor and get back a Future that can be used to
retrieve the result or cancel the task. You can also explicitly instantiate a FutureTask for a given Runnable or Callable.
(Because FutureTask implements Runnable, it can be submitted to an Executor for execution or executed directly by
calling its run method.)
As of Java 6, ExecutorService implementations can override newTaskFor in AbstractExecutorService to control
instantiation of the Future corresponding to a submitted Callable or Runnable. The default implementation just
creates a new FutureTask, as shown in Listing 6.12.
Listing 6.12. Default Implementation of newTaskFor in ThreadPoolExecutor.
protected <T> RunnableFuture<T> newTaskFor(Callable<T> task) {
    return new FutureTask<T>(task);
}
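An override might look like this minimal sketch (the CustomTaskExecutor and LoggingFuture names are illustrative; done is the FutureTask hook invoked on completion):

import java.util.concurrent.*;

// Illustrative: an executor that wraps each submitted Callable in a
// custom FutureTask subclass so completion can be observed.
class CustomTaskExecutor extends ThreadPoolExecutor {
    CustomTaskExecutor(int nThreads) {
        super(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS,
              new LinkedBlockingQueue<Runnable>());
    }

    protected <T> RunnableFuture<T> newTaskFor(Callable<T> task) {
        return new LoggingFuture<T>(task);
    }

    private static class LoggingFuture<T> extends FutureTask<T> {
        LoggingFuture(Callable<T> task) { super(task); }
        protected void done() {
            System.out.println("task completed: " + this);
        }
    }
}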
SubmlLLlng a N)""$M0& or D$00$M0& Lo an L-&*).29 consLlLuLes a safe publlcaLlon (see SecLlon 3.3) of Lhe N)""$M0& or
D$00$M0& from Lhe submlLLlng Lhread Lo Lhe Lhread LhaL wlll evenLually execuLe Lhe Lask. Slmllarly, seLLlng Lhe resulL
value for a <).)9& consLlLuLes a safe publlcaLlon of Lhe resulL from Lhe Lhread ln whlch lL was compuLed Lo any Lhread
LhaL reLrleves lL vla 6&..
6.3.3. Example: Page Renderer with Future
As a first step towards making the page renderer more concurrent, let's divide it into two tasks, one that renders the
text and one that downloads all the images. (Because one task is largely CPU-bound and the other is largely I/O-bound,
this approach may yield improvements even on single-CPU systems.)
Callable and Future can help us express the interaction between these cooperating tasks. In FutureRenderer in
Listing 6.13, we create a Callable to download all the images, and submit it to an ExecutorService. This returns a
Future describing the task's execution; when the main task gets to the point where it needs the images, it waits for the
result by calling Future.get. If we're lucky, the results will already be ready by the time we ask; otherwise, at least we
got a head start on downloading the images.
The state-dependent nature of get means that the caller need not be aware of the state of the task, and the safe
publication properties of task submission and result retrieval make this approach thread-safe. The exception handling
code surrounding Future.get deals with two possible problems: that the task encountered an Exception, or the thread
calling get was interrupted before the results were available. (See Sections 5.5.2 and 5.4.)
FutureRenderer allows the text to be rendered concurrently with downloading the image data. When all the images
are downloaded, they are rendered onto the page. This is an improvement in that the user sees a result quickly and it
exploits some parallelism, but we can do considerably better. There is no need for users to wait for all the images to be
downloaded; they would probably prefer to see individual images drawn as they become available.
6.3.4. Limitations of Parallelizing Heterogeneous Tasks
In the last example, we tried to execute two different types of tasks in parallel: downloading the images and rendering
the page. But obtaining significant performance improvements by trying to parallelize sequential heterogeneous tasks
can be tricky.
Two people can divide the work of cleaning the dinner dishes fairly effectively: one person washes while the other dries.
However, assigning a different type of task to each worker does not scale well; if several more people show up, it is not
obvious how they can help without getting in the way or significantly restructuring the division of labor. Without finding
finer-grained parallelism among similar tasks, this approach will yield diminishing returns.

A further problem with dividing heterogeneous tasks among multiple workers is that the tasks may have disparate sizes.
If you divide tasks A and B between two workers but A takes ten times as long as B, you've only speeded up the total
process by 9%. (Sequentially the combined work takes 10 + 1 = 11 time units; with two workers it still takes 10, a
speedup of 11/10, or about 9%.) Finally, dividing a task among multiple workers always involves some amount of
coordination overhead; for the division to be worthwhile, this overhead must be more than compensated by
productivity improvements due to parallelism.
FutureRenderer uses two tasks: one for rendering text and one for downloading the images. If rendering the text is
much faster than downloading the images, as is entirely possible, the resulting performance is not much different from
the sequential version, but the code is a lot more complicated. And the best we can do with two threads is speed things
up by a factor of two. Thus, trying to increase concurrency by parallelizing heterogeneous activities can be a lot of work,
and there is a limit to how much additional concurrency you can get out of it. (See Sections 11.4.2 and 11.4.3 for
another example of the same phenomenon.)
Listing 6.13. Waiting for Image Download with Future.

G)M04* *0$## <).)9&N&";&9&9 W
G94/$.& %4"$0 L-&*).29'&9/4*& &-&*).29 ] +++[

/24; 9&";&9@$6&UD8$9'&()&"*& #2)9*&V W
%4"$0 54#.RI3$6&I"%2T 43$6&I"%2# ] #*$"<29I3$6&I"%2U#2)9*&V[
D$00$M0&R54#.RI3$6&A$.$TT .$#C ]
"&? D$00$M0&R54#.RI3$6&A$.$TTUV W
G)M04* 54#.RI3$6&A$.$T *$00UV W
54#.RI3$6&A$.$T 9&#)0.
] "&? 199$754#.RI3$6&A$.$TUV[
%29 UI3$6&I"%2 43$6&I"%2 h 43$6&I"%2#V
9&#)0.+$;;U43$6&I"%2+;2?"02$;I3$6&UVV[
9&.)9" 9&#)0.[
\
\[

<).)9&R54#.RI3$6&A$.$TT %).)9& ] &-&*).29+#)M34.U.$#CV[
9&";&9=&-.U#2)9*&V[

.97 W
54#.RI3$6&A$.$T 43$6&A$.$ ] %).)9&+6&.UV[
%29 UI3$6&A$.$ ;$.$ h 43$6&A$.$V
9&";&9I3$6&U;$.$V[
\ *$.*8 UI".&99)G.&;L-*&G.42" &V W
XX N&a$##&9. .8& .89&$;m# 4".&99)G.&; #.$.)#
=89&$;+*)99&".=89&$;UV+4".&99)G.UV[
XX P& ;2"m. "&&; .8& 9&#)0.e #2 *$"*&0 .8& .$#C .22
%).)9&+*$"*&0U.9)&V[
\ *$.*8 UL-&*).42"L-*&G.42" &V W
.892? 0$)";&9=892?$M0&U&+6&.D$)#&UVV[
\
\
\

The real performance payoff of dividing a program's workload into tasks comes when there are a large number of
independent, homogeneous tasks that can be processed concurrently.
6.3.5. CompletionService: Executor Meets BlockingQueue
If you have a batch of computations to submit to an Executor and you want to retrieve their results as they become
available, you could retain the Future associated with each task and repeatedly poll for completion by calling get with a
timeout of zero. This is possible, but tedious. Fortunately there is a better way: a completion service.
CompletionService combines the functionality of an Executor and a BlockingQueue. You can submit Callable tasks
to it for execution and use the queue-like methods take and poll to retrieve completed results, packaged as Futures,
as they become available. ExecutorCompletionService implements CompletionService, delegating the computation
to an Executor.
The implementation of ExecutorCompletionService is quite straightforward. The constructor creates a
BlockingQueue to hold the completed results. FutureTask has a done method that is called when the computation
completes. When a task is submitted, it is wrapped with a QueueingFuture, a subclass of FutureTask that overrides
done to place the result on the BlockingQueue, as shown in Listing 6.14. The take and poll methods delegate to the
BlockingQueue, blocking if results are not yet available.

Listing 6.14. QueueingFuture Class Used By ExecutorCompletionService.
private class QueueingFuture<V> extends FutureTask<V> {
    QueueingFuture(Callable<V> c) { super(c); }
    QueueingFuture(Runnable t, V r) { super(t, r); }

    protected void done() {
        completionQueue.add(this);
    }
}
6.3.6. Example: Page Renderer with CompletionService
We can use a CompletionService to improve the performance of the page renderer in two ways: shorter total runtime
and improved responsiveness. We can create a separate task for downloading each image and execute them in a thread
pool, turning the sequential download into a parallel one: this reduces the amount of time to download all the images.
And by fetching results from the CompletionService and rendering each image as soon as it is available, we can give
the user a more dynamic and responsive user interface. This implementation is shown in Renderer in Listing 6.15.
Listing 6.15. Using CompletionService to Render Page Elements as they Become Available.
G)M04* *0$## N&";&9&9 W
G94/$.& %4"$0 L-&*).29'&9/4*& &-&*).29[

N&";&9&9UL-&*).29'&9/4*& &-&*).29V W .84#+&-&*).29 ] &-&*).29[ \

/24; 9&";&9@$6&UD8$9'&()&"*& #2)9*&V W
%4"$0 54#.RI3$6&I"%2T 4"%2 ] #*$"<29I3$6&I"%2U#2)9*&V[
D23G0&.42"'&9/4*&RI3$6&A$.$T *23G0&.42"'&9/4*& ]
"&? L-&*).29D23G0&.42"'&9/4*&RI3$6&A$.$TU&-&*).29V[
%29 U%4"$0 I3$6&I"%2 43$6&I"%2 h 4"%2V
*23G0&.42"'&9/4*&+#)M34.U"&? D$00$M0&RI3$6&A$.$TUV W
G)M04* I3$6&A$.$ *$00UV W
9&.)9" 43$6&I"%2+;2?"02$;I3$6&UV[
\
\V[

9&";&9=&-.U#2)9*&V[

.97 W
%29 U4". . ] Ze " ] 4"%2+#4:&UV[ . R "[ .__V W
<).)9&RI3$6&A$.$T % ] *23G0&.42"'&9/4*&+.$C&UV[
I3$6&A$.$ 43$6&A$.$ ] %+6&.UV[
9&";&9I3$6&U43$6&A$.$V[
\
\ *$.*8 UI".&99)G.&;L-*&G.42" &V W
=89&$;+*)99&".=89&$;UV+4".&99)G.UV[
\ *$.*8 UL-&*).42"L-*&G.42" &V W
.892? 0$)";&9=892?$M0&U&+6&.D$)#&UVV[
\
\
\
Multiple ExecutorCompletionServices can share a single Executor, so it is perfectly sensible to create an
ExecutorCompletionService that is private to a particular computation while sharing a common Executor. When used
in this way, a CompletionService acts as a handle for a batch of computations in much the same way that a Future
acts as a handle for a single computation. By remembering how many tasks were submitted to the CompletionService
and counting how many completed results are retrieved, you can know when all the results for a given batch have been
retrieved, even if you use a shared Executor.
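A minimal sketch of that counting idiom (the BatchProcessor name and method shape are illustrative):

import java.util.List;
import java.util.concurrent.*;

class BatchProcessor {
    // Illustrative: submit a batch to a CompletionService backed by a
    // shared Executor, then retrieve exactly as many results as were
    // submitted; when the loop finishes, the whole batch is done.
    static <V> void processBatch(Executor shared, List<Callable<V>> tasks)
            throws InterruptedException, ExecutionException {
        CompletionService<V> cs = new ExecutorCompletionService<V>(shared);
        for (Callable<V> task : tasks)
            cs.submit(task);
        for (int i = 0; i < tasks.size(); i++) {
            V result = cs.take().get();  // blocks until the next completion
            // ... use result ...
        }
    }
}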
6.3.7. Placing Time Limits on Tasks
Sometimes, if an activity does not complete within a certain amount of time, the result is no longer needed and the
activity can be abandoned. For example, a web application may fetch its advertisements from an external ad server, but
if the ad is not available within two seconds, it instead displays a default advertisement so that ad unavailability does
not undermine the site's responsiveness requirements. Similarly, a portal site may fetch data in parallel from multiple
data sources, but may be willing to wait only a certain amount of time for data to be available before rendering the page
without it.
The primary challenge in executing tasks within a time budget is making sure that you don't wait longer than the time
budget to get an answer or find out that one is not forthcoming. The timed version of Future.get supports this
requirement: it returns as soon as the result is ready, but throws TimeoutException if the result is not ready within the
timeout period.

A secondary problem when using timed tasks is to stop them when they run out of time, so they do not waste
computing resources by continuing to compute a result that will not be used. This can be accomplished by having the
task strictly manage its own time budget and abort if it runs out of time, or by cancelling the task if the timeout expires.
Again, Future can help; if a timed get completes with a TimeoutException, you can cancel the task through the
Future. If the task is written to be cancellable (see Chapter 7), it can be terminated early so as not to consume excessive
resources. This technique is used in Listings 6.13 and 6.16.
Listing 6.16 shows a typical application of a timed Future.get. It generates a composite web page that contains the
requested content plus an advertisement fetched from an ad server. It submits the ad-fetching task to an executor,
computes the rest of the page content, and then waits for the ad until its time budget runs out. [8] If the get times out, it
cancels [9] the ad-fetching task and uses a default advertisement instead.
[8] The timeout passed to get is computed by subtracting the current time from the deadline; this may in fact yield a negative number, but all the
timed methods in java.util.concurrent treat negative timeouts as zero, so no extra code is needed to deal with this case.
[9] The true parameter to Future.cancel means that the task thread can be interrupted if the task is currently running; see Chapter 7.
6.3.8. Example: A Travel Reservations Portal
The time-budgeting approach in the previous section can be easily generalized to an arbitrary number of tasks. Consider
a travel reservation portal: the user enters travel dates and requirements and the portal fetches and displays bids from
a number of airlines, hotels or car rental companies. Depending on the company, fetching a bid might involve invoking a
web service, consulting a database, performing an EDI transaction, or some other mechanism. Rather than have the
response time for the page be driven by the slowest response, it may be preferable to present only the information
available within a given time budget. For providers that do not respond in time, the page could either omit them
completely or display a placeholder such as "Did not hear from Air Java in time."
Listing 6.16. Fetching an Advertisement with a Time Budget.
@$6& 9&";&9@$6&P4.81;UV .892?# I".&99)G.&;L-*&G.42" W
02"6 &";,$"2# ] '7#.&3+"$"2=43&UV _ =IFLkK!AHL=[
<).)9&R1;T % ] &-&*+#)M34.U"&? <&.*81;=$#CUVV[
XX N&";&9 .8& G$6& ?840& ?$4.4"6 %29 .8& $;
@$6& G$6& ] 9&";&9@$6&K2;7UV[
1; $;[
.97 W
XX J"07 ?$4. %29 .8& 9&3$4"4"6 .43& M);6&.
02"6 .43&5&%. ] &";,$"2# a '7#.&3+"$"2=43&UV[
$; ] %+6&.U.43&5&%.e ,1,J'LDJ,A'V[
\ *$.*8 UL-&*).42"L-*&G.42" &V W
$; ] AL<1!5=k1A[
\ *$.*8 U=43&2).L-*&G.42" &V W
$; ] AL<1!5=k1A[
%+*$"*&0U.9)&V[
\
G$6&+#&.1;U$;V[
9&.)9" G$6&[
\
Fetching a bid from one company is independent of fetching bids from another, so fetching a single bid is a sensible task
boundary that allows bid retrieval to proceed concurrently. It would be easy enough to create n tasks, submit them to a
thread pool, retain the Futures, and use a timed get to fetch each result sequentially via its Future, but there is an
even easier way: invokeAll.
Listing 6.17 uses the timed version of invokeAll to submit multiple tasks to an ExecutorService and retrieve the
results. The invokeAll method takes a collection of tasks and returns a collection of Futures. The two collections have
identical structures; invokeAll adds the Futures to the returned collection in the order imposed by the task collection's
iterator, thus allowing the caller to associate a Future with the Callable it represents. The timed version of invokeAll
will return when all the tasks have completed, the calling thread is interrupted, or the timeout expires. Any tasks that
are not complete when the timeout expires are cancelled. On return from invokeAll, each task will have either
completed normally or been cancelled; the client code can call get or isCancelled to find out which.
C(55")+
SLrucLurlng appllcaLlons around Lhe execuLlon of Lasks can slmpllfy developmenL and faclllLaLe concurrency. 1he
L-&*).29 framework permlLs you Lo decouple Lask submlsslon from execuLlon pollcy and supporLs a rlch varleLy of
execuLlon pollcles, whenever you flnd yourself creaLlng Lhreads Lo perform Lasks, conslder uslng an L-&*).29 lnsLead.
1o maxlmlze Lhe beneflL of decomposlng an appllcaLlon lnLo Lasks, you musL ldenLlfy senslble Lask boundarles. ln some

In some applications, the obvious task boundaries work well, whereas in others some analysis may be required to uncover finer-
grained exploitable parallelism.
Listing 6.17. Requesting Travel Quotes Under a Time Budget.
private class QuoteTask implements Callable<TravelQuote> {
    private final TravelCompany company;
    private final TravelInfo travelInfo;
    ...
    public TravelQuote call() throws Exception {
        return company.solicitQuote(travelInfo);
    }
}

public List<TravelQuote> getRankedTravelQuotes(
        TravelInfo travelInfo, Set<TravelCompany> companies,
        Comparator<TravelQuote> ranking, long time, TimeUnit unit)
        throws InterruptedException {
    List<QuoteTask> tasks = new ArrayList<QuoteTask>();
    for (TravelCompany company : companies)
        tasks.add(new QuoteTask(company, travelInfo));

    List<Future<TravelQuote>> futures =
        exec.invokeAll(tasks, time, unit);

    List<TravelQuote> quotes =
        new ArrayList<TravelQuote>(tasks.size());
    Iterator<QuoteTask> taskIter = tasks.iterator();
    for (Future<TravelQuote> f : futures) {
        QuoteTask task = taskIter.next();
        try {
            quotes.add(f.get());
        } catch (ExecutionException e) {
            quotes.add(task.getFailureQuote(e.getCause()));
        } catch (CancellationException e) {
            quotes.add(task.getTimeoutQuote(e));
        }
    }

    Collections.sort(quotes, ranking);
    return quotes;
}


$:">.*) ZA $"&'*??"./%& "&0 C:(.0%8&
lL ls easy Lo sLarL Lasks and Lhreads. MosL of Lhe Llme we allow Lhem Lo declde when Lo sLop by leLLlng Lhem run Lo
compleLlon. SomeLlmes, however, we wanL Lo sLop Lasks or Lhreads earller Lhan Lhey would on Lhelr own, perhaps
because Lhe user cancelled an operaLlon or Lhe appllcaLlon needs Lo shuL down qulckly.
CeLLlng Lasks and Lhreads Lo sLop safely, qulckly, and rellably ls noL always easy. !ava does noL provlde any mechanlsm
for safely forclng a Lhread Lo sLop whaL lL ls dolng.
[1]
lnsLead, lL provldes lnLerrupLlon, a cooperaLlve mechanlsm LhaL leLs
one Lhread ask anoLher Lo sLop whaL lL ls dolng.
[1] The deprecated Thread.stop and suspend methods were an attempt to provide such a mechanism, but were quickly realized to be seriously
flawed and should be avoided. See
http://java.sun.com/j2se/1.5.0/docs/guide/misc/threadPrimitiveDeprecation.html for an explanation of the
problems with these methods.
The cooperative approach is required because we rarely want a task, thread, or service to stop immediately, since that
could leave shared data structures in an inconsistent state. Instead, tasks and services can be coded so that, when
requested, they clean up any work currently in progress and then terminate. This provides greater flexibility, since the
task code itself is usually better able to assess the cleanup required than is the code requesting cancellation.
End-of-lifecycle issues can complicate the design and implementation of tasks, services, and applications, and this
important element of program design is too often ignored. Dealing well with failure, shutdown, and cancellation is one
of the characteristics that distinguish a well-behaved application from one that merely works. This chapter addresses
mechanisms for cancellation and interruption, and how to code tasks and services to be responsive to cancellation
requests.
ZA@A J"3< $"&'*??"./%&
An acLlvlLy ls cancellable lf exLernal code can move lL Lo compleLlon before lLs normal compleLlon. 1here are a number
of reasons why you mlghL wanL Lo cancel an acLlvlLy:
userrequesLed cancellaLlon. 1he user cllcked on Lhe "cancel" buLLon ln a Cul appllcaLlon, or requesLed cancellaLlon
Lhrough a managemenL lnLerface such as !Mx (!ava ManagemenL LxLenslons).
1lmellmlLed acLlvlLles. An appllcaLlon searches a problem space for a flnlLe amounL of Llme and chooses Lhe besL
soluLlon found wlLhln LhaL Llme. When Lhe Llmer explres, any Lasks sLlll searchlng are cancelled.
AppllcaLlon evenLs. An appllcaLlon searches a problem space by decomposlng lL so LhaL dlfferenL Lasks search dlfferenL
reglons of Lhe problem space. When one Lask flnds a soluLlon, all oLher Lasks sLlll searchlng are cancelled.
Lrrors. A web crawler searches for relevanL pages, sLorlng pages or summary daLa Lo dlsk. When a crawler Lask
encounLers an error (for example, Lhe dlsk ls full), oLher crawllng Lasks are cancelled, posslbly recordlng Lhelr currenL
sLaLe so LhaL Lhey can be resLarLed laLer.
ShuLdown. When an appllcaLlon or servlce ls shuL down, someLhlng musL be done abouL work LhaL ls currenLly belng
processed or queued for processlng. ln a graceful shuLdown, Lasks currenLly ln progress mlghL be allowed Lo compleLe,
ln a more lmmedlaLe shuLdown, currenLly execuLlng Lasks mlghL be cancelled.
1here ls no safe way Lo preempLlvely sLop a Lhread ln !ava, and Lherefore no safe way Lo preempLlvely sLop a Lask. 1here
are only cooperaLlve mechanlsms, by whlch Lhe Lask and Lhe code requesLlng cancellaLlon follow an agreedupon
proLocol.
Cne such cooperaLlve mechanlsm ls seLLlng a "cancellaLlon requesLed" flag LhaL Lhe Lask checks perlodlcally, lf lL flnds
Lhe flag seL, Lhe Lask LermlnaLes early. @943&H&"&9$.29 ln LlsLlng 7.1, whlch enumeraLes prlme numbers unLll lL ls
cancelled, lllusLraLes Lhls Lechnlque. 1he *$"*&0 meLhod seLs Lhe *$"*&00&; flag, and Lhe maln loop polls Lhls flag
before searchlng for Lhe nexL prlme number. (lor Lhls Lo work rellably, *$"*&00&; musL be /20$.40&.)
Listing 7.2 shows a sample use of this class that lets the prime generator run for one second before cancelling it. The
generator won't necessarily stop after exactly one second, since there may be some delay between the time that
cancellation is requested and the time that the run loop next checks for cancellation. The cancel method is called from
a finally block to ensure that the prime generator is cancelled even if the call to sleep is interrupted. If cancel were
not called, the prime-seeking thread would run forever, consuming CPU cycles and preventing the JVM from exiting.

A task that wants to be cancellable must have a cancellation policy that specifies the "how", "when", and "what" of
cancellation: how other code can request cancellation, when the task checks whether cancellation has been requested,
and what actions the task takes in response to a cancellation request.
Consider the real-world example of stopping payment on a check. Banks have rules about how to submit a stop-
payment request, what responsiveness guarantees the bank makes in processing such requests, and what procedures it follows
when payment is actually stopped (such as notifying the other bank involved in the transaction and assessing a fee
against the payer's account). Taken together, these procedures and guarantees comprise the cancellation policy for
check payment.
Listing 7.1. Using a Volatile Field to Hold Cancellation State.
@ThreadSafe
public class PrimeGenerator implements Runnable {
    @GuardedBy("this")
    private final List<BigInteger> primes
        = new ArrayList<BigInteger>();
    private volatile boolean cancelled;

    public void run() {
        BigInteger p = BigInteger.ONE;
        while (!cancelled) {
            p = p.nextProbablePrime();
            synchronized (this) {
                primes.add(p);
            }
        }
    }

    public void cancel() { cancelled = true; }

    public synchronized List<BigInteger> get() {
        return new ArrayList<BigInteger>(primes);
    }
}
Listing 7.2. Generating a Second's Worth of Prime Numbers.
54#.RK46I".&6&9T $'&*2";J%@943&#UV .892?# I".&99)G.&;L-*&G.42" W
@943&H&"&9$.29 6&"&9$.29 ] "&? @943&H&"&9$.29UV[
"&? =89&$;U6&"&9$.29V+#.$9.UV[
.97 W
'LDJ,A'+#0&&GU^V[
\ %4"$007 W
6&"&9$.29+*$"*&0UV[
\
9&.)9" 6&"&9$.29+6&.UV[
\
@943&H&"&9$.29 uses a slmple cancellaLlon pollcy: cllenL code requesLs cancellaLlon by calllng *$"*&0, @943&H&"&9$.29
checks for cancellaLlon once per prlme found and exlLs when lL deLecLs cancellaLlon has been requesLed.
7.1.1. Interruption
The cancellation mechanism in PrimeGenerator will eventually cause the prime-seeking task to exit, but it might take a
while. If, however, a task that uses this approach calls a blocking method such as BlockingQueue.put, we could have a
more serious problem: the task might never check the cancellation flag and therefore might never terminate.
BrokenPrimeProducer in Listing 7.3 illustrates this problem. The producer thread generates primes and places them on
a blocking queue. If the producer gets ahead of the consumer, the queue will fill up and put will block. What happens if
the consumer tries to cancel the producer task while it is blocked in put? It can call cancel, which will set the cancelled
flag, but the producer will never check the flag because it will never emerge from the blocking put (because the
consumer has stopped retrieving primes from the queue).
As we hinted in Chapter 5, certain blocking library methods support interruption. Thread interruption is a cooperative
mechanism for a thread to signal another thread that it should, at its convenience and if it feels like it, stop what it is
doing and do something else.
There is nothing in the API or language specification that ties interruption to any specific cancellation semantics, but in
practice, using interruption for anything but cancellation is fragile and difficult to sustain in larger applications.
Each thread has a boolean interrupted status; interrupting a thread sets its interrupted status to true. Thread contains methods for interrupting a thread and querying the interrupted status of a thread, as shown in Listing 7.4. The interrupt method interrupts the target thread, and isInterrupted returns the interrupted status of the target thread. The poorly named static interrupted method clears the interrupted status of the current thread and returns its previous value; this is the only way to clear the interrupted status.
Blocking library methods like Thread.sleep and Object.wait try to detect when a thread has been interrupted and return early. They respond to interruption by clearing the interrupted status and throwing InterruptedException, indicating that the blocking operation completed early due to interruption. The JVM makes no guarantees on how quickly a blocking method will detect interruption, but in practice this happens reasonably quickly.
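For illustration, here is a minimal sketch (not one of the numbered listings) of that behavior: sleep returns well before its time is up when the thread is interrupted, clearing the interrupted status and throwing InterruptedException.

void demoInterruptibleSleep() throws InterruptedException {
    Thread sleeper = new Thread(new Runnable() {
        public void run() {
            try {
                Thread.sleep(10000);   // blocks, but remains responsive to interruption
            } catch (InterruptedException e) {
                // sleep cleared the interrupted status before throwing
            }
        }
    });
    sleeper.start();
    sleeper.interrupt();   // sleeper wakes up long before ten seconds elapse
    sleeper.join();
}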
Listing 7.3. Unreliable Cancellation that can Leave Producers Stuck in a Blocking Operation. Don't Do This.

class BrokenPrimeProducer extends Thread {
    private final BlockingQueue<BigInteger> queue;
    private volatile boolean cancelled = false;

    BrokenPrimeProducer(BlockingQueue<BigInteger> queue) {
        this.queue = queue;
    }

    public void run() {
        try {
            BigInteger p = BigInteger.ONE;
            while (!cancelled)
                queue.put(p = p.nextProbablePrime());
        } catch (InterruptedException consumed) { }
    }

    public void cancel() { cancelled = true; }
}

/24; *2"#)3&@943&#UV .892?# I".&99)G.&;L-*&G.42" W
K02*C4"6O)&)&RK46I".&6&9T G943&# ] +++[
K92C&"@943&@92;)*&9 G92;)*&9 ] "&? K92C&"@943&@92;)*&9UG943&#V[
G92;)*&9+#.$9.UV[
.97 W
?840& U"&&;F29&@943&#UVV
*2"#)3&UG943&#+.$C&UVV[
\ %4"$007 W
G92;)*&9+*$"*&0UV[
\
\
Listing 7.4. Interruption Methods in Thread.

public class Thread {
    public void interrupt() { ... }
    public boolean isInterrupted() { ... }
    public static boolean interrupted() { ... }
    ...
}
If a thread is interrupted when it is not blocked, its interrupted status is set, and it is up to the activity being cancelled to poll the interrupted status to detect interruption. In this way interruption is "sticky": if it doesn't trigger an InterruptedException, evidence of interruption persists until someone deliberately clears the interrupted status.
Calling interrupt does not necessarily stop the target thread from doing what it is doing; it merely delivers the message that interruption has been requested.
A good way to think about interruption is that it does not actually interrupt a running thread; it just requests that the thread interrupt itself at the next convenient opportunity. (These opportunities are called cancellation points.) Some methods, such as wait, sleep, and join, take such requests seriously, throwing an exception when they receive an interrupt request or encounter an already set interrupt status upon entry. Well-behaved methods may totally ignore such requests so long as they leave the interruption request in place so that calling code can do something with it. Poorly behaved methods swallow the interrupt request, thus denying code further up the call stack the opportunity to act on it.
The static interrupted method should be used with caution, because it clears the current thread's interrupted status. If you call interrupted and it returns true, unless you are planning to swallow the interruption, you should do something with it: either throw InterruptedException or restore the interrupted status by calling interrupt again, as in Listing 5.10 on page 94.
K92C&"@943&@92;)*&9 lllusLraLes how cusLom cancellaLlon mechanlsms do noL always lnLeracL well wlLh blocklng llbrary
meLhods. lf you code your Lasks Lo be responslve Lo lnLerrupLlon, you can use lnLerrupLlon as your cancellaLlon
mechanlsm and Lake advanLage of Lhe lnLerrupLlon supporL provlded by many llbrary classes.
lnLerrupLlon ls usually Lhe mosL senslble way Lo lmplemenL cancellaLlon.
K92C&"@943&@92;)*&9 can be easlly flxed (and slmpllfled) by uslng lnLerrupLlon lnsLead of a boolean flag Lo requesL
cancellaLlon, as shown ln LlsLlng 7.3. 1here are Lwo polnLs ln each loop lLeraLlon where lnLerrupLlon may be deLecLed: ln
Lhe blocklng G). call, and by expllclLly polllng Lhe lnLerrupLed sLaLus ln Lhe loop header. 1he expllclL LesL ls noL sLrlcLly
necessary here because of Lhe blocklng G). call, buL lL makes @943&@92;)*&9 more responslve Lo lnLerrupLlon because
lL checks for lnLerrupLlon before sLarLlng Lhe lengLhy Lask of searchlng for a prlme, raLher Lhan afLer. When calls Lo
lnLerrupLlble blocklng meLhods are noL frequenL enough Lo dellver Lhe deslred responslveness, expllclLly LesLlng Lhe
lnLerrupLed sLaLus can help.
Listing 7.5. Using Interruption for Cancellation.

class PrimeProducer extends Thread {
    private final BlockingQueue<BigInteger> queue;

    PrimeProducer(BlockingQueue<BigInteger> queue) {
        this.queue = queue;
    }

    public void run() {
        try {
            BigInteger p = BigInteger.ONE;
            while (!Thread.currentThread().isInterrupted())
                queue.put(p = p.nextProbablePrime());
        } catch (InterruptedException consumed) {
            /* Allow thread to exit */
        }
    }

    public void cancel() { interrupt(); }
}
7.1.2. Interruption Policies
Just as tasks should have a cancellation policy, threads should have an interruption policy. An interruption policy determines how a thread interprets an interruption request: what it does (if anything) when one is detected, what units of work are considered atomic with respect to interruption, and how quickly it reacts to interruption.
The most sensible interruption policy is some form of thread-level or service-level cancellation: exit as quickly as practical, cleaning up if necessary, and possibly notifying some owning entity that the thread is exiting. It is possible to establish other interruption policies, such as pausing or resuming a service, but threads or thread pools with nonstandard interruption policies may need to be restricted to tasks that have been written with an awareness of the policy.
It is important to distinguish between how tasks and threads should react to interruption. A single interrupt request may have more than one desired recipient; interrupting a worker thread in a thread pool can mean both "cancel the current task" and "shut down the worker thread".
Tasks do not execute in threads they own; they borrow threads owned by a service such as a thread pool. Code that doesn't own the thread (for a thread pool, any code outside of the thread pool implementation) should be careful to preserve the interrupted status so that the owning code can eventually act on it, even if the "guest" code acts on the interruption as well. (If you are house-sitting for someone, you don't throw out the mail that comes while they're away; you save it and let them deal with it when they get back, even if you do read their magazines.)
This is why most blocking library methods simply throw InterruptedException in response to an interrupt. They will never execute in a thread they own, so they implement the most reasonable cancellation policy for task or library code: get out of the way as quickly as possible and communicate the interruption back to the caller so that code higher up on the call stack can take further action.
A task needn't necessarily drop everything when it detects an interruption request; it can choose to postpone it until a more opportune time by remembering that it was interrupted, finishing the task it was performing, and then throwing InterruptedException or otherwise indicating interruption. This technique can protect data structures from corruption when an activity is interrupted in the middle of an update.
A task should not assume anything about the interruption policy of its executing thread unless it is explicitly designed to run within a service that has a specific interruption policy. Whether a task interprets interruption as cancellation or takes some other action on interruption, it should take care to preserve the executing thread's interruption status. If it is not simply going to propagate InterruptedException to its caller, it should restore the interruption status after catching InterruptedException:
Thread.currentThread().interrupt();
Just as task code should not make assumptions about what interruption means to its executing thread, cancellation code should not make assumptions about the interruption policy of arbitrary threads. A thread should be interrupted only by its owner; the owner can encapsulate knowledge of the thread's interruption policy in an appropriate cancellation mechanism such as a shutdown method.
Because each thread has its own interruption policy, you should not interrupt a thread unless you know what interruption means to that thread.
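As a sketch of what such encapsulation might look like (the class and method names here are hypothetical, not from the text), a service that owns its worker thread can hide interruption behind a shutdown method so that clients never interrupt the thread directly:

public class PollingService {
    private final Thread worker = new Thread(new Runnable() {
        public void run() {
            // the owner defines the policy: interruption means "stop polling"
            while (!Thread.currentThread().isInterrupted())
                pollOnce();
        }
    });

    public void start()    { worker.start(); }
    public void shutdown() { worker.interrupt(); }  // encapsulates the policy

    private void pollOnce() { /* do one unit of work */ }
}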
Critics have derided the Java interruption facility because it does not provide a preemptive interruption capability and yet forces developers to handle InterruptedException. However, the ability to postpone an interruption request enables developers to craft flexible interruption policies that balance responsiveness and robustness as appropriate for the application.
7.1.3. Responding to Interruption
As mentioned in Section 5.4, when you call an interruptible blocking method such as Thread.sleep or BlockingQueue.put, there are two practical strategies for handling InterruptedException:
- Propagate the exception (possibly after some task-specific cleanup), making your method an interruptible blocking method, too; or
- Restore the interruption status so that code higher up on the call stack can deal with it.
Propagating InterruptedException can be as easy as adding InterruptedException to the throws clause, as shown by getNextTask in Listing 7.6.
Listing 7.6. Propagating InterruptedException to Callers.

BlockingQueue<Task> queue;
...
public Task getNextTask() throws InterruptedException {
    return queue.take();
}
If you don't want to or cannot propagate InterruptedException (perhaps because your task is defined by a Runnable), you need to find another way to preserve the interruption request. The standard way to do this is to restore the interrupted status by calling interrupt again. What you should not do is swallow the InterruptedException by catching it and doing nothing in the catch block, unless your code is actually implementing the interruption policy for a thread. PrimeProducer swallows the interrupt, but does so with the knowledge that the thread is about to terminate and that therefore there is no code higher up on the call stack that needs to know about the interruption. Most code does not know what thread it will run in and so should preserve the interrupted status.
Only code that implements a thread's interruption policy may swallow an interruption request. General-purpose task and library code should never swallow interruption requests.
Activities that do not support cancellation but still call interruptible blocking methods will have to call them in a loop, retrying when interruption is detected. In this case, they should save the interruption status locally and restore it just before returning, as shown in Listing 7.7, rather than immediately upon catching InterruptedException. Setting the interrupted status too early could result in an infinite loop, because most interruptible blocking methods check the interrupted status on entry and throw InterruptedException immediately if it is set. (Interruptible methods usually poll for interruption before blocking or doing any significant work, so as to be as responsive to interruption as possible.)
If your code does not call interruptible blocking methods, it can still be made responsive to interruption by polling the current thread's interrupted status throughout the task code. Choosing a polling frequency is a tradeoff between efficiency and responsiveness. If you have high responsiveness requirements, you cannot call potentially long-running methods that are not themselves responsive to interruption, potentially restricting your options for calling library code.
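A minimal sketch of such polling (not one of the numbered listings; doUnitOfWork is a hypothetical CPU-bound step) checks the interrupted status every so many iterations rather than on each one, trading a little responsiveness for less polling overhead:

public void run() {
    for (long i = 0; ; i++) {
        // poll every 65,536 iterations; treat interruption as cancellation
        if ((i & 0xFFFF) == 0 && Thread.currentThread().isInterrupted())
            return;
        doUnitOfWork();
    }
}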
Cancellation can involve state other than the interruption status; interruption can be used to get the thread's attention, and information stored elsewhere by the interrupting thread can be used to provide further instructions for the interrupted thread. (Be sure to use synchronization when accessing that information.)
Listing 7.7. Noncancelable Task that Restores Interruption Before Exit.

public Task getNextTask(BlockingQueue<Task> queue) {
    boolean interrupted = false;
    try {
        while (true) {
            try {
                return queue.take();
            } catch (InterruptedException e) {
                interrupted = true;
                // fall through and retry
            }
        }
    } finally {
        if (interrupted)
            Thread.currentThread().interrupt();
    }
}
For example, when a worker thread owned by a ThreadPoolExecutor detects interruption, it checks whether the pool is being shut down. If so, it performs some pool cleanup before terminating; otherwise it may create a new thread to restore the thread pool to the desired size.
7.1.4. Example: Timed Run
Many problems can take forever to solve (e.g., enumerate all the prime numbers); for others, the answer might be found reasonably quickly but also might take forever. Being able to say "spend up to ten minutes looking for the answer" or "enumerate all the answers you can in ten minutes" can be useful in these situations.
The aSecondOfPrimes method in Listing 7.2 starts a PrimeGenerator and interrupts it after a second. While the PrimeGenerator might take somewhat longer than a second to stop, it will eventually notice the interrupt and stop, allowing the thread to terminate. But another aspect of executing a task is that you want to find out if the task throws an exception. If PrimeGenerator throws an unchecked exception before the timeout expires, it will probably go unnoticed, since the prime generator runs in a separate thread that does not explicitly handle exceptions.
Listing 7.8 shows an attempt at running an arbitrary Runnable for a given amount of time. It runs the task in the calling thread and schedules a cancellation task to interrupt it after a given time interval. This addresses the problem of unchecked exceptions thrown from the task, since they can then be caught by the caller of timedRun.
This is an appealingly simple approach, but it violates the rules: you should know a thread's interruption policy before interrupting it. Since timedRun can be called from an arbitrary thread, it cannot know the calling thread's interruption policy. If the task completes before the timeout, the cancellation task that interrupts the thread in which timedRun was called could go off after timedRun has returned to its caller. We don't know what code will be running when that happens, but the result won't be good. (It is possible but surprisingly tricky to eliminate this risk by using the ScheduledFuture returned by schedule to cancel the cancellation task.)
Listing 7.8. Scheduling an Interrupt on a Borrowed Thread. Don't Do This.

private static final ScheduledExecutorService cancelExec = ...;

public static void timedRun(Runnable r,
                            long timeout, TimeUnit unit) {
    final Thread taskThread = Thread.currentThread();
    cancelExec.schedule(new Runnable() {
        public void run() { taskThread.interrupt(); }
    }, timeout, unit);
    r.run();
}
Further, if the task is not responsive to interruption, timedRun will not return until the task finishes, which may be long after the desired timeout (or even not at all). A timed run service that doesn't return after the specified time is likely to be irritating to its callers.

Listing 7.9 addresses the exception-handling problem of aSecondOfPrimes and the problems with the previous attempt. The thread created to run the task can have its own execution policy, and even if the task doesn't respond to the interrupt, the timed run method can still return to its caller. After starting the task thread, timedRun executes a timed join with the newly created thread. After join returns, it checks if an exception was thrown from the task and if so, rethrows it in the thread calling timedRun. The saved Throwable is shared between the two threads, and so is declared volatile to safely publish it from the task thread to the timedRun thread.
This version addresses the problems in the previous examples, but because it relies on a timed join, it shares a deficiency with join: we don't know if control was returned because the thread exited normally or because the join timed out.[2]
[2] This is a flaw in the Thread API, because whether or not the join completes successfully has memory visibility consequences in the Java Memory Model, but join does not return a status indicating whether it was successful.
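One partial workaround, sketched below (not one of the numbered listings; it continues the names from timedRun in Listing 7.9), is to poll Thread.isAlive after the timed join: if the thread is still alive, join must have timed out, and detecting termination via isAlive carries the same visibility guarantee as a successful join.

taskThread.join(unit.toMillis(timeout));
if (taskThread.isAlive()) {
    // join timed out and the task is still running; apply your timeout policy
    taskThread.interrupt();
} else {
    // the thread terminated, so its final actions are visible here
    task.rethrow();
}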
7.1.5. Cancellation Via Future
We've already used an abstraction for managing the lifecycle of a task, dealing with exceptions, and facilitating cancellation: Future. Following the general principle that it is better to use existing library classes than to roll your own, let's build timedRun using Future and the task execution framework.
Listing 7.9. Interrupting a Task in a Dedicated Thread.

public static void timedRun(final Runnable r,
                            long timeout, TimeUnit unit)
        throws InterruptedException {
    class RethrowableTask implements Runnable {
        private volatile Throwable t;
        public void run() {
            try { r.run(); }
            catch (Throwable t) { this.t = t; }
        }
        void rethrow() {
            if (t != null)
                throw launderThrowable(t);
        }
    }

    RethrowableTask task = new RethrowableTask();
    final Thread taskThread = new Thread(task);
    taskThread.start();
    cancelExec.schedule(new Runnable() {
        public void run() { taskThread.interrupt(); }
    }, timeout, unit);
    taskThread.join(unit.toMillis(timeout));
    task.rethrow();
}
ExecutorService.submit returns a Future describing the task. Future has a cancel method that takes a boolean argument, mayInterruptIfRunning, and returns a value indicating whether the cancellation attempt was successful. (This tells you only whether it was able to deliver the interruption, not whether the task detected and acted on it.) When mayInterruptIfRunning is true and the task is currently running in some thread, then that thread is interrupted. Setting this argument to false means "don't run this task if it hasn't started yet", and should be used for tasks that are not designed to handle interruption.
Since you shouldn't interrupt a thread unless you know its interruption policy, when is it OK to call cancel with an argument of true? The task execution threads created by the standard Executor implementations implement an interruption policy that lets tasks be cancelled using interruption, so it is safe to set mayInterruptIfRunning when cancelling tasks through their Futures when they are running in a standard Executor. You should not interrupt a pool thread directly when attempting to cancel a task, because you won't know what task is running when the interrupt request is delivered; do this only through the task's Future. This is yet another reason to code tasks to treat interruption as a cancellation request: then they can be cancelled through their Futures.
Listing 7.10 shows a version of timedRun that submits the task to an ExecutorService and retrieves the result with a timed Future.get. If get terminates with a TimeoutException, the task is cancelled via its Future. (To simplify coding, this version calls Future.cancel unconditionally in a finally block, taking advantage of the fact that cancelling a completed task has no effect.) If the underlying computation throws an exception prior to cancellation, it is rethrown from timedRun, which is the most convenient way for the caller to deal with the exception. Listing 7.10 also illustrates another good practice: cancelling tasks whose result is no longer needed. (This technique was also used in Listing 6.13 on page 128 and Listing 6.16 on page 132.)
Listing 7.10. Cancelling a Task Using Future.

public static void timedRun(Runnable r,
                            long timeout, TimeUnit unit)
        throws InterruptedException {
    Future<?> task = taskExec.submit(r);
    try {
        task.get(timeout, unit);
    } catch (TimeoutException e) {
        // task will be cancelled below
    } catch (ExecutionException e) {
        // exception thrown in task; rethrow
        throw launderThrowable(e.getCause());
    } finally {
        // Harmless if task already completed
        task.cancel(true);  // interrupt if running
    }
}

When Future.get throws InterruptedException or TimeoutException and you know that the result is no longer needed by the program, cancel the task with Future.cancel.

7.1.6. Dealing with Non-interruptible Blocking
Many blocking library methods respond to interruption by returning early and throwing InterruptedException, which makes it easier to build tasks that are responsive to cancellation. However, not all blocking methods or blocking mechanisms are responsive to interruption; if a thread is blocked performing synchronous socket I/O or waiting to acquire an intrinsic lock, interruption has no effect other than setting the thread's interrupted status. We can sometimes convince threads blocked in noninterruptible activities to stop by means similar to interruption, but this requires greater awareness of why the thread is blocked.
Synchronous socket I/O in java.io. The common form of blocking I/O in server applications is reading or writing to a socket. Unfortunately, the read and write methods in InputStream and OutputStream are not responsive to interruption, but closing the underlying socket makes any threads blocked in read or write throw a SocketException.
Synchronous I/O in java.nio. Interrupting a thread waiting on an InterruptibleChannel causes it to throw ClosedByInterruptException and close the channel (and also causes all other threads blocked on the channel to throw ClosedByInterruptException). Closing an InterruptibleChannel causes threads blocked on channel operations to throw AsynchronousCloseException. Most standard Channels implement InterruptibleChannel.
Asynchronous I/O with Selector. If a thread is blocked in Selector.select (in java.nio.channels), calling wakeup causes it to return prematurely; closing the selector causes threads blocked in select to throw a ClosedSelectorException.
Lock acquisition. If a thread is blocked waiting for an intrinsic lock, there is nothing you can do to stop it short of ensuring that it eventually acquires the lock and makes enough progress that you can get its attention some other way. However, the explicit Lock classes offer the lockInterruptibly method, which allows you to wait for a lock and still be responsive to interrupts; see Chapter 13.
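For the lock-acquisition case, a minimal sketch (not one of the numbered listings) of interruptible acquisition with an explicit Lock looks like this:

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class InterruptibleLocking {
    private final Lock lock = new ReentrantLock();

    public void doWithLock() throws InterruptedException {
        lock.lockInterruptibly();   // waiting here remains responsive to interruption
        try {
            // access state guarded by lock
        } finally {
            lock.unlock();
        }
    }
}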
ReaderThread in Listing 7.11 shows a technique for encapsulating nonstandard cancellation. ReaderThread manages a single socket connection, reading synchronously from the socket and passing any data received to processBuffer. To facilitate terminating a user connection or shutting down the server, ReaderThread overrides interrupt to both deliver a standard interrupt and close the underlying socket; thus interrupting a ReaderThread makes it stop what it is doing whether it is blocked in read or in an interruptible blocking method.
7.1.7. Encapsulating Nonstandard Cancellation with newTaskFor
The technique used in ReaderThread to encapsulate nonstandard cancellation can be refined using the newTaskFor hook added to ThreadPoolExecutor in Java 6. When a Callable is submitted to an ExecutorService, submit returns a Future that can be used to cancel the task. The newTaskFor hook is a factory method that creates the Future representing the task. It returns a RunnableFuture, an interface that extends both Future and Runnable (and is implemented by FutureTask).
Customizing the task Future allows you to override Future.cancel. Custom cancellation code can perform logging or gather statistics on cancellation, and can also be used to cancel activities that are not responsive to interruption.

ReaderThread encapsulates cancellation of socket-using threads by overriding interrupt; the same can be done for tasks by overriding Future.cancel.
CancellableTask in Listing 7.12 defines a CancellableTask interface that extends Callable and adds a cancel method and a newTask factory method for constructing a RunnableFuture. CancellingExecutor extends ThreadPoolExecutor, and overrides newTaskFor to let a CancellableTask create its own Future.
Listing 7.11. Encapsulating Nonstandard Cancellation in a Thread by Overriding interrupt.

public class ReaderThread extends Thread {
    private final Socket socket;
    private final InputStream in;

    public ReaderThread(Socket socket) throws IOException {
        this.socket = socket;
        this.in = socket.getInputStream();
    }

    public void interrupt() {
        try {
            socket.close();
        }
        catch (IOException ignored) { }
        finally {
            super.interrupt();
        }
    }

    public void run() {
        try {
            byte[] buf = new byte[BUFSZ];
            while (true) {
                int count = in.read(buf);
                if (count < 0)
                    break;
                else if (count > 0)
                    processBuffer(buf, count);
            }
        } catch (IOException e) { /* Allow thread to exit */ }
    }
}
'2*C&.!#4"6=$#C lmplemenLs D$"*&00$M0&=$#C and deflnes <).)9&+*$"*&0 Lo close Lhe sockeL as well as call
#)G&9+*$"*&0. lf a '2*C&.!#4"6=$#C ls cancelled Lhrough lLs <).)9&, Lhe sockeL ls closed and Lhe execuLlng Lhread ls
lnLerrupLed. 1hls lncreases Lhe Lask's responslveness Lo cancellaLlon: noL only can lL safely call lnLerrupLlble blocklng
meLhods whlle remalnlng responslve Lo cancellaLlon, buL lL can also call blocklng sockeL l/C meLhods.
7.2. Stopping a Thread-Based Service
Applications commonly create services that own threads, such as thread pools, and the lifetime of these services is usually longer than that of the method that creates them. If the application is to shut down gracefully, the threads owned by these services need to be terminated. Since there is no preemptive way to stop a thread, they must instead be persuaded to shut down on their own.
Sensible encapsulation practices dictate that you should not manipulate a thread (interrupt it, modify its priority, etc.) unless you own it. The thread API has no formal concept of thread ownership: a thread is represented with a Thread object that can be freely shared like any other object. However, it makes sense to think of a thread as having an owner, and this is usually the class that created the thread. So a thread pool owns its worker threads, and if those threads need to be interrupted, the thread pool should take care of it.
As with any other encapsulated object, thread ownership is not transitive: the application may own the service and the service may own the worker threads, but the application doesn't own the worker threads and therefore should not attempt to stop them directly. Instead, the service should provide lifecycle methods for shutting itself down that also shut down the owned threads; then the application can shut down the service, and the service can shut down the threads. ExecutorService provides the shutdown and shutdownNow methods; other thread-owning services should provide a similar shutdown mechanism.
Provide lifecycle methods whenever a thread-owning service has a lifetime longer than that of the method that created it.


7.2.1. Example: A Logging Service
Most server applications use logging, which can be as simple as inserting println statements into the code. Stream classes like PrintWriter are thread-safe, so this simple approach would require no explicit synchronization.[3] However, as we'll see in Section 11.6, inline logging can have some performance costs in high-volume applications. Another alternative is to have the log call queue the log message for processing by another thread.
[3] If you are logging multiple lines as part of a single log message, you may need to use additional client-side locking to prevent undesirable interleaving of output from multiple threads. If two threads logged multiline stack traces to the same stream with one println call per line, the results would be interleaved unpredictably, and could easily look like one large but meaningless stack trace.
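A minimal sketch of the client-side locking the footnote describes (not one of the numbered listings): route every multi-line message through a method that holds a single lock for the whole message, so lines from different threads cannot interleave. This works only if all threads that write to the stream do so under the same lock.

private final Object logLock = new Object();

public void logStackTrace(PrintWriter writer, Throwable t) {
    synchronized (logLock) {   // one lock guards the entire multi-line message
        writer.println(t);
        for (StackTraceElement frame : t.getStackTrace())
            writer.println("    at " + frame);
    }
}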
Listing 7.12. Encapsulating Nonstandard Cancellation in a Task with newTaskFor.

public interface CancellableTask<T> extends Callable<T> {
    void cancel();
    RunnableFuture<T> newTask();
}

@ThreadSafe
public class CancellingExecutor extends ThreadPoolExecutor {
    ...
    protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
        if (callable instanceof CancellableTask)
            return ((CancellableTask<T>) callable).newTask();
        else
            return super.newTaskFor(callable);
    }
}

public abstract class SocketUsingTask<T>
        implements CancellableTask<T> {
    @GuardedBy("this") private Socket socket;

    protected synchronized void setSocket(Socket s) { socket = s; }

    public synchronized void cancel() {
        try {
            if (socket != null)
                socket.close();
        } catch (IOException ignored) { }
    }

    public RunnableFuture<T> newTask() {
        return new FutureTask<T>(this) {
            public boolean cancel(boolean mayInterruptIfRunning) {
                try {
                    SocketUsingTask.this.cancel();
                } finally {
                    return super.cancel(mayInterruptIfRunning);
                }
            }
        };
    }
}
LogWriter in Listing 7.13 shows a simple logging service in which the logging activity is moved to a separate logger thread. Instead of having the thread that produces the message write it directly to the output stream, LogWriter hands it off to the logger thread via a BlockingQueue and the logger thread writes it out. This is a multiple-producer, single-consumer design: any activity calling log is acting as a producer, and the background logger thread is the consumer. If the logger thread falls behind, the BlockingQueue eventually blocks the producers until the logger thread catches up.

Listing 7.13. Producer-Consumer Logging Service with No Shutdown Support.

public class LogWriter {
    private final BlockingQueue<String> queue;
    private final LoggerThread logger;

    public LogWriter(Writer writer) {
        this.queue = new LinkedBlockingQueue<String>(CAPACITY);
        this.logger = new LoggerThread(writer);
    }

    public void start() { logger.start(); }

    public void log(String msg) throws InterruptedException {
        queue.put(msg);
    }

    private class LoggerThread extends Thread {
        private final PrintWriter writer;
        ...
        public void run() {
            try {
                while (true)
                    writer.println(queue.take());
            } catch (InterruptedException ignored) {
            } finally {
                writer.close();
            }
        }
    }
}
For a service like LogWriter to be useful in production, we need a way to terminate the logger thread so it does not prevent the JVM from shutting down normally. Stopping the logger thread is easy enough, since it repeatedly calls take, which is responsive to interruption; if the logger thread is modified to exit on catching InterruptedException, then interrupting the logger thread stops the service.
However, simply making the logger thread exit is not a very satisfying shutdown mechanism. Such an abrupt shutdown discards log messages that might be waiting to be written to the log, but, more importantly, threads blocked in log because the queue is full will never become unblocked. Cancelling a producer-consumer activity requires cancelling both the producers and the consumers. Interrupting the logger thread deals with the consumer, but because the producers in this case are not dedicated threads, cancelling them is harder.
Another approach to shutting down LogWriter would be to set a "shutdown requested" flag to prevent further messages from being submitted, as shown in Listing 7.14. The consumer could then drain the queue upon being notified that shutdown has been requested, writing out any pending messages and unblocking any producers blocked in log. However, this approach has race conditions that make it unreliable. The implementation of log is a check-then-act sequence: producers could observe that the service has not yet been shut down but still queue messages after the shutdown, again with the risk that the producer might get blocked in log and never become unblocked. There are tricks that reduce the likelihood of this (like having the consumer wait several seconds before declaring the queue drained), but these do not change the fundamental problem, merely the likelihood that it will cause a failure.
Listing 7.14. Unreliable Way to Add Shutdown Support to the Logging Service.

public void log(String msg) throws InterruptedException {
    if (!shutdownRequested)
        queue.put(msg);
    else
        throw new IllegalStateException("logger is shut down");
}
The way to provide reliable shutdown for LogWriter is to fix the race condition, which means making the submission of a new log message atomic. But we don't want to hold a lock while trying to enqueue the message, since put could block. Instead, we can atomically check for shutdown and conditionally increment a counter to "reserve" the right to submit a message, as shown in LogService in Listing 7.15.

7.2.2. ExecutorService Shutdown
In Section 6.2.4, we saw that ExecutorService offers two ways to shut down: graceful shutdown with shutdown, and abrupt shutdown with shutdownNow. In an abrupt shutdown, shutdownNow returns the list of tasks that had not yet started after attempting to cancel all actively executing tasks.
Listing 7.15. Adding Reliable Cancellation to LogWriter.

public class LogService {
    private final BlockingQueue<String> queue;
    private final LoggerThread loggerThread;
    private final PrintWriter writer;
    @GuardedBy("this") private boolean isShutdown;
    @GuardedBy("this") private int reservations;

    public void start() { loggerThread.start(); }

    public void stop() {
        synchronized (this) { isShutdown = true; }
        loggerThread.interrupt();
    }

    public void log(String msg) throws InterruptedException {
        synchronized (this) {
            if (isShutdown)
                throw new IllegalStateException(...);
            ++reservations;
        }
        queue.put(msg);
    }

    private class LoggerThread extends Thread {
        public void run() {
            try {
                while (true) {
                    try {
                        synchronized (LogService.this) {
                            if (isShutdown && reservations == 0)
                                break;
                        }
                        String msg = queue.take();
                        synchronized (LogService.this) { --reservations; }
                        writer.println(msg);
                    } catch (InterruptedException e) { /* retry */ }
                }
            } finally {
                writer.close();
            }
        }
    }
}
The two different termination options offer a tradeoff between safety and responsiveness: abrupt termination is faster but riskier because tasks may be interrupted in the middle of execution, and normal termination is slower but safer because the ExecutorService does not shut down until all queued tasks are processed. Other thread-owning services should consider providing a similar choice of shutdown modes.
Simple programs can get away with starting and shutting down a global ExecutorService from main. More sophisticated programs are likely to encapsulate an ExecutorService behind a higher-level service that provides its own lifecycle methods, such as the variant of LogService in Listing 7.16 that delegates to an ExecutorService instead of managing its own threads. Encapsulating an ExecutorService extends the ownership chain from application to service to thread by adding another link; each member of the chain manages the lifecycle of the services or threads it owns.

Listing 7.16. Logging Service that Uses an ExecutorService.

public class LogService {
    private final ExecutorService exec = newSingleThreadExecutor();
    ...
    public void start() { }

    public void stop() throws InterruptedException {
        try {
            exec.shutdown();
            exec.awaitTermination(TIMEOUT, UNIT);
        } finally {
            writer.close();
        }
    }
    public void log(String msg) {
        try {
            exec.execute(new WriteTask(msg));
        } catch (RejectedExecutionException ignored) { }
    }
}
7.2.3. Poison Pills
Another way to convince a producer-consumer service to shut down is with a poison pill: a recognizable object placed on the queue that means "when you get this, stop." With a FIFO queue, poison pills ensure that consumers finish the work on their queue before shutting down, since any work submitted prior to submitting the poison pill will be retrieved before the pill; producers should not submit any work after putting a poison pill on the queue. IndexingService in Listings 7.17, 7.18, and 7.19 shows a single-producer, single-consumer version of the desktop search example from Listing 5.8 on page 91 that uses a poison pill to shut down the service.
Listing 7.17. Shutdown with Poison Pill.

public class IndexingService {
    private static final File POISON = new File("");
    private final IndexerThread consumer = new IndexerThread();
    private final CrawlerThread producer = new CrawlerThread();
    private final BlockingQueue<File> queue;
    private final FileFilter fileFilter;
    private final File root;

    class CrawlerThread extends Thread { /* Listing 7.18 */ }
    class IndexerThread extends Thread { /* Listing 7.19 */ }

    public void start() {
        producer.start();
        consumer.start();
    }

    public void stop() { producer.interrupt(); }

    public void awaitTermination() throws InterruptedException {
        consumer.join();
    }
}
Poison pills work only when the number of producers and consumers is known. The approach in IndexingService can be extended to multiple producers by having each producer place a pill on the queue and having the consumer stop only when it has received one pill per producer. It can be extended to multiple consumers by having each producer place one pill per consumer on the queue, though this can get unwieldy with large numbers of producers and consumers. Poison pills work reliably only with unbounded queues.
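A minimal sketch of the multiple-producer extension (not one of the numbered listings; nProducers is a hypothetical field holding the known producer count) has the consumer count pills and stop only after seeing one per producer:

public void run() {
    int pills = 0;
    try {
        while (true) {
            File file = queue.take();
            if (file == POISON) {
                if (++pills == nProducers)   // one pill per producer
                    break;
            } else
                indexFile(file);
        }
    } catch (InterruptedException consumed) { }
}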
7.2.4. Example: A One-shot Execution Service
If a method needs to process a batch of tasks and does not return until all the tasks are finished, it can simplify service lifecycle management by using a private Executor whose lifetime is bounded by that method. (The invokeAll and invokeAny methods can often be useful in such situations.)
The checkMail method in Listing 7.20 checks for new mail in parallel on a number of hosts. It creates a private executor and submits a task for each host; it then shuts down the executor and waits for termination, which occurs when all the mail-checking tasks have completed.[4]
[4] The reason an AtomicBoolean is used instead of a volatile boolean is that in order to access the hasNewMail flag from the inner Runnable, it would have to be final, which would preclude modifying it.

Listing 7.18. Producer Thread for IndexingService.

public class CrawlerThread extends Thread {
    public void run() {
        try {
            crawl(root);
        } catch (InterruptedException e) { /* fall through */ }
        finally {
            while (true) {
                try {
                    queue.put(POISON);
                    break;
                } catch (InterruptedException e1) { /* retry */ }
            }
        }
    }

    private void crawl(File root) throws InterruptedException {
        ...
    }
}
Listing 7.19. Consumer Thread for IndexingService.

public class IndexerThread extends Thread {
    public void run() {
        try {
            while (true) {
                File file = queue.take();
                if (file == POISON)
                    break;
                else
                    indexFile(file);
            }
        } catch (InterruptedException consumed) { }
    }
}
Listing 7.20. Using a Private Executor Whose Lifetime is Bounded by a Method Call.

boolean checkMail(Set<String> hosts, long timeout, TimeUnit unit)
        throws InterruptedException {
    ExecutorService exec = Executors.newCachedThreadPool();
    final AtomicBoolean hasNewMail = new AtomicBoolean(false);
    try {
        for (final String host : hosts)
            exec.execute(new Runnable() {
                public void run() {
                    if (checkMail(host))
                        hasNewMail.set(true);
                }
            });
    } finally {
        exec.shutdown();
        exec.awaitTermination(timeout, unit);
    }
    return hasNewMail.get();
}
7.2.5. Limitations of shutdownNow
When an ExecutorService is shut down abruptly with shutdownNow, it attempts to cancel the tasks currently in progress and returns a list of tasks that were submitted but never started so that they can be logged or saved for later processing.[5]
[5] The Runnable objects returned by shutdownNow might not be the same objects that were submitted to the ExecutorService: they might be wrapped instances of the submitted tasks.
However, there is no general way to find out which tasks started but did not complete. This means that there is no way of knowing the state of the tasks in progress at shutdown time unless the tasks themselves perform some sort of checkpointing. To know which tasks have not completed, you need to know not only which tasks didn't start, but also which tasks were in progress when the executor was shut down.[6]
[6] Unfortunately, there is no shutdown option in which tasks not yet started are returned to the caller but tasks in progress are allowed to complete; such an option would eliminate this uncertain intermediate state.
=N$*C4"6L-&*).29 ln LlsLlng 7.21 shows a Lechnlque for deLermlnlng whlch Lasks were ln progress aL shuLdown Llme. 8y
encapsulaLlng an L-&*).29'&9/4*& and lnsLrumenLlng &-&*).& (and slmllarly #)M34., noL shown) Lo remember whlch
Lasks were cancelled afLer shuLdown, .9$*C4"6L-&*).29 can ldenLlfy whlch Lasks sLarLed buL dld noL compleLe
normally. AfLer Lhe execuLor LermlnaLes, 6&.D$"*&00&;=$#C# reLurns Lhe llsL of cancelled Lasks. ln order for Lhls

99 38arL ll: SLrucLurlng ConcurrenL AppllcaLlons 198ChapLer 7. CancellaLlon and ShuLdown
Lechnlque Lo work, Lhe Lasks musL preserve Lhe Lhread's lnLerrupLed sLaLus when Lhey reLurn, whlch well behaved Lasks
wlll do anyway.
Listing 7.21. ExecutorService that Keeps Track of Cancelled Tasks After Shutdown.

public class TrackingExecutor extends AbstractExecutorService {
    private final ExecutorService exec;
    private final Set<Runnable> tasksCancelledAtShutdown =
        Collections.synchronizedSet(new HashSet<Runnable>());
    ...
    public List<Runnable> getCancelledTasks() {
        if (!exec.isTerminated())
            throw new IllegalStateException(...);
        return new ArrayList<Runnable>(tasksCancelledAtShutdown);
    }

    public void execute(final Runnable runnable) {
        exec.execute(new Runnable() {
            public void run() {
                try {
                    runnable.run();
                } finally {
                    if (isShutdown()
                        && Thread.currentThread().isInterrupted())
                        tasksCancelledAtShutdown.add(runnable);
                }
            }
        });
    }

    // delegate other ExecutorService methods to exec
}
WebCrawler in Listing 7.22 shows an application of TrackingExecutor. The work of a web crawler is often unbounded, so if a crawler must be shut down we might want to save its state so it can be restarted later. CrawlTask provides a getPage method that identifies what page it is working on. When the crawler is shut down, both the tasks that did not start and those that were cancelled are scanned and their URLs recorded, so that page-crawling tasks for those URLs can be added to the queue when the crawler restarts.
=N$*C4"6L-&*).29 has an unavoldable race condlLlon LhaL could make lL yleld false poslLlves: Lasks LhaL are ldenLlfled as
cancelled buL acLually compleLed. 1hls arlses because Lhe Lhread pool could be shuL down beLween when Lhe lasL
lnsLrucLlon of Lhe Lask execuLes and when Lhe pool records Lhe Lask as compleLe. 1hls ls noL a problem lf Lasks are
ldempoLenL (lf performlng Lhem Lwlce has Lhe same effecL as performlng Lhem once), as Lhey Lyplcally are ln a web
crawler. CLherwlse, Lhe appllcaLlon reLrlevlng Lhe cancelled Lasks musL be aware of Lhls rlsk and be prepared Lo deal
wlLh false poslLlves.

Listing 7.22. Using TrackingExecutorService to Save Unfinished Tasks for Later Execution.

public abstract class WebCrawler {
    private volatile TrackingExecutor exec;
    @GuardedBy("this")
    private final Set<URL> urlsToCrawl = new HashSet<URL>();
    ...
    public synchronized void start() {
        exec = new TrackingExecutor(
                Executors.newCachedThreadPool());
        for (URL url : urlsToCrawl) submitCrawlTask(url);
        urlsToCrawl.clear();
    }

    public synchronized void stop() throws InterruptedException {
        try {
            saveUncrawled(exec.shutdownNow());
            if (exec.awaitTermination(TIMEOUT, UNIT))
                saveUncrawled(exec.getCancelledTasks());
        } finally {
            exec = null;
        }
    }

    protected abstract List<URL> processPage(URL url);

    private void saveUncrawled(List<Runnable> uncrawled) {
        for (Runnable task : uncrawled)
            urlsToCrawl.add(((CrawlTask) task).getPage());
    }

    private void submitCrawlTask(URL u) {
        exec.execute(new CrawlTask(u));
    }

    private class CrawlTask implements Runnable {
        private final URL url;
        ...
        public void run() {
            for (URL link : processPage(url)) {
                if (Thread.currentThread().isInterrupted())
                    return;
                submitCrawlTask(link);
            }
        }
        public URL getPage() { return url; }
    }
}
7.3. Handling Abnormal Thread Termination
It is obvious when a single-threaded console application terminates due to an uncaught exception: the program stops running and produces a stack trace that is very different from typical program output. Failure of a thread in a concurrent application is not always so obvious. The stack trace may be printed on the console, but no one may be watching the console. Also, when a thread fails, the application may appear to continue to work, so its failure could go unnoticed. Fortunately, there are means of both detecting and preventing threads from "leaking" from an application.
The leading cause of premature thread death is RuntimeException. Because these exceptions indicate a programming error or other unrecoverable problem, they are generally not caught. Instead they propagate all the way up the stack, at which point the default behavior is to print a stack trace on the console and let the thread terminate.
The consequences of abnormal thread death range from benign to disastrous, depending on the thread's role in the application. Losing a thread from a thread pool can have performance consequences, but an application that runs well with a 50-thread pool will probably run fine with a 49-thread pool too. But losing the event dispatch thread in a GUI application would be quite noticeable: the application would stop processing events and the GUI would freeze. OutOfTime on page 124 showed a serious consequence of thread leakage: the service represented by the Timer is permanently out of commission.
Just about any code can throw a RuntimeException. Whenever you call another method, you are taking a leap of faith that it will return normally or throw one of the checked exceptions its signature declares. The less familiar you are with the code being called, the more skeptical you should be about its behavior.
Task-processing threads such as the worker threads in a thread pool or the Swing event dispatch thread spend their whole life calling unknown code through an abstraction barrier like Runnable, and these threads should be very skeptical that the code they call will be well behaved. It would be very bad if a service like the Swing event thread failed just because some poorly written event handler threw a NullPointerException. Accordingly, these facilities should call tasks within a try-catch block that catches unchecked exceptions, or within a try-finally block to ensure that if the thread exits abnormally the framework is informed of this and can take corrective action. This is one of the few times when you might want to consider catching RuntimeException: when you are calling unknown, untrusted code through an abstraction such as Runnable.[7]
[7] There is some controversy over the safety of this technique; when a thread throws an unchecked exception, the entire application may possibly be compromised. But the alternative, shutting down the entire application, is usually not practical.
Listing 7.23 illustrates a way to structure a worker thread within a thread pool. If a task throws an unchecked exception, it allows the thread to die, but not before notifying the framework that the thread has died. The framework may then replace the worker thread with a new thread, or may choose not to because the thread pool is being shut down or there are already enough worker threads to meet current demand. ThreadPoolExecutor and Swing use this technique to ensure that a poorly behaved task doesn't prevent subsequent tasks from executing. If you are writing a worker thread class that executes submitted tasks, or calling untrusted external code (such as dynamically loaded plugins), use one of these approaches to prevent a poorly written task or plugin from taking down the thread that happens to call it.
Listing 7.23. Typical Thread-Pool Worker Thread Structure.

public void run() {
    Throwable thrown = null;
    try {
        while (!isInterrupted())
            runTask(getTaskFromWorkQueue());
    } catch (Throwable e) {
        thrown = e;
    } finally {
        threadExited(this, thrown);
    }
}
7.3.1. Uncaught Exception Handlers
The previous section offered a proactive approach to the problem of unchecked exceptions. The Thread API also provides the UncaughtExceptionHandler facility, which lets you detect when a thread dies due to an uncaught exception. The two approaches are complementary: taken together, they provide defense-in-depth against thread leakage.
When a thread exits due to an uncaught exception, the JVM reports this event to an application-provided UncaughtExceptionHandler (see Listing 7.24); if no handler exists, the default behavior is to print the stack trace to System.err.[8]
[8] Before Java 5.0, the only way to control the UncaughtExceptionHandler was by subclassing ThreadGroup. In Java 5.0 and later, you can set an UncaughtExceptionHandler on a per-thread basis with Thread.setUncaughtExceptionHandler, and can also set the default UncaughtExceptionHandler with Thread.setDefaultUncaughtExceptionHandler. However, only one of these handlers is called: first the JVM looks for a per-thread handler, then for a ThreadGroup handler. The default handler implementation in ThreadGroup delegates to its parent thread group, and so on up the chain until one of the ThreadGroup handlers deals with the uncaught exception or it bubbles up to the top-level thread group. The top-level thread group handler delegates to the default system handler (if one exists; the default is none) and otherwise prints the stack trace to the console.
Listing 7.24. UncaughtExceptionHandler Interface.

public interface UncaughtExceptionHandler {
    void uncaughtException(Thread t, Throwable e);
}
What the handler should do with an uncaught exception depends on your quality-of-service requirements. The most common response is to write an error message and stack trace to the application log, as shown in Listing 7.25. Handlers can also take more direct action, such as trying to restart the thread, shutting down the application, paging an operator, or other corrective or diagnostic action.
Listing 7.25. UncaughtExceptionHandler that Logs the Exception.

public class UEHLogger implements Thread.UncaughtExceptionHandler {
    public void uncaughtException(Thread t, Throwable e) {
        Logger logger = Logger.getAnonymousLogger();
        logger.log(Level.SEVERE,
            "Thread terminated with exception: " + t.getName(),
            e);
    }
}
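Installing the handler is a one-liner; the fragment below (not one of the numbered listings; someRunnable is a placeholder) sets UEHLogger as the JVM-wide default and also for a single thread:

Thread.setDefaultUncaughtExceptionHandler(new UEHLogger());  // default for all threads

Thread t = new Thread(someRunnable);
t.setUncaughtExceptionHandler(new UEHLogger());  // per-thread handler takes precedence
t.start();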

In long-running applications, always use uncaught exception handlers for all threads that at least log the exception.

To set an UncaughtExceptionHandler for pool threads, provide a ThreadFactory to the ThreadPoolExecutor constructor. (As with all thread manipulation, only the thread's owner should change its UncaughtExceptionHandler.) The standard thread pools allow an uncaught task exception to terminate the pool thread, but use a try-finally block to be notified when this happens so the thread can be replaced. Without an uncaught exception handler or other failure notification mechanism, tasks can appear to fail silently, which can be very confusing. If you want to be notified when a task fails due to an exception so that you can take some task-specific recovery action, either wrap the task with a Runnable or Callable that catches the exception or override the afterExecute hook in ThreadPoolExecutor.
Somewhat confusingly, exceptions thrown from tasks make it to the uncaught exception handler only for tasks submitted with execute; for tasks submitted with submit, any thrown exception, checked or not, is considered to be part of the task's return status. If a task submitted with submit terminates with an exception, it is rethrown by Future.get, wrapped in an ExecutionException.
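The fragment below (not one of the numbered listings; exec and someFailingTask are placeholders) shows where the exception surfaces for a submitted task: not in the uncaught exception handler, but wrapped in an ExecutionException at Future.get.

Future<?> f = exec.submit(someFailingTask);
try {
    f.get();
} catch (ExecutionException e) {
    Throwable cause = e.getCause();   // the exception the task actually threw
    // log it or rethrow, as appropriate
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();   // restore the interrupted status
}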
7.4. JVM Shutdown
The JVM can shut down in either an orderly or abrupt manner. An orderly shutdown is initiated when the last "normal" (nondaemon) thread terminates, someone calls System.exit, or by other platform-specific means (such as sending a SIGINT or hitting Ctrl-C). While this is the standard and preferred way for the JVM to shut down, it can also be shut down abruptly by calling Runtime.halt or by killing the JVM process through the operating system (such as sending a SIGKILL).
7.4.1. Shutdown Hooks
In an orderly shutdown, the JVM first starts all registered shutdown hooks. Shutdown hooks are unstarted threads that are registered with Runtime.addShutdownHook. The JVM makes no guarantees on the order in which shutdown hooks are started. If any application threads (daemon or nondaemon) are still running at shutdown time, they continue to run concurrently with the shutdown process. When all shutdown hooks have completed, the JVM may choose to run finalizers if runFinalizersOnExit is true, and then halts. The JVM makes no attempt to stop or interrupt any application threads that are still running at shutdown time; they are abruptly terminated when the JVM eventually halts. If the shutdown hooks or finalizers don't complete, then the orderly shutdown process "hangs" and the JVM must be shut down abruptly. In an abrupt shutdown, the JVM is not required to do anything other than halt; shutdown hooks will not run.
Shutdown hooks should be thread-safe: they must use synchronization when accessing shared data and should be careful to avoid deadlock, just like any other concurrent code. Further, they should not make assumptions about the state of the application (such as whether other services have shut down already or all normal threads have completed) or about why the JVM is shutting down, and must therefore be coded extremely defensively. Finally, they should exit as quickly as possible, since their existence delays JVM termination at a time when the user may be expecting the JVM to terminate quickly.
Shutdown hooks can be used for service or application cleanup, such as deleting temporary files or cleaning up resources that are not automatically cleaned up by the OS. Listing 7.26 shows how LogService in Listing 7.16 could register a shutdown hook from its start method to ensure the log file is closed on exit.
Because shutdown hooks all run concurrently, closing the log file could cause trouble for other shutdown hooks that want to use the logger. To avoid this problem, shutdown hooks should not rely on services that can be shut down by the application or other shutdown hooks. One way to accomplish this is to use a single shutdown hook for all services, rather than one for each service, and have it call a series of shutdown actions. This ensures that shutdown actions execute sequentially in a single thread, thus avoiding the possibility of race conditions or deadlock between shutdown actions. This technique can be used whether or not you use shutdown hooks; executing shutdown actions sequentially rather than concurrently eliminates many potential sources of failure. In applications that maintain explicit dependency information among services, this technique can also ensure that shutdown actions are performed in the right order.
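A minimal sketch of the single-hook approach (not one of the numbered listings; shutdownActions is a hypothetical ordered list of Runnables built by the application):

Runtime.getRuntime().addShutdownHook(new Thread() {
    public void run() {
        // actions run sequentially in one thread, in dependency order,
        // so they cannot race or deadlock with one another
        for (Runnable action : shutdownActions)
            action.run();
    }
});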

Listing 7.26. Registering a Shutdown Hook to Stop the Logging Service.

public void start() {
    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            try { LogService.this.stop(); }
            catch (InterruptedException ignored) {}
        }
    });
}
7.4.2. Daemon Threads
Sometimes you want to create a thread that performs some helper function but you don't want the existence of this thread to prevent the JVM from shutting down. This is what daemon threads are for.
Threads are divided into two types: normal threads and daemon threads. When the JVM starts up, all the threads it creates (such as garbage collector and other housekeeping threads) are daemon threads, except the main thread. When a new thread is created, it inherits the daemon status of the thread that created it, so by default any threads created by the main thread are also normal threads.
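Daemon status must be set before the thread is started; a minimal sketch (not one of the numbered listings; cacheCleanupTask is a hypothetical housekeeping Runnable):

Thread cleaner = new Thread(cacheCleanupTask);
cleaner.setDaemon(true);   // the JVM will not wait for this thread at shutdown
cleaner.start();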
Normal threads and daemon threads differ only in what happens when they exit. When a thread exits, the JVM performs an inventory of running threads, and if the only threads that are left are daemon threads, it initiates an orderly shutdown. When the JVM halts, any remaining daemon threads are abandoned: finally blocks are not executed, stacks are not unwound; the JVM just exits.
Daemon threads should be used sparingly; few processing activities can be safely abandoned at any time with no cleanup. In particular, it is dangerous to use daemon threads for tasks that might perform any sort of I/O. Daemon threads are best saved for "housekeeping" tasks, such as a background thread that periodically removes expired entries from an in-memory cache.
uaemon Lhreads are noL a good subsLlLuLe for properly managlng Lhe llfecycle of servlces wlLhln an appllcaLlon.
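A minimal sketch of such a housekeeping thread (the expiry rule, where each cache value is an expiration time in milliseconds, is an assumption for illustration):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Sketch: a daemon thread that periodically sweeps expired entries
// from an in-memory cache. setDaemon must be called before start.
final Map<String, Long> cache = new ConcurrentHashMap<String, Long>();
Thread sweeper = new Thread(new Runnable() {
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            long now = System.currentTimeMillis();
            for (Map.Entry<String, Long> e : cache.entrySet())
                if (e.getValue() < now)
                    cache.remove(e.getKey());
            try {
                Thread.sleep(60 * 1000);   // sweep once a minute
            } catch (InterruptedException ie) {
                return;                    // allow orderly exit
            }
        }
    }
});
sweeper.setDaemon(true);   // the JVM will not wait for this thread
sweeper.start();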
7.4.3. Finalizers
The garbage collector does a good job of reclaiming memory resources when they are no longer needed, but some resources, such as file or socket handles, must be explicitly returned to the operating system when no longer needed. To assist in this, the garbage collector treats objects that have a nontrivial finalize method specially: after they are reclaimed by the collector, finalize is called so that persistent resources can be released.
Since finalizers can run in a thread managed by the JVM, any state accessed by a finalizer will be accessed by more than one thread and therefore must be accessed with synchronization. Finalizers offer no guarantees on when or even if they run, and they impose a significant performance cost on objects with nontrivial finalizers. They are also extremely difficult to write correctly.[9] In most cases, the combination of finally blocks and explicit close methods does a better job of resource management than finalizers; the sole exception is when you need to manage objects that hold resources acquired by native methods. For these reasons and others, work hard to avoid writing or using classes with finalizers (other than the platform library classes) [EJ Item 6].
[9] See (Boehm, 2003) for some of the challenges involved in writing finalizers.
Avoid finalizers.
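A minimal sketch of the finally-plus-explicit-close idiom (an ordinary FileInputStream standing in for any resource with an explicit close method):

import java.io.FileInputStream;
import java.io.IOException;

// Sketch: release the file handle promptly with an explicit close in
// a finally block, instead of waiting for a finalizer to run (if ever).
public static int readFirstByte(String path) throws IOException {
    FileInputStream in = new FileInputStream(path);
    try {
        return in.read();
    } finally {
        in.close();
    }
}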
C(55")+
End-of-lifecycle issues for tasks, threads, services, and applications can add complexity to their design and implementation. Java does not provide a preemptive mechanism for cancelling activities or terminating threads. Instead, it provides a cooperative interruption mechanism that can be used to facilitate cancellation, but it is up to you to construct protocols for cancellation and use them consistently. Using FutureTask and the Executor framework simplifies building cancellable tasks and services.
.*) \A">
Chapter 8. Applying Thread Pools
Chapter 6 introduced the task execution framework, which simplifies management of task and thread lifecycles and provides a simple and flexible means for decoupling task submission from execution policy. Chapter 7 covered some of the messy details of service lifecycle that arise from using the task execution framework in real applications. This chapter looks at advanced options for configuring and tuning thread pools, describes hazards to watch for when using the task execution framework, and offers some more advanced examples for using Executor.
8.1. Implicit Couplings Between Tasks and Execution Policies
We claimed earlier that the Executor framework decouples task submission from task execution. Like many attempts at decoupling complex processes, this was a bit of an overstatement. While the Executor framework offers substantial flexibility in specifying and modifying execution policies, not all tasks are compatible with all execution policies. Types of tasks that require specific execution policies include:
Dependent tasks. The most well-behaved tasks are independent: those that do not depend on the timing, results, or side effects of other tasks. When executing independent tasks in a thread pool, you can freely vary the pool size and configuration without affecting anything but performance. On the other hand, when you submit tasks that depend on other tasks to a thread pool, you implicitly create constraints on the execution policy that must be carefully managed to avoid liveness problems (see Section 8.1.1).
Tasks that exploit thread confinement. Single-threaded executors make stronger promises about concurrency than do arbitrary thread pools. They guarantee that tasks are not executed concurrently, which allows you to relax the thread safety of task code. Objects can be confined to the task thread, thus enabling tasks designed to run in that thread to access those objects without synchronization, even if those resources are not thread-safe. This forms an implicit coupling between the task and the execution policy: the tasks require their executor to be single-threaded.[1] In this case, if you changed the Executor from a single-threaded one to a thread pool, thread safety could be lost.
[1] The requirement is not quite this strong; it would be enough to ensure only that tasks not execute concurrently and provide enough synchronization so that the memory effects of one task are guaranteed to be visible to the next task, which is precisely the guarantee offered by newSingleThreadExecutor.
Response-time-sensitive tasks. GUI applications are sensitive to response time: users are annoyed at long delays between a button click and the corresponding visual feedback. Submitting a long-running task to a single-threaded executor, or submitting several long-running tasks to a thread pool with a small number of threads, may impair the responsiveness of the service managed by that Executor.
Tasks that use ThreadLocal. ThreadLocal allows each thread to have its own private "version" of a variable. However, executors are free to reuse threads as they see fit. The standard Executor implementations may reap idle threads when demand is low and add new ones when demand is high, and also replace a worker thread with a fresh one if an unchecked exception is thrown from a task. ThreadLocal makes sense to use in pool threads only if the thread-local value has a lifetime that is bounded by that of a task; ThreadLocal should not be used in pool threads to communicate values between tasks.
Thread pools work best when tasks are homogeneous and independent. Mixing long-running and short-running tasks risks "clogging" the pool unless it is very large; submitting tasks that depend on other tasks risks deadlock unless the pool is unbounded. Fortunately, requests in typical network-based server applications (web servers, mail servers, file servers) usually meet these guidelines.
Some tasks have characteristics that require or preclude a specific execution policy. Tasks that depend on other tasks require that the thread pool be large enough that tasks are never queued or rejected; tasks that exploit thread confinement require sequential execution. Document these requirements so that future maintainers do not undermine safety or liveness by substituting an incompatible execution policy.
8.1.1. Thread Starvation Deadlock
If tasks that depend on other tasks execute in a thread pool, they can deadlock. In a single-threaded executor, a task that submits another task to the same executor and waits for its result will always deadlock. The second task sits on the work queue until the first task completes, but the first will not complete because it is waiting for the result of the second task. The same thing can happen in larger thread pools if all threads are executing tasks that are blocked waiting for other tasks still on the work queue. This is called thread starvation deadlock, and can occur whenever a pool task initiates an unbounded blocking wait for some resource or condition that can succeed only through the action of
another pool task, such as waiting for the return value or side effect of another task, unless you can guarantee that the pool is large enough.
ThreadDeadlock in Listing 8.1 illustrates thread starvation deadlock. RenderPageTask submits two additional tasks to the Executor to fetch the page header and footer, renders the page body, waits for the results of the header and footer tasks, and then combines the header, body, and footer into the finished page. With a single-threaded executor, ThreadDeadlock will always deadlock. Similarly, tasks coordinating amongst themselves with a barrier could also cause thread starvation deadlock if the pool is not big enough.
Whenever you submit to an Executor tasks that are not independent, be aware of the possibility of thread starvation deadlock, and document any pool sizing or configuration constraints in the code or configuration file where the Executor is configured.
In addition to any explicit bounds on the size of a thread pool, there may also be implicit limits because of constraints on other resources. If your application uses a JDBC connection pool with ten connections and each task needs a database connection, it is as if your thread pool only has ten threads because tasks in excess of ten will block waiting for a connection.
Listing 8.1. Task that Deadlocks in a Single-threaded Executor. Don't Do This.
public class ThreadDeadlock {
    ExecutorService exec = Executors.newSingleThreadExecutor();

    public class RenderPageTask implements Callable<String> {
        public String call() throws Exception {
            Future<String> header, footer;
            header = exec.submit(new LoadFileTask("header.html"));
            footer = exec.submit(new LoadFileTask("footer.html"));
            String page = renderBody();
            // Will deadlock -- task waiting for result of subtask
            return header.get() + page + footer.get();
        }
    }
}
8.1.2. Long-running Tasks
Thread pools can have responsiveness problems if tasks can block for extended periods of time, even if deadlock is not a possibility. A thread pool can become clogged with long-running tasks, increasing the service time even for short tasks. If the pool size is too small relative to the expected steady-state number of long-running tasks, eventually all the pool threads will be running long-running tasks and responsiveness will suffer.
One technique that can mitigate the ill effects of long-running tasks is for tasks to use timed resource waits instead of unbounded waits. Most blocking methods in the platform libraries come in both untimed and timed versions, such as Thread.join, BlockingQueue.put, CountDownLatch.await, and Selector.select. If the wait times out, you can mark the task as failed and abort it or requeue it for execution later. This guarantees that each task eventually makes progress towards either successful or failed completion, freeing up threads for tasks that might complete more quickly. If a thread pool is frequently full of blocked tasks, this may also be a sign that the pool is too small.
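A minimal sketch of a timed wait inside a pool task (the two-second timeout and the process helper are illustrative assumptions):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

// Sketch: poll with a timeout instead of blocking indefinitely with
// take(). On timeout the task can be marked failed or requeued.
void fetchAndProcess(BlockingQueue<String> queue)
        throws InterruptedException {
    String item = queue.poll(2, TimeUnit.SECONDS);
    if (item == null) {
        // timed out; report failure or requeue for later execution
        return;
    }
    process(item);   // hypothetical application method
}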
8.2. Sizing Thread Pools
The ideal size for a thread pool depends on the types of tasks that will be submitted and the characteristics of the deployment system. Thread pool sizes should rarely be hard-coded; instead pool sizes should be provided by a configuration mechanism or computed dynamically by consulting Runtime.availableProcessors.
Sizing thread pools is not an exact science, but fortunately you need only avoid the extremes of "too big" and "too small". If a thread pool is too big, then threads compete for scarce CPU and memory resources, resulting in higher memory usage and possible resource exhaustion. If it is too small, throughput suffers as processors go unused despite available work.
To size a thread pool properly, you need to understand your computing environment, your resource budget, and the nature of your tasks. How many processors does the deployment system have? How much memory? Do tasks perform
mostly computation, I/O, or some combination? Do they require a scarce resource, such as a JDBC connection? If you have different categories of tasks with very different behaviors, consider using multiple thread pools so each can be tuned according to its workload.
For compute-intensive tasks, an N_cpu-processor system usually achieves optimum utilization with a thread pool of N_cpu + 1 threads. (Even compute-intensive threads occasionally take a page fault or pause for some other reason, so an "extra" runnable thread prevents CPU cycles from going unused when this happens.) For tasks that also include I/O or other blocking operations, you want a larger pool, since not all of the threads will be schedulable at all times. In order to size the pool properly, you must estimate the ratio of waiting time to compute time for your tasks; this estimate need not be precise and can be obtained through profiling or instrumentation. Alternatively, the size of the thread pool can be tuned by running the application using several different pool sizes under a benchmark load and observing the level of CPU utilization.
Given these definitions:

    N_cpu = number of CPUs
    U_cpu = target CPU utilization, 0 <= U_cpu <= 1
    W/C   = ratio of wait time to compute time

The optimal pool size for keeping the processors at the desired utilization is:

    N_threads = N_cpu * U_cpu * (1 + W/C)
You can determine the number of CPUs using Runtime:
int N_CPUS = Runtime.getRuntime().availableProcessors();
Of course, CPU cycles are not the only resource you might want to manage using thread pools. Other resources that can contribute to sizing constraints are memory, file handles, socket handles, and database connections. Calculating pool size constraints for these types of resources is easier: just add up how much of that resource each task requires and divide that into the total quantity available. The result will be an upper bound on the pool size.
When tasks require a pooled resource such as database connections, thread pool size and resource pool size affect each other. If each task requires a connection, the effective size of the thread pool is limited by the connection pool size. Similarly, when the only consumers of connections are pool tasks, the effective size of the connection pool is limited by the thread pool size.
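As a worked example with assumed numbers (a 50% CPU utilization target and tasks that spend nine times as long waiting as computing):

// Sketch: evaluating N_threads = N_cpu * U_cpu * (1 + W/C).
// The utilization target and wait/compute ratio are assumptions,
// not measurements.
int nCpu = Runtime.getRuntime().availableProcessors();
double targetUtilization = 0.5;    // use half of each CPU
double waitComputeRatio = 9.0;     // 90% waiting, 10% computing

int nThreads =
        (int) (nCpu * targetUtilization * (1 + waitComputeRatio));
// On a two-CPU system: 2 * 0.5 * 10 = 10 threads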
8.3. Configuring ThreadPoolExecutor
ThreadPoolExecutor provides the base implementation for the executors returned by the newCachedThreadPool, newFixedThreadPool, and newScheduledThreadPool factories in Executors. ThreadPoolExecutor is a flexible, robust pool implementation that allows a variety of customizations.
If the default execution policy does not meet your needs, you can instantiate a ThreadPoolExecutor through its constructor and customize it as you see fit; you can consult the source code for Executors to see the execution policies for the default configurations and use them as a starting point. ThreadPoolExecutor has several constructors, the most general of which is shown in Listing 8.2.
8.3.1. Thread Creation and Teardown
The core pool size, maximum pool size, and keep-alive time govern thread creation and teardown. The core size is the target size; the implementation attempts to maintain the pool at this size even when there are no tasks to execute,[2] and will not create more threads than this unless the work queue is full.[3] The maximum pool size is the upper bound on how many pool threads can be active at once. A thread that has been idle for longer than the keep-alive time becomes a candidate for reaping and can be terminated if the current pool size exceeds the core size.
[2] When a ThreadPoolExecutor is initially created, the core threads are not started immediately but instead as tasks are submitted, unless you call prestartAllCoreThreads.
[3] Developers are sometimes tempted to set the core size to zero so that the worker threads will eventually be torn down and therefore won't prevent the JVM from exiting, but this can cause some strange-seeming behavior in thread pools that don't use a SynchronousQueue for their work queue (as newCachedThreadPool does). If the pool is already at the core size, ThreadPoolExecutor creates a new thread only if the work queue is full. So tasks submitted to a thread pool with a work queue that has any capacity and a core size of zero will not execute until the queue fills up, which is usually not what is desired. In Java 6, allowCoreThreadTimeOut allows you to request that all pool threads be able to time out; enable this feature with a core size of zero if you want a bounded thread pool with a bounded work queue but still have all the threads torn down when there is no work to do.
Listing 8.2. General Constructor for ThreadPoolExecutor.
public ThreadPoolExecutor(int corePoolSize,
                          int maximumPoolSize,
                          long keepAliveTime,
                          TimeUnit unit,
                          BlockingQueue<Runnable> workQueue,
                          ThreadFactory threadFactory,
                          RejectedExecutionHandler handler) { ... }
By tuning the core pool size and keep-alive times, you can encourage the pool to reclaim resources used by otherwise idle threads, making them available for more useful work. (Like everything else, this is a trade-off: reaping idle threads incurs additional latency due to thread creation if threads must later be created when demand increases.)
The newFixedThreadPool factory sets both the core pool size and the maximum pool size to the requested pool size, creating the effect of infinite timeout; the newCachedThreadPool factory sets the maximum pool size to Integer.MAX_VALUE and the core pool size to zero with a timeout of one minute, creating the effect of an infinitely expandable thread pool that will contract again when demand decreases. Other combinations are possible using the explicit ThreadPoolExecutor constructor.
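A hedged sketch of one such in-between combination (all sizes and timeouts here are arbitrary choices for illustration):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Sketch: a pool that keeps 2 threads, grows to at most 8 when the
// bounded queue is full, and retires extra idle threads after 30s.
ThreadPoolExecutor pool = new ThreadPoolExecutor(
        2, 8,
        30L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>(100));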
8.3.2. Managing Queued Tasks
Bounded thread pools limit the number of tasks that can be executed concurrently. (The single-threaded executors are a notable special case: they guarantee that no tasks will execute concurrently, offering the possibility of achieving thread safety through thread confinement.)
We saw in Section 6.1.2 how unbounded thread creation could lead to instability, and addressed this problem by using a fixed-sized thread pool instead of creating a new thread for every request. However, this is only a partial solution; it is still possible for the application to run out of resources under heavy load, just harder. If the arrival rate for new requests exceeds the rate at which they can be handled, requests will still queue up. With a thread pool, they wait in a queue of Runnables managed by the Executor instead of queueing up as threads contending for the CPU. Representing a waiting task with a Runnable and a list node is certainly a lot cheaper than with a thread, but the risk of resource exhaustion still remains if clients can throw requests at the server faster than it can handle them.
Requests often arrive in bursts even when the average request rate is fairly stable. Queues can help smooth out transient bursts of tasks, but if tasks continue to arrive too quickly you will eventually have to throttle the arrival rate to avoid running out of memory.[4] Even before you run out of memory, response time will get progressively worse as the task queue grows.
[4] This is analogous to flow control in communications networks: you may be willing to buffer a certain amount of data, but eventually you need to find a way to get the other side to stop sending you data, or throw the excess data on the floor and hope the sender retransmits it when you're not so busy.
ThreadPoolExecutor allows you to supply a BlockingQueue to hold tasks awaiting execution. There are three basic approaches to task queueing: unbounded queue, bounded queue, and synchronous handoff. The choice of queue interacts with other configuration parameters such as pool size.
The default for newFixedThreadPool and newSingleThreadExecutor is to use an unbounded LinkedBlockingQueue. Tasks will queue up if all worker threads are busy, but the queue could grow without bound if the tasks keep arriving faster than they can be executed.
A more stable resource management strategy is to use a bounded queue, such as an ArrayBlockingQueue or a bounded LinkedBlockingQueue or PriorityBlockingQueue. Bounded queues help prevent resource exhaustion but introduce the question of what to do with new tasks when the queue is full. (There are a number of possible saturation policies for addressing this problem; see Section 8.3.3.) With a bounded work queue, the queue size and pool size must be tuned together. A large queue coupled with a small pool can help reduce memory usage, CPU usage, and context switching, at the cost of potentially constraining throughput.
For very large or unbounded pools, you can also bypass queueing entirely and instead hand off tasks directly from producers to worker threads using a SynchronousQueue. A SynchronousQueue is not really a queue at all, but a mechanism for managing handoffs between threads. In order to put an element on a SynchronousQueue, another thread must already be waiting to accept the handoff. If no thread is waiting but the current pool size is less than the maximum, ThreadPoolExecutor creates a new thread; otherwise the task is rejected according to the saturation policy. Using a direct handoff is more efficient because the task can be handed right to the thread that will execute it, rather than first placing it on a queue and then having the worker thread fetch it from the queue. SynchronousQueue is a practical choice only if the pool is unbounded or if rejecting excess tasks is acceptable. The newCachedThreadPool factory uses a SynchronousQueue.
Using a FIFO queue like LinkedBlockingQueue or ArrayBlockingQueue causes tasks to be started in the order in which they arrived. For more control over task execution order, you can use a PriorityBlockingQueue, which orders tasks according to priority. Priority can be defined by natural order (if tasks implement Comparable) or by a Comparator.
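A hedged sketch of a priority-ordered pool (PrioritizedTask and its getPriority accessor are invented for this sketch; tasks passed to execute would have to implement that type for the cast to succeed):

import java.util.Comparator;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Sketch: order queued tasks by an assumed getPriority() value so
// higher-priority tasks are dequeued first, instead of FIFO order.
Comparator<Runnable> byPriority = new Comparator<Runnable>() {
    public int compare(Runnable a, Runnable b) {
        return ((PrioritizedTask) b).getPriority()
                - ((PrioritizedTask) a).getPriority();
    }
};
ThreadPoolExecutor exec = new ThreadPoolExecutor(
        4, 4, 0L, TimeUnit.MILLISECONDS,
        new PriorityBlockingQueue<Runnable>(16, byPriority));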
1he "&?D$*8&;=89&$;@220 facLory ls a good defaulL cholce for an L-&*).29, provldlng beLLer queulng performance
Lhan a flxed Lhread pool.
[3]
A flxed slze Lhread pool ls a good cholce when you need Lo llmlL Lhe number of concurrenL
Lasks for resourcemanagemenL purposes, as ln a server appllcaLlon LhaL accepLs requesLs from neLwork cllenLs and
would oLherwlse be vulnerable Lo overload.
[5] This performance difference comes from the use of SynchronousQueue instead of LinkedBlockingQueue. SynchronousQueue was replaced in Java 6 with a new non-blocking algorithm that improved throughput in Executor benchmarks by a factor of three over the Java 5.0 SynchronousQueue implementation (Scherer et al., 2006).
Bounding either the thread pool or the work queue is suitable only when tasks are independent. With tasks that depend on other tasks, bounded thread pools or queues can cause thread starvation deadlock; instead, use an unbounded pool configuration like newCachedThreadPool.[6]
[6] An alternative configuration for tasks that submit other tasks and wait for their results is to use a bounded thread pool, a SynchronousQueue as the work queue, and the caller-runs saturation policy.
8.3.3. Saturation Policies
When a bounded work queue fills up, the saturation policy comes into play. The saturation policy for a ThreadPoolExecutor can be modified by calling setRejectedExecutionHandler. (The saturation policy is also used when a task is submitted to an Executor that has been shut down.) Several implementations of RejectedExecutionHandler are provided, each implementing a different saturation policy: AbortPolicy, CallerRunsPolicy, DiscardPolicy, and DiscardOldestPolicy.
The default policy, abort, causes execute to throw the unchecked RejectedExecutionException; the caller can catch this exception and implement its own overflow handling as it sees fit. The discard policy silently discards the newly submitted task if it cannot be queued for execution; the discard-oldest policy discards the task that would otherwise be executed next and tries to resubmit the new task. (If the work queue is a priority queue, this discards the highest-priority element, so the combination of a discard-oldest saturation policy and a priority queue is not a good one.)
The caller-runs policy implements a form of throttling that neither discards tasks nor throws an exception, but instead tries to slow down the flow of new tasks by pushing some of the work back to the caller. It executes the newly submitted task not in a pool thread, but in the thread that calls execute. If we modified our WebServer example to use a bounded queue and the caller-runs policy, after all the pool threads were occupied and the work queue filled up the next task would be executed in the main thread during the call to execute. Since this would probably take some time, the main thread cannot submit any more tasks for at least a little while, giving the worker threads some time to catch up on the backlog. The main thread would also not be calling accept during this time, so incoming requests will queue up in the TCP layer instead of in the application. If the overload persisted, eventually the TCP layer would decide it has queued enough connection requests and begin discarding connection requests as well. As the server becomes overloaded, the overload is gradually pushed outward from the pool threads to the work queue to the application to the TCP layer, and eventually to the client, enabling more graceful degradation under load.
Choosing a saturation policy or making other changes to the execution policy can be done when the Executor is created. Listing 8.3 illustrates creating a fixed-size thread pool with the caller-runs saturation policy.
Listing 8.3. Creating a Fixed-sized Thread Pool with a Bounded Queue and the Caller-runs Saturation Policy.
ThreadPoolExecutor executor
    = new ThreadPoolExecutor(N_THREADS, N_THREADS,
                             0L, TimeUnit.MILLISECONDS,
                             new LinkedBlockingQueue<Runnable>(CAPACITY));
executor.setRejectedExecutionHandler(
    new ThreadPoolExecutor.CallerRunsPolicy());
There is no predefined saturation policy to make execute block when the work queue is full. However, the same effect can be accomplished by using a Semaphore to bound the task injection rate, as shown in BoundedExecutor in Listing 8.4. In such an approach, use an unbounded queue (there's no reason to bound both the queue size and the injection rate) and set the bound on the semaphore to be equal to the pool size plus the number of queued tasks you want to allow, since the semaphore is bounding the number of tasks both currently executing and awaiting execution.
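A minimal usage sketch for BoundedExecutor from Listing 8.4 (the pool size and queue allowance are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Sketch: ten pool threads plus up to twenty waiting tasks gives a
// semaphore bound of thirty; the thirty-first submitTask call blocks.
// (submitTask throws InterruptedException; callers must handle it.)
ExecutorService pool = Executors.newFixedThreadPool(10);
BoundedExecutor bounded = new BoundedExecutor(pool, 10 + 20);
bounded.submitTask(new Runnable() {
    public void run() { /* do the work */ }
});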
8.3.4. Thread Factories
Whenever a thread pool needs to create a thread, it does so through a thread factory (see Listing 8.5). The default thread factory creates a new, nondaemon thread with no special configuration. Specifying a thread factory allows you to customize the configuration of pool threads. ThreadFactory has a single method, newThread, that is called whenever a thread pool needs to create a new thread.
There are a number of reasons to use a custom thread factory. You might want to specify an UncaughtExceptionHandler for pool threads, or instantiate an instance of a custom Thread class, such as one that performs debug logging. You might want to modify the priority (generally not a very good idea; see Section 10.3.1) or set the daemon status (again, not all that good an idea; see Section 7.4.2) of pool threads. Or maybe you just want to give pool threads more meaningful names to simplify interpreting thread dumps and error logs.
Listing 8.4. Using a Semaphore to Throttle Task Submission.
@ThreadSafe
public class BoundedExecutor {
    private final Executor exec;
    private final Semaphore semaphore;

    public BoundedExecutor(Executor exec, int bound) {
        this.exec = exec;
        this.semaphore = new Semaphore(bound);
    }

    public void submitTask(final Runnable command)
            throws InterruptedException {
        semaphore.acquire();
        try {
            exec.execute(new Runnable() {
                public void run() {
                    try {
                        command.run();
                    } finally {
                        semaphore.release();
                    }
                }
            });
        } catch (RejectedExecutionException e) {
            semaphore.release();
        }
    }
}
Listing 8.5. ThreadFactory Interface.
G)M04* 4".&9%$*& =89&$;<$*.297 W
=89&$; "&?=89&$;UN)""$M0& 9V[
\
MyThreadFactory in Listing 8.6 illustrates a custom thread factory. It instantiates a new MyAppThread, passing a pool-specific name to the constructor so that threads from each pool can be distinguished in thread dumps and error logs. MyAppThread can also be used elsewhere in the application so that all threads can take advantage of its debugging features.
Listing 8.6. Custom Thread Factory.
public class MyThreadFactory implements ThreadFactory {
    private final String poolName;

    public MyThreadFactory(String poolName) {
        this.poolName = poolName;
    }

    public Thread newThread(Runnable runnable) {
        return new MyAppThread(runnable, poolName);
    }
}
The interesting customization takes place in MyAppThread, shown in Listing 8.7, which lets you provide a thread name, sets a custom UncaughtExceptionHandler that writes a message to a Logger, maintains statistics on how many threads have been created and destroyed, and optionally writes a debug message to the log when a thread is created or terminates.
If your application takes advantage of security policies to grant permissions to particular codebases, you may want to use the privilegedThreadFactory factory method in Executors to construct your thread factory. It creates pool threads that have the same permissions, AccessControlContext, and contextClassLoader as the thread creating the privilegedThreadFactory. Otherwise, threads created by the thread pool inherit permissions from whatever client happens to be calling execute or submit at the time a new thread is needed, which could cause confusing security-related exceptions.
8.3.5. Customizing ThreadPoolExecutor After Construction
Most of the options passed to the ThreadPoolExecutor constructors can also be modified after construction via setters (such as the core thread pool size, maximum thread pool size, keep-alive time, thread factory, and rejected execution handler). If the Executor is created through one of the factory methods in Executors (except newSingleThreadExecutor), you can cast the result to ThreadPoolExecutor to access the setters as in Listing 8.8.
Executors includes a factory method, unconfigurableExecutorService, which takes an existing ExecutorService and wraps it with one exposing only the methods of ExecutorService so it cannot be further configured. Unlike the pooled implementations, newSingleThreadExecutor returns an ExecutorService wrapped in this manner, rather than a raw ThreadPoolExecutor. While a single-threaded executor is actually implemented as a thread pool with one thread, it also promises not to execute tasks concurrently. If some misguided code were to increase the pool size on a single-threaded executor, it would undermine the intended execution semantics.
Listing 8.7. Custom Thread Base Class.
public class MyAppThread extends Thread {
    public static final String DEFAULT_NAME = "MyAppThread";
    private static volatile boolean debugLifecycle = false;
    private static final AtomicInteger created = new AtomicInteger();
    private static final AtomicInteger alive = new AtomicInteger();
    private static final Logger log = Logger.getAnonymousLogger();

    public MyAppThread(Runnable r) { this(r, DEFAULT_NAME); }

    public MyAppThread(Runnable runnable, String name) {
        super(runnable, name + "-" + created.incrementAndGet());
        setUncaughtExceptionHandler(
            new Thread.UncaughtExceptionHandler() {
                public void uncaughtException(Thread t,
                                              Throwable e) {
                    log.log(Level.SEVERE,
                            "UNCAUGHT in thread " + t.getName(), e);
                }
            });
    }

    public void run() {
        // Copy debug flag to ensure consistent value throughout.
        boolean debug = debugLifecycle;
        if (debug) log.log(Level.FINE, "Created " + getName());
        try {
            alive.incrementAndGet();
            super.run();
        } finally {
            alive.decrementAndGet();
            if (debug) log.log(Level.FINE, "Exiting " + getName());
        }
    }

    public static int getThreadsCreated() { return created.get(); }
    public static int getThreadsAlive() { return alive.get(); }
    public static boolean getDebug() { return debugLifecycle; }
    public static void setDebug(boolean b) { debugLifecycle = b; }
}
Listing 8.8. Modifying an Executor Created with the Standard Factories.
ExecutorService exec = Executors.newCachedThreadPool();
if (exec instanceof ThreadPoolExecutor)
    ((ThreadPoolExecutor) exec).setCorePoolSize(10);
else
    throw new AssertionError("Oops, bad assumption");
You can use this technique with your own executors to prevent the execution policy from being modified. If you will be exposing an ExecutorService to code you don't trust not to modify it, you can wrap it with an unconfigurableExecutorService.
8.4. Extending ThreadPoolExecutor
ThreadPoolExecutor was designed for extension, providing several "hooks" for subclasses to override (beforeExecute, afterExecute, and terminated) that can be used to extend the behavior of ThreadPoolExecutor.
The beforeExecute and afterExecute hooks are called in the thread that executes the task, and can be used for adding logging, timing, monitoring, or statistics gathering. The afterExecute hook is called whether the task completes by returning normally from run or by throwing an Exception. (If the task completes with an Error, afterExecute is not called.) If beforeExecute throws a RuntimeException, the task is not executed and afterExecute is not called.
1he .&934"$.&; hook ls called when Lhe Lhread pool compleLes Lhe shuLdown process, afLer all Lasks have flnlshed and
all worker Lhreads have shuL down. lL can be used Lo release resources allocaLed by Lhe L-&*).29 durlng lLs llfecycle,
perform noLlflcaLlon or logglng, or flnallze sLaLlsLlcs gaLherlng.
8.4.1. Example: Adding Statistics to a Thread Pool
=434"6=89&$;@220 ln LlsLlng 8.9 shows a cusLom Lhread pool LhaL uses M&%29&aL-&*).&, $%.&9L-&*).&, and
.&934"$.&; Lo add logglng and sLaLlsLlcs gaLherlng. 1o measure a Lask's runLlme, M&%29&L-&*).& musL record Lhe sLarL
Llme and sLore lL somewhere $%.&9L-&*).& can flnd lL. 8ecause execuLlon hooks are called ln Lhe Lhread LhaL execuLes
Lhe Lask, a value placed ln a =89&$;52*$0 by M&%29&L-&*).& can be reLrleved by $%.&9L-&*).&. =434"6=89&$;@220
uses a palr of 1.234*52"6s Lo keep Lrack of Lhe LoLal number of Lasks processed and Lhe LoLal processlng Llme, and uses
Lhe .&934"$.&; hook Lo prlnL a log message showlng Lhe average Lask Llme.
Listing 8.9. Thread Pool Extended with Logging and Timing.
G)M04* *0$## =434"6=89&$;@220 &-.&";# =89&$;@220L-&*).29 W
G94/$.& %4"$0 =89&$;52*$0R52"6T #.$9.=43&
] "&? =89&$;52*$0R52"6TUV[
G94/$.& %4"$0 5266&9 026 ] 5266&9+6&.5266&9Ud=434"6=89&$;@220dV[
G94/$.& %4"$0 1.234*52"6 ")3=$#C# ] "&? 1.234*52"6UV[
G94/$.& %4"$0 1.234*52"6 .2.$0=43& ] "&? 1.234*52"6UV[

G92.&*.&; /24; M&%29&L-&*).&U=89&$; .e N)""$M0& 9V W
#)G&9+M&%29&L-&*).&U.e 9V[
026+%4"&U'.94"6+%293$.Ud=89&$; x#h #.$9. x#de .e 9VV[
#.$9.=43&+#&.U'7#.&3+"$"2=43&UVV[
\

G92.&*.&; /24; $%.&9L-&*).&UN)""$M0& 9e =892?$M0& .V W
.97 W
02"6 &";=43& ] '7#.&3+"$"2=43&UV[
02"6 .$#C=43& ] &";=43& a #.$9.=43&+6&.UV[
")3=$#C#+4"*9&3&".1";H&.UV[
.2.$0=43&+$;;1";H&.U.$#C=43&V[
026+%4"&U'.94"6+%293$.Ud=89&$; x#h &"; x#e .43&]x;"#de
.e 9e .$#C=43&VV[
\ %4"$007 W
#)G&9+$%.&9L-&*).&U9e .V[
\
\

G92.&*.&; /24; .&934"$.&;UV W
.97 W
026+4"%2U'.94"6+%293$.Ud=&934"$.&;h $/6 .43&]x;"#de
.2.$0=43&+6&.UV X ")3=$#C#+6&.UVVV[
\ %4"$007 W
#)G&9+.&934"$.&;UV[
\
\
\
8.5. Parallelizing Recursive Algorithms
The page rendering examples in Section 6.3 went through a series of refinements in search of exploitable parallelism. The first attempt was entirely sequential; the second used two threads but still performed all the image downloads sequentially; the final version treated each image download as a separate task to achieve greater parallelism. Loops whose bodies contain nontrivial computation or perform potentially blocking I/O are frequently good candidates for parallelization, as long as the iterations are independent.
If we have a loop whose iterations are independent and we don't need to wait for all of them to complete before proceeding, we can use an Executor to transform a sequential loop into a parallel one, as shown in processSequentially and processInParallel in Listing 8.10.
Listing 8.10. Transforming Sequential Execution into Parallel Execution.
/24; G92*&##'&()&".4$007U54#.RL0&3&".T &0&3&".#V W
%29 UL0&3&". & h &0&3&".#V
G92*&##U&V[
\

/24; G92*&##I"@$9$00&0UL-&*).29 &-&*e 54#.RL0&3&".T &0&3&".#V W
%29 U%4"$0 L0&3&". & h &0&3&".#V
&-&*+&-&*).&U"&? N)""$M0&UV W
G)M04* /24; 9)"UV W G92*&##U&V[ \
\V[
\
A call Lo G92*&##I"@$9$00&0 reLurns more qulckly Lhan a call Lo G92*&##'&()&".4$007 because lL reLurns as soon as all
Lhe Lasks are queued Lo Lhe L-&*).29, raLher Lhan walLlng for Lhem all Lo compleLe. lf you wanL Lo submlL a seL of Lasks
and walL for Lhem all Lo compleLe, you can use L-&*).29'&9/4*&+4"/2C&100, Lo reLrleve Lhe resulLs as Lhey become
avallable, you can use a D23G0&.42"'&9/4*&, as ln N&";&9&9 on page 130.
Sequential loop iterations are suitable for parallelization when each iteration is independent of the others and the work done in each iteration of the loop body is significant enough to offset the cost of managing a new task.
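A minimal sketch of the submit-and-wait variant using invokeAll (the Callable body is illustrative; process is assumed here to return a value, unlike the void version in Listing 8.10):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Future;

// Sketch: invokeAll blocks until every task completes (or the caller
// is interrupted) and returns one Future per submitted task.
List<Callable<String>> tasks = new ArrayList<Callable<String>>();
for (final Element e : elements)
    tasks.add(new Callable<String>() {
        public String call() { return process(e); }  // hypothetical
    });
List<Future<String>> results = exec.invokeAll(tasks);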
Loop parallelization can also be applied to some recursive designs; there are often sequential loops within the recursive algorithm that can be parallelized in the same manner as Listing 8.10. The easier case is when each iteration does not require the results of the recursive iterations it invokes. For example, sequentialRecursive in Listing 8.11 does a
depth-first traversal of a tree, performing a calculation on each node and placing the result in a collection. The transformed version, parallelRecursive, also does a depth-first traversal, but instead of computing the result as each node is visited, it submits a task to compute the node result.
Listing 8.11. Transforming Sequential Tail-recursion into Parallelized Recursion.
G)M04*R=T /24; #&()&".4$0N&*)9#4/&U54#.R,2;&R=TT "2;&#e
D200&*.42"R=T 9&#)0.#V W
%29 U,2;&R=T " h "2;&#V W
9&#)0.#+$;;U"+*23G).&UVV[
#&()&".4$0N&*)9#4/&U"+6&.D840;9&"UVe 9&#)0.#V[
\
\

G)M04*R=T /24; G$9$00&0N&*)9#4/&U%4"$0 L-&*).29 &-&*e
54#.R,2;&R=TT "2;&#e
%4"$0 D200&*.42"R=T 9&#)0.#V W
%29 U%4"$0 ,2;&R=T " h "2;&#V W
&-&*+&-&*).&U"&? N)""$M0&UV W
G)M04* /24; 9)"UV W
9&#)0.#+$;;U"+*23G).&UVV[
\
\V[
G$9$00&0N&*)9#4/&U&-&*e "+6&.D840;9&"UVe 9&#)0.#V[
\
\
When parallelRecursive returns, each node in the tree has been visited (the traversal is still sequential: only the calls to compute are executed in parallel) and the computation for each node has been queued to the Executor. Callers of parallelRecursive can wait for all the results by creating an Executor specific to the traversal and using shutdown and awaitTermination, as shown in Listing 8.12.
Listing 8.12. Waiting for Results to be Calculated in Parallel.
G)M04*R=T D200&*.42"R=T 6&.@$9$00&0N&#)0.#U54#.R,2;&R=TT "2;&#V
.892?# I".&99)G.&;L-*&G.42" W
L-&*).29'&9/4*& &-&* ] L-&*).29#+"&?D$*8&;=89&$;@220UV[
O)&)&R=T 9&#)0.O)&)& ] "&? D2"*)99&".54"C&;O)&)&R=TUV[
G$9$00&0N&*)9#4/&U&-&*e "2;&#e 9&#)0.O)&)&V[
&-&*+#8).;2?"UV[
&-&*+$?$4.=&934"$.42"U52"6+F1okB15!Le =43&!"4.+'LDJ,A'V[
9&.)9" 9&#)0.O)&)&[
\
8.5.1. Example: A Puzzle Framework
An appealing application of this technique is solving puzzles that involve finding a sequence of transformations from some initial state to reach a goal state, such as the familiar "sliding block puzzles",[7] "Hi-Q", "Instant Insanity", and other solitaire puzzles.
[7] See http://www.puzzleworld.org/SlidingBlockPuzzles.
We deflne a "puzzle" as a comblnaLlon of an lnlLlal poslLlon, a goal poslLlon, and a seL of rules LhaL deLermlne valld
moves. 1he rule seL has Lwo parLs: compuLlng Lhe llsL of legal moves from a glven poslLlon and compuLlng Lhe resulL of
applylng a move Lo a poslLlon. @)::0& ln LlsLlng 8.13 shows our puzzle absLracLlon, Lhe Lype parameLers @ and F
represenL Lhe classes for a poslLlon and a move. lrom Lhls lnLerface, we can wrlLe a slmple sequenLlal solver LhaL
searches Lhe puzzle space unLll a soluLlon ls found or Lhe puzzle space ls exhausLed.
Listing 8.13. Abstraction for Puzzles Like the "Sliding Blocks Puzzle".
G)M04* 4".&9%$*& @)::0&R@e FT W
@ 4"4.4$0@2#4.42"UV[
M220&$" 4#H2$0U@ G2#4.42"V[
'&.RFT 0&6$0F2/&#U@ G2#4.42"V[
@ 32/&U@ G2#4.42"e F 32/&V[
\
Node in Listing 8.14 represents a position that has been reached through some series of moves, holding a reference to the move that created the position and the previous Node. Following the links back from a Node lets us reconstruct the sequence of moves that led to the current position.
SequentialPuzzleSolver in Listing 8.15 shows a sequential solver for the puzzle framework that performs a depth-first search of the puzzle space. It terminates when it finds a solution (which is not necessarily the shortest solution).
Rewriting the solver to exploit concurrency would allow us to compute next moves and evaluate the goal condition in parallel, since the process of evaluating one move is mostly independent of evaluating other moves. (We say "mostly"
because tasks share some mutable state, such as the set of seen positions.) If multiple processors are available, this could reduce the time it takes to find a solution.
D2"*)99&".@)::0&'20/&9 ln LlsLlng 8.16 uses an lnner '20/&9=$#C class LhaL exLends ,2;& and lmplemenLs N)""$M0&.
MosL of Lhe work ls done ln 9)": evaluaLlng Lhe seL of posslble nexL poslLlons, prunlng poslLlons already searched,
evaluaLlng wheLher success has yeL been achleved (by Lhls Lask or by some oLher Lask), and submlLLlng unsearched
poslLlons Lo an L-&*).29.
1o avold lnflnlLe loops, Lhe sequenLlal verslon malnLalned a '&. of prevlously searched poslLlons,
D2"*)99&".@)::0&'20/&9 uses a D2"*)99&".E$#8F$G for Lhls purpose. 1hls provldes Lhread safeLy and avolds Lhe race
condlLlon lnherenL ln condlLlonally updaLlng a shared collecLlon by uslng G).I%1M#&". Lo aLomlcally add a poslLlon only
lf lL was noL prevlously known. D2"*)99&".@)::0&'20/&9 uses Lhe lnLernal work queue of Lhe Lhread pool lnsLead of
Lhe call sLack Lo hold Lhe sLaLe of Lhe search.
Listing 8.14. Link Node for the Puzzle Solver Framework.
@Immutable
static class Node<P, M> {
    final P pos;
    final M move;
    final Node<P, M> prev;

    Node(P pos, M move, Node<P, M> prev) {...}

    List<M> asMoveList() {
        List<M> solution = new LinkedList<M>();
        for (Node<P, M> n = this; n.move != null; n = n.prev)
            solution.add(0, n.move);
        return solution;
    }
}
The concurrent approach also trades one form of limitation for another that might be more suitable to the problem domain. The sequential version performs a depth-first search, so the search is bounded by the available stack size. The concurrent version performs a breadth-first search and is therefore free of the stack size restriction (but can still run out of memory if the set of positions to be searched or already searched exceeds the available memory).
In order to stop searching when we find a solution, we need a way to determine whether any thread has found a solution yet. If we want to accept the first solution found, we also need to update the solution only if no other task has already found one. These requirements describe a sort of latch (see Section 5.5.1) and in particular, a result-bearing latch. We could easily build a blocking result-bearing latch using the techniques in Chapter 14, but it is often easier and less error-prone to use existing library classes rather than low-level language mechanisms. ValueLatch in Listing 8.17 uses a CountDownLatch to provide the needed latching behavior, and uses locking to ensure that the solution is set only once.
Each task first consults the solution latch and stops if a solution has already been found. The main thread needs to wait until a solution is found; getValue in ValueLatch blocks until some thread has set the value. ValueLatch provides a way to hold a value such that only the first call actually sets the value, callers can test whether it has been set, and callers can block waiting for it to be set. On the first call to setValue, the solution is updated and the CountDownLatch is decremented, releasing the main solver thread from getValue.
The first thread to find a solution also shuts down the Executor, to prevent new tasks from being accepted. To avoid having to deal with RejectedExecutionException, the rejected execution handler should be set to discard submitted tasks. Then, all unfinished tasks eventually run to completion and any subsequent attempts to execute new tasks fail silently, allowing the executor to terminate. (If the tasks took longer to run, we might want to interrupt them instead of letting them finish.)
Listing 8.15. Sequential Puzzle Solver.
G)M04* *0$## '&()&".4$0@)::0&'20/&9R@e FT W
G94/$.& %4"$0 @)::0&R@e FT G)::0&[
G94/$.& %4"$0 '&.R@T #&&" ] "&? E$#8'&.R@TUV[

G)M04* '&()&".4$0@)::0&'20/&9U@)::0&R@e FT G)::0&V W
.84#+G)::0& ] G)::0&[
\

G)M04* 54#.RFT #20/&UV W
@ G2# ] G)::0&+4"4.4$0@2#4.42"UV[
9&.)9" #&$9*8U"&? ,2;&R@e FTUG2#e ")00e ")00VV[
\

G94/$.& 54#.RFT #&$9*8U,2;&R@e FT "2;&V W
4% UY#&&"+*2".$4"#U"2;&+G2#VV W
#&&"+$;;U"2;&+G2#V[
4% UG)::0&+4#H2$0U"2;&+G2#VV
9&.)9" "2;&+$#F2/&54#.UV[
%29 UF 32/& h G)::0&+0&6$0F2/&#U"2;&+G2#VV W
@ G2# ] G)::0&+32/&U"2;&+G2#e 32/&V[
,2;&R@e FT *840; ] "&? ,2;&R@e FTUG2#e 32/&e "2;&V[
54#.RFT 9&#)0. ] #&$9*8U*840;V[
4% U9&#)0. Y] ")00V
9&.)9" 9&#)0.[
\
\
9&.)9" ")00[
\

#.$.4* *0$## ,2;&R@e FT W Xc 54#.4"6 t+^i cX \
\
Listing 8.16. Concurrent Version of Puzzle Solver.
G)M04* *0$## D2"*)99&".@)::0&'20/&9R@e FT W
G94/$.& %4"$0 @)::0&R@e FT G)::0&[
G94/$.& %4"$0 L-&*).29'&9/4*& &-&*[
G94/$.& %4"$0 D2"*)99&".F$GR@e K220&$"T #&&"[
%4"$0 B$0)&5$.*8R,2;&R@e FTT #20).42"
] "&? B$0)&5$.*8R,2;&R@e FTTUV[
+++
G)M04* 54#.RFT #20/&UV .892?# I".&99)G.&;L-*&G.42" W
.97 W
@ G ] G)::0&+4"4.4$0@2#4.42"UV[
&-&*+&-&*).&U"&?=$#CUGe ")00e ")00VV[
XX M02*C )".40 #20).42" %2)";
,2;&R@e FT #20",2;& ] #20).42"+6&.B$0)&UV[
9&.)9" U#20",2;& ]] ")00V S ")00 h #20",2;&+$#F2/&54#.UV[
\ %4"$007 W
&-&*+#8).;2?"UV[
\
\

G92.&*.&; N)""$M0& "&?=$#CU@ Ge F 3e ,2;&R@eFT "V W
9&.)9" "&? '20/&9=$#CUGe 3e "V[
\

*0$## '20/&9=$#C &-.&";# ,2;&R@e FT 43G0&3&".# N)""$M0& W
+++
G)M04* /24; 9)"UV W
4% U#20).42"+4#'&.UV
nn #&&"+G).I%1M#&".UG2#e .9)&V Y] ")00V
9&.)9"[ XX $09&$;7 #20/&; 29 #&&" .84# G2#4.42"
4% UG)::0&+4#H2$0UG2#VV
#20).42"+#&.B$0)&U.84#V[
&0#&
%29 UF 3 h G)::0&+0&6$0F2/&#UG2#VV
&-&*+&-&*).&U
"&?=$#CUG)::0&+32/&UG2#e 3Ve 3e .84#VV[
\
\
\


Listing 8.17. Result-bearing Latch Used by ConcurrentPuzzleSolver.
@ThreadSafe
public class ValueLatch<T> {
    @GuardedBy("this") private T value = null;
    private final CountDownLatch done = new CountDownLatch(1);

    public boolean isSet() {
        return (done.getCount() == 0);
    }

    public synchronized void setValue(T newValue) {
        if (!isSet()) {
            value = newValue;
            done.countDown();
        }
    }

    public T getValue() throws InterruptedException {
        done.await();
        synchronized (this) {
            return value;
        }
    }
}
D2"*)99&".@)::0&'20/&9 does noL deal well wlLh Lhe case where Lhere ls no soluLlon: lf all posslble moves and
poslLlons have been evaluaLed and no soluLlon has been found, #20/& walLs forever ln Lhe call Lo 6&.'20).42". 1he
sequenLlal verslon LermlnaLed when lL had exhausLed Lhe search space, buL geLLlng concurrenL programs Lo LermlnaLe
can someLlmes be more dlfflculL. Cne posslble soluLlon ls Lo keep a counL of acLlve solver Lasks and seL Lhe soluLlon Lo
null when Lhe counL drops Lo zero, as ln LlsLlng 8.18.
Finding the solution may also take longer than we are willing to wait; there are several additional termination conditions we could impose on the solver. One is a time limit; this is easily done by implementing a timed getValue in ValueLatch (which would use the timed version of await), and shutting down the Executor and declaring failure if getValue times out. Another is some sort of puzzle-specific metric such as searching only up to a certain number of positions. Or we can provide a cancellation mechanism and let the client make its own decision about when to stop searching.
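A hedged sketch of that timed variant, written as an extra overload for ValueLatch from Listing 8.17 (this method is our addition, not part of the listing):

// Sketch: timed companion to getValue, using the timed version of
// CountDownLatch.await; returns null if no value is set in time.
public T getValue(long timeout, TimeUnit unit)
        throws InterruptedException {
    if (!done.await(timeout, unit))
        return null;
    synchronized (this) {
        return value;
    }
}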
Listing 8.18. Solver that Recognizes when No Solution Exists.
public class PuzzleSolver<P,M> extends ConcurrentPuzzleSolver<P,M> {
    ...
    private final AtomicInteger taskCount = new AtomicInteger(0);

    protected Runnable newTask(P p, M m, Node<P,M> n) {
        return new CountingSolverTask(p, m, n);
    }

    class CountingSolverTask extends SolverTask {
        CountingSolverTask(P pos, M move, Node<P, M> prev) {
            super(pos, move, prev);
            taskCount.incrementAndGet();
        }

        public void run() {
            try {
                super.run();
            } finally {
                if (taskCount.decrementAndGet() == 0)
                    solution.setValue(null);
            }
        }
    }
}
C(55")+
The Executor framework is a powerful and flexible framework for concurrently executing tasks. It offers a number of tuning options, such as policies for creating and tearing down threads, handling queued tasks, and what to do with excess tasks, and provides several hooks for extending its behavior. As in most powerful frameworks, however, there are combinations of settings that do not work well together; some types of tasks require specific execution policies, and some combinations of tuning parameters may produce strange results.
.*) ^A">
Chapter 9. GUI Applications
If you've tried to write even a simple GUI application using Swing, you know that GUI applications have their own peculiar threading issues. To maintain safety, certain tasks must run in the Swing event thread. But you cannot execute long-running tasks in the event thread, lest the UI become unresponsive. And Swing data structures are not thread-safe, so you must be careful to confine them to the event thread.
Nearly all GUI toolkits, including Swing and SWT, are implemented as single-threaded subsystems in which all GUI activity is confined to a single thread. If you are not planning to write a totally single-threaded program, there will be activities that run partially in an application thread and partially in the event thread. Like many other threading bugs, getting this division wrong may not necessarily make your program crash immediately; instead, it could behave oddly under hard-to-identify conditions. Even though the GUI frameworks themselves are single-threaded subsystems, your application may not be, and you still need to consider threading issues carefully when writing GUI code.
9.1. Why are GUIs Single-threaded?
In the old days, GUI applications were single-threaded and GUI events were processed from a "main event loop". Modern GUI frameworks use a model that is only slightly different: they create a dedicated event dispatch thread (EDT) for handling GUI events.
Single-threaded GUI frameworks are not unique to Java; Qt, NextStep, MacOS Cocoa, X Windows, and many others are also single-threaded. This is not for lack of trying; there have been many attempts to write multithreaded GUI frameworks, but because of persistent problems with race conditions and deadlock, they all eventually arrived at the single-threaded event queue model in which a dedicated thread fetches events off a queue and dispatches them to application-defined event handlers. (AWT originally tried to support a greater degree of multithreaded access, and the decision to make Swing single-threaded was based largely on experience with AWT.)
Multithreaded GUI frameworks tend to be particularly susceptible to deadlock, partially because of the unfortunate interaction between input event processing and any sensible object-oriented modeling of GUI components. Actions initiated by the user tend to "bubble up" from the OS to the application: a mouse click is detected by the OS, is turned into a "mouse click" event by the toolkit, and is eventually delivered to an application listener as a higher-level event such as a "button pressed" event. On the other hand, application-initiated actions "bubble down" from the application to the OS: changing the background color of a component originates in the application and is dispatched to a specific component class and eventually into the OS for rendering. Combining this tendency for activities to access the same GUI objects in the opposite order with the requirement of making each object thread-safe yields a recipe for inconsistent lock ordering, which leads to deadlock (see Chapter 10). And this is exactly what nearly every GUI toolkit development effort rediscovered through experience.
Another factor leading to deadlock in multithreaded GUI frameworks is the prevalence of the model-view-control (MVC) pattern. Factoring user interactions into cooperating model, view, and controller objects greatly simplifies implementing GUI applications, but again raises the risk of inconsistent lock ordering. The controller calls into the model, which notifies the view that something has changed. But the controller can also call into the view, which may in turn call back into the model to query the model state. The result is again inconsistent lock ordering, with the attendant risk of deadlock.
In his weblog,[1] Sun VP Graham Hamilton nicely sums up the challenges, describing why the multithreaded GUI toolkit is one of the recurring "failed dreams" of computer science.
[1] http://weblogs.java.net/blog/kgh/archive/2004/10
I believe you can program successfully with multithreaded GUI toolkits if the toolkit is very carefully designed; if the toolkit exposes its locking methodology in gory detail; if you are very smart, very careful, and have a global understanding of the whole structure of the toolkit. If you get one of these things slightly wrong, things will mostly work, but you will get occasional hangs (due to deadlocks) or glitches (due to races). This multithreaded approach works best for people who have been intimately involved in the design of the toolkit.
Unfortunately, I don't think this set of characteristics scales to widespread commercial use. What you tend to end up with is normal smart programmers building apps that don't quite work reliably for reasons that are not at all obvious. So the authors get very disgruntled and frustrated and use bad words on the poor innocent toolkit.
Single-threaded GUI frameworks achieve thread safety via thread confinement; all GUI objects, including visual components and data models, are accessed exclusively from the event thread. Of course, this just pushes some of the thread safety burden back onto the application developer, who must make sure these objects are properly confined.
9.1.1. Sequential Event Processing
GUI applications are oriented around processing fine-grained events such as mouse clicks, key presses, or timer expirations. Events are a kind of task; the event handling machinery provided by AWT and Swing is structurally similar to an Executor.
Because there is only a single thread for processing GUI tasks, they are processed sequentially: one task finishes before the next one begins, and no two tasks overlap. Knowing this makes writing task code easier: you don't have to worry about interference from other tasks.
The downside of sequential task processing is that if one task takes a long time to execute, other tasks must wait until it is finished. If those other tasks are responsible for responding to user input or providing visual feedback, the application will appear to have frozen. If a lengthy task is running in the event thread, the user cannot even click "Cancel" because the cancel button listener is not called until the lengthy task completes. Therefore, tasks that execute in the event thread must return control to the event thread quickly. To initiate a long-running task such as spell-checking a large document, searching the file system, or fetching a resource over a network, you must run that task in another thread so control can return quickly to the event thread. To update a progress indicator while a long-running task executes or provide visual feedback when it completes, you again need to execute code in the event thread. This can get complicated quickly.
9.1.2. Thread Confinement in Swing
All Swing components (such as JButton and JTable) and data model objects (such as TableModel and TreeModel) are confined to the event thread, so any code that accesses these objects must run in the event thread. GUI objects are kept consistent not by synchronization, but by thread confinement. The upside is that tasks that run in the event thread need not worry about synchronization when accessing presentation objects; the downside is that you cannot access presentation objects from outside the event thread at all.
The Swing single-thread rule: Swing components and models should be created, modified, and queried only from the event-dispatching thread.
As with all rules, there are a few exceptions. A small number of Swing methods may be called safely from any thread; these are clearly identified in the Javadoc as being thread-safe. Other exceptions to the single-thread rule include (a short usage sketch follows the list):

- SwingUtilities.isEventDispatchThread, which determines whether the current thread is the event thread;
- SwingUtilities.invokeLater, which schedules a Runnable for execution on the event thread (callable from any thread);
- SwingUtilities.invokeAndWait, which schedules a Runnable task for execution on the event thread and blocks the current thread until it completes (callable only from a non-GUI thread);
- methods to enqueue a repaint or revalidation request on the event queue (callable from any thread); and
- methods for adding and removing listeners (can be called from any thread, but listeners will always be invoked in the event thread).
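For example, a helper method that must be callable from any thread can test isEventDispatchThread and hop to the event thread only when needed. A minimal sketch follows; StatusUpdater and its label are illustrative, not part of the Swing API:

public class StatusUpdater {
    // Hypothetical component; any Swing object needs the same treatment
    private final JLabel statusLabel = new JLabel();

    // Safe to call from any thread: runs on the event thread directly
    // if we are already there, otherwise hops to it with invokeLater
    public void setStatus(final String status) {
        if (SwingUtilities.isEventDispatchThread())
            statusLabel.setText(status);
        else
            SwingUtilities.invokeLater(new Runnable() {
                public void run() {
                    statusLabel.setText(status);
                }
            });
    }
}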
The invokeLater and invokeAndWait methods function a lot like an Executor. In fact, it is trivial to implement the threading-related methods from SwingUtilities using a single-threaded Executor, as shown in Listing 9.1. This is not how SwingUtilities is actually implemented, as Swing predates the Executor framework, but is probably how it would be if Swing were being implemented today.
The Swing event thread can be thought of as a single-threaded Executor that processes tasks from the event queue. As with thread pools, sometimes the worker thread dies and is replaced by a new one, but this should be transparent to tasks. Sequential, single-threaded execution is a sensible execution policy when tasks are short-lived, scheduling predictability is not important, or it is imperative that tasks not execute concurrently.
GuiExecutor in Listing 9.2 is an Executor that delegates tasks to SwingUtilities for execution. It could be implemented in terms of other GUI frameworks as well; for example, SWT provides the Display.asyncExec method, which is similar to Swing's invokeLater.

9.2. Short-running GUI Tasks
In a GUI application, events originate in the event thread and bubble up to application-provided listeners, which will probably perform some computation that affects the presentation objects. For simple, short-running tasks, the entire action can stay in the event thread; for longer-running tasks, some of the processing should be offloaded to another thread.
In the simple case, confining presentation objects to the event thread is completely natural. Listing 9.3 creates a button whose color changes randomly when pressed. When the user clicks on the button, the toolkit delivers an ActionEvent in the event thread to all registered action listeners. In response, the action listener picks a new color and changes the button's background color. So the event originates in the GUI toolkit and is delivered to the application, and the application modifies the GUI in response to the user's action. Control never has to leave the event thread, as illustrated in Figure 9.1.
Figure 9.1. Control Flow of a Simple Button Click.


This trivial example characterizes the majority of interactions between GUI applications and GUI toolkits. So long as tasks are short-lived and access only GUI objects (or other thread-confined or thread-safe application objects), you can almost totally ignore threading concerns and do everything from the event thread, and the right thing happens.


Listing 9.1. Implementing SwingUtilities Using an Executor.
public class SwingUtilities {
    private static final ExecutorService exec =
        Executors.newSingleThreadExecutor(new SwingThreadFactory());
    private static volatile Thread swingThread;

    private static class SwingThreadFactory implements ThreadFactory {
        public Thread newThread(Runnable r) {
            swingThread = new Thread(r);
            return swingThread;
        }
    }

    public static boolean isEventDispatchThread() {
        return Thread.currentThread() == swingThread;
    }

    public static void invokeLater(Runnable task) {
        exec.execute(task);
    }

    public static void invokeAndWait(Runnable task)
            throws InterruptedException, InvocationTargetException {
        Future f = exec.submit(task);
        try {
            f.get();
        } catch (ExecutionException e) {
            throw new InvocationTargetException(e);
        }
    }
}

Listing 9.2. Executor Built Atop SwingUtilities.
public class GuiExecutor extends AbstractExecutorService {
    // Singletons have a private constructor and a public factory
    private static final GuiExecutor instance = new GuiExecutor();

    private GuiExecutor() { }

    public static GuiExecutor instance() { return instance; }

    public void execute(Runnable r) {
        if (SwingUtilities.isEventDispatchThread())
            r.run();
        else
            SwingUtilities.invokeLater(r);
    }

    // Plus trivial implementations of lifecycle methods
}
Listing 9.3. Simple Event Listener.
final Random random = new Random();
final JButton button = new JButton("Change Color");
...
button.addActionListener(new ActionListener() {
    public void actionPerformed(ActionEvent e) {
        button.setBackground(new Color(random.nextInt()));
    }
});
A slightly more complicated version of this same scenario, illustrated in Figure 9.2, involves the use of a formal data model such as a TableModel or TreeModel. Swing splits most visual components into two objects, a model and a view. The data to be displayed resides in the model and the rules governing how it is displayed reside in the view. The model objects can fire events indicating that the model data has changed, and views subscribe to these events. When the view receives an event indicating the model data may have changed, it queries the model for the new data and updates the display. So in a button listener that modifies the contents of a table, the action listener would update the model and call one of the fireXxx methods, which would in turn invoke the view's table model listeners, which would update the view. Again, control never leaves the event thread. (The Swing data model fireXxx methods always call the model listeners directly rather than submitting a new event to the event queue, so the fireXxx methods must be called only from the event thread.)

Figure 9.2. Control Flow with Separate Model and View Objects.

9.3. Long-running GUI Tasks
If all tasks were short-running (and the application had no significant non-GUI portion), then the entire application could run within the event thread and you wouldn't have to pay any attention to threads at all. However, sophisticated GUI applications may execute tasks that may take longer than the user is willing to wait, such as spell checking, background compilation, or fetching remote resources. These tasks must run in another thread so that the GUI remains responsive while they run.
Swing makes it easy to have a task run in the event thread, but (prior to Java 6) doesn't provide any mechanism for helping GUI tasks execute code in other threads. But we don't need Swing to help us here: we can create our own Executor for processing long-running tasks. A cached thread pool is a good choice for long-running tasks; only rarely do GUI applications initiate a large number of long-running tasks, so there is little risk of the pool growing without bound.
We start with a simple task that does not support cancellation or progress indication and that does not update the GUI on completion, and then add those features one by one. Listing 9.4 shows an action listener, bound to a visual component, that submits a long-running task to an Executor. Despite the two layers of inner classes, having a GUI task initiate a task in this manner is fairly straightforward: the UI action listener is called in the event thread and submits a Runnable to execute in the thread pool.
This example gets the long-running task out of the event thread in a "fire and forget" manner, which is probably not very useful. There is usually some sort of visual feedback when a long-running task completes. But you cannot access presentation objects from the background thread, so on completion the task must submit another task to run in the event thread to update the user interface.
Listing 9.4. Binding a Long-running Task to a Visual Component.
ExecutorService backgroundExec = Executors.newCachedThreadPool();
...
button.addActionListener(new ActionListener() {
    public void actionPerformed(ActionEvent e) {
        backgroundExec.execute(new Runnable() {
            public void run() { doBigComputation(); }
        });
    }});
Listing 9.5 illustrates the obvious way to do this, which is starting to get complicated: we're now up to three layers of inner classes. The action listener first dims the button and sets a label indicating that a computation is in progress, then submits a task to the background executor. When that task finishes, it queues another task to run in the event thread, which re-enables the button and restores the label text.

Listing 9.5. Long-running Task with User Feedback.
button.addActionListener(new ActionListener() {
    public void actionPerformed(ActionEvent e) {
        button.setEnabled(false);
        label.setText("busy");
        backgroundExec.execute(new Runnable() {
            public void run() {
                try {
                    doBigComputation();
                } finally {
                    GuiExecutor.instance().execute(new Runnable() {
                        public void run() {
                            button.setEnabled(true);
                            label.setText("idle");
                        }
                    });
                }
            }
        });
    }
});
The task triggered when the button is pressed is composed of three sequential subtasks whose execution alternates between the event thread and the background thread. The first subtask updates the user interface to show that a long-running operation has begun and starts the second subtask in a background thread. Upon completion, the second subtask queues the third subtask to run again in the event thread, which updates the user interface to reflect that the operation has completed. This sort of "thread hopping" is typical of handling long-running tasks in GUI applications.
9.3.1. Cancellation
Any task that takes long enough to run in another thread probably also takes long enough that the user might want to cancel it. You could implement cancellation directly using thread interruption, but it is much easier to use Future, which was designed to manage cancellable tasks.
When you call cancel on a Future with mayInterruptIfRunning set to true, the Future implementation interrupts the thread that is executing the task if it is currently running. If your task is written to be responsive to interruption, it can return early if it is cancelled. Listing 9.6 illustrates a task that polls the thread's interrupted status and returns early on interruption.
Listing 9.6. Cancelling a Long-running Task.
Future<?> runningTask = null;    // thread-confined
...
startButton.addActionListener(new ActionListener() {
    public void actionPerformed(ActionEvent e) {
        if (runningTask == null) {
            runningTask = backgroundExec.submit(new Runnable() {
                public void run() {
                    while (moreWork()) {
                        if (Thread.currentThread().isInterrupted()) {
                            cleanUpPartialWork();
                            break;
                        }
                        doSomeWork();
                    }
                }
            });
        }
    }});

cancelButton.addActionListener(new ActionListener() {
    public void actionPerformed(ActionEvent event) {
        if (runningTask != null)
            runningTask.cancel(true);
}});
Because runningTask is confined to the event thread, no synchronization is required when setting or checking it, and the start button listener ensures that only one background task is running at a time. However, it would be better to be notified when the task completes so that, for example, the cancel button could be disabled. We address this in the next section.
9.3.2. Progress and Completion Indication
Using a Future to represent a long-running task greatly simplified implementing cancellation. FutureTask also has a done hook that similarly facilitates completion notification. After the background Callable completes, done is called. By having done trigger a completion task in the event thread, we can construct a BackgroundTask class providing an onCompletion hook that is called in the event thread, as shown in Listing 9.7.
BackgroundTask also supports progress indication. The compute method can call setProgress, indicating progress in numerical terms. This causes onProgress to be called from the event thread, which can update the user interface to indicate progress visually.
To implement a BackgroundTask you need only implement compute, which is called in the background thread. You also have the option of overriding onCompletion and onProgress, which are invoked in the event thread.
Basing BackgroundTask on FutureTask also simplifies cancellation. Rather than having to poll the thread's interrupted status, compute can call Future.isCancelled. Listing 9.8 recasts the example from Listing 9.6 using BackgroundTask.
9.3.3. SwingWorker
We've built a simple framework using FutureTask and Executor to execute long-running tasks in background threads without undermining the responsiveness of the GUI. These techniques can be applied to any single-threaded GUI framework, not just Swing. In Swing, many of the features developed here are provided by the SwingWorker class, including cancellation, completion notification, and progress indication. Various versions of SwingWorker have been published in The Swing Connection and The Java Tutorial, and an updated version is included in Java 6. A rough sketch of its use follows.
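For readers on Java 6, this is roughly what a cancellable, progress-reporting task looks like with the standard javax.swing.SwingWorker; the file list, indexFile method, and label are placeholders, and error handling is omitted:

public class FileIndexer extends SwingWorker<Integer, String> {
    private final List<String> files;
    private final JLabel statusLabel;

    public FileIndexer(List<String> files, JLabel statusLabel) {
        this.files = files;
        this.statusLabel = statusLabel;
    }

    // Called in a background thread
    protected Integer doInBackground() throws Exception {
        int indexed = 0;
        for (String file : files) {
            if (isCancelled())
                break;
            indexFile(file);
            indexed++;
            publish(file);                              // delivered to process() in the event thread
            setProgress(100 * indexed / files.size());  // fires a "progress" PropertyChangeEvent
        }
        return indexed;
    }

    // Called in the event thread with batches of published values
    protected void process(List<String> chunks) {
        statusLabel.setText("Indexed " + chunks.get(chunks.size() - 1));
    }

    // Called in the event thread when doInBackground completes
    protected void done() {
        statusLabel.setText(isCancelled() ? "cancelled" : "done");
    }

    private void indexFile(String file) { /* placeholder */ }
}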
9.4. Shared Data Models
Swing presentation objects, including data model objects such as TableModel or TreeModel, are confined to the event thread. In simple GUI programs, all the mutable state is held in the presentation objects and the only thread besides the event thread is the main thread. In these programs enforcing the single-thread rule is easy: don't access the data model or presentation components from the main thread. More complicated programs may use other threads to move data to or from a persistent store, such as a file system or database, so as not to compromise responsiveness.
In the simplest case, the data in the data model is entered by the user or loaded statically from a file or other data source at application startup, in which case the data is never touched by any thread other than the event thread. But sometimes the presentation model object is only a view onto another data source, such as a database, file system, or remote service. In this case, more than one thread is likely to touch the data as it goes into or out of the application.

Listing 9.7. Background Task Class Supporting Cancellation, Completion Notification, and Progress Notification.
abstract class BackgroundTask<V> implements Runnable, Future<V> {
    private final FutureTask<V> computation = new Computation();

    private class Computation extends FutureTask<V> {
        public Computation() {
            super(new Callable<V>() {
                public V call() throws Exception {
                    return BackgroundTask.this.compute();
                }
            });
        }
        protected final void done() {
            GuiExecutor.instance().execute(new Runnable() {
                public void run() {
                    V value = null;
                    Throwable thrown = null;
                    boolean cancelled = false;
                    try {
                        value = get();
                    } catch (ExecutionException e) {
                        thrown = e.getCause();
                    } catch (CancellationException e) {
                        cancelled = true;
                    } catch (InterruptedException consumed) {
                    } finally {
                        onCompletion(value, thrown, cancelled);
                    }
                }
            });
        }
    }
    protected void setProgress(final int current, final int max) {
        GuiExecutor.instance().execute(new Runnable() {
            public void run() { onProgress(current, max); }
        });
    }
    // Called in the background thread
    protected abstract V compute() throws Exception;
    // Called in the event thread
    protected void onCompletion(V result, Throwable exception,
                                boolean cancelled) { }
    protected void onProgress(int current, int max) { }
    // Other Future methods forwarded to computation
}
Listing 9.8. Initiating a Long-running, Cancellable Task with BackgroundTask.
public void runInBackground(final Runnable task) {
    startButton.addActionListener(new ActionListener() {
        public void actionPerformed(ActionEvent e) {
            class CancelListener implements ActionListener {
                BackgroundTask<?> task;
                public void actionPerformed(ActionEvent event) {
                    if (task != null)
                        task.cancel(true);
                }
            }
            final CancelListener listener = new CancelListener();
            listener.task = new BackgroundTask<Void>() {
                public Void compute() {
                    while (moreWork() && !isCancelled())
                        doSomeWork();
                    return null;
                }
                public void onCompletion(Void result, Throwable exception,
                                         boolean cancelled) {
                    cancelButton.removeActionListener(listener);
                    label.setText("done");
                }
            };
            cancelButton.addActionListener(listener);
            backgroundExec.execute(listener.task);
        }
    });
}
For example, you might display the contents of a remote file system using a tree control. You wouldn't want to enumerate the entire file system before you can display the tree control; that would take too much time and memory. Instead, the tree can be lazily populated as nodes are expanded. Enumerating even a single directory on a remote volume can take a long time, so you may want to do the enumeration in a background task. When the background task completes, you have to get the data into the tree model somehow. This could be done by using a thread-safe tree model, by "pushing" the data from the background task to the event thread by posting a task with invokeLater, or by having the event thread poll to see if the data is available.
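As a concrete illustration of the "push" option, the sketch below enumerates a directory in a background task and inserts the resulting nodes into a DefaultTreeModel from the event thread via invokeLater; listRemoteDirectory is a hypothetical stand-in for the slow enumeration:

public class LazyTreeLoader {
    private final DefaultTreeModel treeModel;
    private final ExecutorService backgroundExec =
        Executors.newCachedThreadPool();

    public LazyTreeLoader(DefaultTreeModel treeModel) {
        this.treeModel = treeModel;
    }

    // Called in the event thread when a node is expanded
    public void loadChildren(final DefaultMutableTreeNode parent) {
        backgroundExec.execute(new Runnable() {
            public void run() {
                // Slow enumeration runs off the event thread
                final List<String> children = listRemoteDirectory(parent);
                // "Push" the results back to the event thread,
                // where touching the tree model is permitted
                SwingUtilities.invokeLater(new Runnable() {
                    public void run() {
                        for (String name : children)
                            treeModel.insertNodeInto(
                                new DefaultMutableTreeNode(name),
                                parent, parent.getChildCount());
                    }
                });
            }
        });
    }

    private List<String> listRemoteDirectory(DefaultMutableTreeNode dir) {
        return java.util.Collections.emptyList();   // placeholder
    }
}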
9.4.1. Thread-safe Data Models
As long as responsiveness is not unduly affected by blocking, the problem of multiple threads operating on the data can be addressed with a thread-safe data model. If the data model supports fine-grained concurrency, the event thread and background threads should be able to share it without responsiveness problems. For example, DelegatingVehicleTracker on page 65 uses an underlying ConcurrentHashMap whose retrieval operations offer a high degree of concurrency. The downside is that it does not offer a consistent snapshot of the data, which may or may not be a requirement. Thread-safe data models must also generate events when the model has been updated, so that views can be updated when the data changes.
It may sometimes be possible to get thread safety, consistency and good responsiveness with a versioned data model such as CopyOnWriteArrayList [CPJ 2.2.3.3]. When you acquire an iterator for a copy-on-write collection, that iterator traverses the collection as it existed when the iterator was created. However, copy-on-write collections offer good performance only when traversals greatly outnumber modifications, which would probably not be the case in, say, a vehicle tracking application. More specialized versioned data structures may avoid this restriction, but building versioned data structures that provide both efficient concurrent access and do not retain old versions of data longer than needed is not easy, and thus should be considered only when other approaches are not practical.
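The snapshot-on-iteration behavior is easy to demonstrate. In this minimal sketch, a traversal started before a modification does not see elements added afterward, and no ConcurrentModificationException is thrown:

public class SnapshotDemo {
    public static void main(String[] args) {
        List<String> model = new CopyOnWriteArrayList<String>();
        model.add("a");
        model.add("b");

        for (String s : model) {      // iterator sees the list as of this point
            model.add("c");           // safe: mutates a fresh copy, not the snapshot
            System.out.println(s);    // prints a, then b; never c
        }
    }
}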
9.4.2. Split Data Models
From the perspective of the GUI, the Swing table model classes like TableModel and TreeModel are the official repository for data to be displayed. However, these model objects are often themselves "views" of other objects managed by the application. A program that has both a presentation-domain and an application-domain data model is said to have a split-model design (Fowler, 2005).
In a split-model design, the presentation model is confined to the event thread and the other model, the shared model, is thread-safe and may be accessed by both the event thread and application threads. The presentation model registers listeners with the shared model so it can be notified of updates. The presentation model can then be updated from the shared model by embedding a snapshot of the relevant state in the update message or by having the presentation model retrieve the data directly from the shared model when it receives an update event.
The snapshot approach is simple, but has limitations. It works well when the data model is small, updates are not too frequent, and the structure of the two models is similar. If the data model is large or updates are very frequent, or if one or both sides of the split contain information that is not visible to the other side, it can be more efficient to send incremental updates instead of entire snapshots. This approach has the effect of serializing updates on the shared model and recreating them in the event thread against the presentation model. Another advantage of incremental updates is that finer-grained information about what changed can improve the perceived quality of the display: if only one vehicle moves, we don't have to repaint the entire display, just the affected regions.

Consider a split-model design when a data model must be shared by more than one thread and implementing a thread-safe data model would be inadvisable because of blocking, consistency, or complexity reasons.
9.5. Other Forms of Single-threaded Subsystems
Thread confinement is not restricted to GUIs: it can be used whenever a facility is implemented as a single-threaded subsystem. Sometimes thread confinement is forced on the developer for reasons that have nothing to do with avoiding synchronization or deadlock. For example, some native libraries require that all access to the library, even loading the library with System.loadLibrary, be made from the same thread.
Borrowing from the approach taken by GUI frameworks, you can easily create a dedicated thread or single-threaded executor for accessing the native library, and provide a proxy object that intercepts calls to the thread-confined object and submits them as tasks to the dedicated thread. Future and newSingleThreadExecutor work together to make this easy; the proxy method can submit the task and immediately call Future.get to wait for the result. (If the class to be thread-confined implements an interface, you can automate the process of having each method submit a Callable to a background thread executor and waiting for the result using dynamic proxies.)
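A minimal sketch of that automation, assuming the thread-confined object implements some interface: every call on the returned proxy is submitted to a dedicated single-threaded executor and the caller blocks on Future.get for the result:

public class ThreadConfinedProxy {
    // Wraps 'confined' so every call runs on one dedicated thread
    public static <T> T confine(Class<T> iface, final T confined) {
        final ExecutorService exec = Executors.newSingleThreadExecutor();
        InvocationHandler handler = new InvocationHandler() {
            public Object invoke(Object proxy, final Method method,
                                 final Object[] args) throws Throwable {
                try {
                    // Submit the call as a task and wait for its result
                    return exec.submit(new Callable<Object>() {
                        public Object call() throws Exception {
                            return method.invoke(confined, args);
                        }
                    }).get();
                } catch (ExecutionException e) {
                    Throwable cause = e.getCause();
                    // Unwrap the reflective wrapper to rethrow the real exception
                    if (cause instanceof InvocationTargetException)
                        throw ((InvocationTargetException) cause).getCause();
                    throw cause;
                }
            }
        };
        return iface.cast(Proxy.newProxyInstance(
                iface.getClassLoader(), new Class<?>[] { iface }, handler));
    }
}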

Summary
GUI frameworks are nearly always implemented as single-threaded subsystems in which all presentation-related code runs as tasks in an event thread. Because there is only a single event thread, long-running tasks can compromise responsiveness and so should be executed in background threads. Helper classes like SwingWorker or the BackgroundTask class built here, which provide support for cancellation, progress indication, and completion indication, can simplify the development of long-running tasks that have both GUI and non-GUI components.


Part III: Liveness, Performance, and Testing

Chapter 10. Avoiding Liveness Hazards
Chapter 11. Performance and Scalability
Chapter 12. Testing Concurrent Programs


Chapter 10. Avoiding Liveness Hazards
There is often a tension between safety and liveness. We use locking to ensure thread safety, but indiscriminate use of locking can cause lock-ordering deadlocks. Similarly, we use thread pools and semaphores to bound resource consumption, but failure to understand the activities being bounded can cause resource deadlocks. Java applications do not recover from deadlock, so it is worthwhile to ensure that your design precludes the conditions that could cause it. This chapter explores some of the causes of liveness failures and what can be done to prevent them.
10.1. Deadlock
Deadlock is illustrated by the classic, if somewhat unsanitary, "dining philosophers" problem. Five philosophers go out for Chinese food and are seated at a circular table. There are five chopsticks (not five pairs), one placed between each pair of diners. The philosophers alternate between thinking and eating. Each needs to acquire two chopsticks for long enough to eat, but can then put the chopsticks back and return to thinking. There are some chopstick-management algorithms that let everyone eat on a more or less timely basis (a hungry philosopher tries to grab both adjacent chopsticks, but if one is not available, puts down the one that is available and waits a minute or so before trying again), and some that can result in some or all of the philosophers dying of hunger (each philosopher immediately grabs the chopstick to his left and waits for the chopstick to his right to be available before putting down the left). The latter situation, where each has a resource needed by another and is waiting for a resource held by another, and will not release the one they hold until they acquire the one they don't, illustrates deadlock.
When a thread holds a lock forever, other threads attempting to acquire that lock will block forever waiting. When thread A holds lock L and tries to acquire lock M, but at the same time thread B holds M and tries to acquire L, both threads will wait forever. This situation is the simplest case of deadlock (or deadly embrace), where multiple threads wait forever due to a cyclic locking dependency. (Think of the threads as the nodes of a directed graph whose edges represent the relation "Thread A is waiting for a resource held by thread B". If this graph is cyclical, there is a deadlock.)
Database systems are designed to detect and recover from deadlock. A transaction may acquire many locks, and locks are held until the transaction commits. So it is quite possible, and in fact not uncommon, for two transactions to deadlock. Without intervention, they would wait forever (holding locks that are probably required by other transactions as well). But the database server is not going to let this happen. When it detects that a set of transactions is deadlocked (which it does by searching the is-waiting-for graph for cycles), it picks a victim and aborts that transaction. This releases the locks held by the victim, allowing the other transactions to proceed. The application can then retry the aborted transaction, which may be able to complete now that any competing transactions have completed.
The JVM is not nearly as helpful in resolving deadlocks as database servers are. When a set of Java threads deadlock, that's the end of the game: those threads are permanently out of commission. Depending on what those threads do, the application may stall completely, or a particular subsystem may stall, or performance may suffer. The only way to restore the application to health is to abort and restart it, and hope the same thing doesn't happen again.
Like many other concurrency hazards, deadlocks rarely manifest themselves immediately. The fact that a class has a potential deadlock doesn't mean that it ever will deadlock, just that it can. When deadlocks do manifest themselves, it is often at the worst possible time: under heavy production load.
10.1.1. Lock-ordering Deadlocks
LeftRightDeadlock in Listing 10.1 is at risk for deadlock. The leftRight and rightLeft methods each acquire the left and right locks. If one thread calls leftRight and another calls rightLeft, and their actions are interleaved as shown in Figure 10.1, they will deadlock.
Figure 10.1. Unlucky Timing in LeftRightDeadlock.



The deadlock in LeftRightDeadlock came about because the two threads attempted to acquire the same locks in a different order. If they asked for the locks in the same order, there would be no cyclic locking dependency and therefore no deadlock. If you can guarantee that every thread that needs locks L and M at the same time always acquires L and M in the same order, there will be no deadlock.

A program will be free of lock-ordering deadlocks if all threads acquire the locks they need in a fixed global order.

Verifying consistent lock ordering requires a global analysis of your program's locking behavior. It is not sufficient to inspect code paths that acquire multiple locks individually; both leftRight and rightLeft are "reasonable" ways to acquire the two locks, they are just not compatible. When it comes to locking, the left hand needs to know what the right hand is doing.
Listing 10.1. Simple Lock-ordering Deadlock. Don't do this.

// Warning: deadlock-prone!
public class LeftRightDeadlock {
    private final Object left = new Object();
    private final Object right = new Object();

    public void leftRight() {
        synchronized (left) {
            synchronized (right) {
                doSomething();
            }
        }
    }

    public void rightLeft() {
        synchronized (right) {
            synchronized (left) {
                doSomethingElse();
            }
        }
    }
}
10.1.2. Dynamic Lock Order Deadlocks
Sometimes it is not obvious that you have sufficient control over lock ordering to prevent deadlocks. Consider the harmless-looking code in Listing 10.2 that transfers funds from one account to another. It acquires the locks on both Account objects before executing the transfer, ensuring that the balances are updated atomically and without violating invariants such as "an account cannot have a negative balance".
How can transferMoney deadlock? It may appear as if all the threads acquire their locks in the same order, but in fact the lock order depends on the order of arguments passed to transferMoney, and these in turn might depend on external inputs. Deadlock can occur if two threads call transferMoney at the same time, one transferring from X to Y, and the other doing the opposite:
Listing 10.2. Dynamic Lock-ordering Deadlock. Don't do this.

// Warning: deadlock-prone!
public void transferMoney(Account fromAccount,
                          Account toAccount,
                          DollarAmount amount)
        throws InsufficientFundsException {
    synchronized (fromAccount) {
        synchronized (toAccount) {
            if (fromAccount.getBalance().compareTo(amount) < 0)
                throw new InsufficientFundsException();
            else {
                fromAccount.debit(amount);
                toAccount.credit(amount);
            }
        }
    }
}


A: transferMoney(myAccount, yourAccount, 10);
B: transferMoney(yourAccount, myAccount, 20);
With unlucky timing, A will acquire the lock on myAccount and wait for the lock on yourAccount, while B is holding the lock on yourAccount and waiting for the lock on myAccount.
Deadlocks like this one can be spotted the same way as in Listing 10.1: look for nested lock acquisitions. Since the order of arguments is out of our control, to fix the problem we must induce an ordering on the locks and acquire them according to the induced ordering consistently throughout the application.
One way to induce an ordering on objects is to use System.identityHashCode, which returns the value that would be returned by Object.hashCode. Listing 10.3 shows a version of transferMoney that uses System.identityHashCode to induce a lock ordering. It involves a few extra lines of code, but eliminates the possibility of deadlock.
In the rare case that two objects have the same hash code, we must use an arbitrary means of ordering the lock acquisitions, and this reintroduces the possibility of deadlock. To prevent inconsistent lock ordering in this case, a third "tie breaking" lock is used. By acquiring the tie-breaking lock before acquiring either Account lock, we ensure that only one thread at a time performs the risky task of acquiring two locks in an arbitrary order, eliminating the possibility of deadlock (so long as this mechanism is used consistently). If hash collisions were common, this technique might become a concurrency bottleneck (just as having a single, program-wide lock would), but because hash collisions with System.identityHashCode are vanishingly infrequent, this technique provides that last bit of safety at little cost.
Listing 10.3. Inducing a Lock Ordering to Avoid Deadlock.
private static final Object tieLock = new Object();

public void transferMoney(final Account fromAcct,
                          final Account toAcct,
                          final DollarAmount amount)
        throws InsufficientFundsException {
    class Helper {
        public void transfer() throws InsufficientFundsException {
            if (fromAcct.getBalance().compareTo(amount) < 0)
                throw new InsufficientFundsException();
            else {
                fromAcct.debit(amount);
                toAcct.credit(amount);
            }
        }
    }
    int fromHash = System.identityHashCode(fromAcct);
    int toHash = System.identityHashCode(toAcct);

    if (fromHash < toHash) {
        synchronized (fromAcct) {
            synchronized (toAcct) {
                new Helper().transfer();
            }
        }
    } else if (fromHash > toHash) {
        synchronized (toAcct) {
            synchronized (fromAcct) {
                new Helper().transfer();
            }
        }
    } else {
        synchronized (tieLock) {
            synchronized (fromAcct) {
                synchronized (toAcct) {
                    new Helper().transfer();
                }
            }
        }
    }
}
lf 1**2)". has a unlque, lmmuLable, comparable key such as an accounL number, lnduclng a lock orderlng ls even easler:
order ob[ecLs by Lhelr key, Lhus ellmlnaLlng Lhe need for Lhe Llebreaklng lock.
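A minimal sketch of this approach, assuming a hypothetical Account.getAccountNumber() that returns a unique long (debit, credit, getBalance, and DollarAmount as in Listing 10.2):

public void transferMoney(Account fromAcct, Account toAcct,
                          DollarAmount amount)
        throws InsufficientFundsException {
    // Account numbers are unique, so there is no tie to break
    Account first = fromAcct.getAccountNumber() < toAcct.getAccountNumber()
                    ? fromAcct : toAcct;
    Account second = (first == fromAcct) ? toAcct : fromAcct;

    synchronized (first) {
        synchronized (second) {
            if (fromAcct.getBalance().compareTo(amount) < 0)
                throw new InsufficientFundsException();
            fromAcct.debit(amount);
            toAcct.credit(amount);
        }
    }
}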
You may think we're overstating the risk of deadlock because locks are usually held only briefly, but deadlocks are a serious problem in real systems. A production application may perform billions of lock acquire-release cycles per day. Only one of those needs to be timed just wrong to bring the application to deadlock, and even a thorough load-testing regimen may not disclose all latent deadlocks.[1] DemonstrateDeadlock in Listing 10.4[2] deadlocks fairly quickly on most systems.

[1] Ironically, holding locks for short periods of time, as you are supposed to do to reduce lock contention, increases the likelihood that testing will not disclose latent deadlock risks.
[2] For simplicity, DemonstrateDeadlock ignores the issue of negative account balances.
Listing 10.4. Driver Loop That Induces Deadlock Under Typical Conditions.
public class DemonstrateDeadlock {
    private static final int NUM_THREADS = 20;
    private static final int NUM_ACCOUNTS = 5;
    private static final int NUM_ITERATIONS = 1000000;

    public static void main(String[] args) {
        final Random rnd = new Random();
        final Account[] accounts = new Account[NUM_ACCOUNTS];

        for (int i = 0; i < accounts.length; i++)
            accounts[i] = new Account();

        class TransferThread extends Thread {
            public void run() {
                for (int i = 0; i < NUM_ITERATIONS; i++) {
                    int fromAcct = rnd.nextInt(NUM_ACCOUNTS);
                    int toAcct = rnd.nextInt(NUM_ACCOUNTS);
                    DollarAmount amount =
                        new DollarAmount(rnd.nextInt(1000));
                    transferMoney(accounts[fromAcct],
                                  accounts[toAcct], amount);
                }
            }
        }
        for (int i = 0; i < NUM_THREADS; i++)
            new TransferThread().start();
    }
}
10.1.3. Deadlocks Between Cooperating Objects
Multiple lock acquisition is not always as obvious as in LeftRightDeadlock or transferMoney; the two locks need not be acquired by the same method. Consider the cooperating classes in Listing 10.5, which might be used in a taxicab dispatching application. Taxi represents an individual taxi with a location and a destination; Dispatcher represents a fleet of taxis.
While no method explicitly acquires two locks, callers of setLocation and getImage can acquire two locks just the same. If a thread calls setLocation in response to an update from a GPS receiver, it first updates the taxi's location and then checks to see if it has reached its destination. If it has, it informs the dispatcher that it needs a new destination. Since both setLocation and notifyAvailable are synchronized, the thread calling setLocation acquires the Taxi lock and then the Dispatcher lock. Similarly, a thread calling getImage acquires the Dispatcher lock and then each Taxi lock (one at a time). Just as in LeftRightDeadlock, two locks are acquired by two threads in different orders, risking deadlock.
It was easy to spot the deadlock possibility in LeftRightDeadlock or transferMoney by looking for methods that acquire two locks. Spotting the deadlock possibility in Taxi and Dispatcher is a little harder: the warning sign is that an alien method (defined on page 40) is being called while holding a lock.

Invoking an alien method with a lock held is asking for liveness trouble. The alien method might acquire other locks (risking deadlock) or block for an unexpectedly long time, stalling other threads that need the lock you hold.
10.1.4. Open Calls
Of course, Taxi and Dispatcher didn't know that they were each half of a deadlock waiting to happen. And they shouldn't have to; a method call is an abstraction barrier intended to shield you from the details of what happens on the other side. But because you don't know what is happening on the other side of the call, calling an alien method with a lock held is difficult to analyze and therefore risky.
Calling a method with no locks held is called an open call [CPJ 2.4.1.3], and classes that rely on open calls are more well-behaved and composable than classes that make calls with locks held. Using open calls to avoid deadlock is analogous to using encapsulation to provide thread safety: while one can certainly construct a thread-safe program without any encapsulation, the thread safety analysis of a program that makes effective use of encapsulation is far easier than that of one that does not. Similarly, the liveness analysis of a program that relies exclusively on open calls is far easier than that of one that does not. Restricting yourself to open calls makes it far easier to identify the code paths that acquire multiple locks and therefore to ensure that locks are acquired in a consistent order.[3]

[3] The need to rely on open calls and careful lock ordering reflects the fundamental messiness of composing synchronized objects rather than synchronizing composed objects.
Listing 10.5. Lock-ordering Deadlock Between Cooperating Objects. Don't do this.

// Warning: deadlock-prone!
class Taxi {
    @GuardedBy("this") private Point location, destination;
    private final Dispatcher dispatcher;

    public Taxi(Dispatcher dispatcher) {
        this.dispatcher = dispatcher;
    }

    public synchronized Point getLocation() {
        return location;
    }

    public synchronized void setLocation(Point location) {
        this.location = location;
        if (location.equals(destination))
            dispatcher.notifyAvailable(this);
    }
}

class Dispatcher {
    @GuardedBy("this") private final Set<Taxi> taxis;
    @GuardedBy("this") private final Set<Taxi> availableTaxis;

    public Dispatcher() {
        taxis = new HashSet<Taxi>();
        availableTaxis = new HashSet<Taxi>();
    }

    public synchronized void notifyAvailable(Taxi taxi) {
        availableTaxis.add(taxi);
    }

    public synchronized Image getImage() {
        Image image = new Image();
        for (Taxi t : taxis)
            image.drawMarker(t.getLocation());
        return image;
    }
}
Taxi and Dispatcher in Listing 10.5 can be easily refactored to use open calls and thus eliminate the deadlock risk. This involves shrinking the synchronized blocks to guard only operations that involve shared state, as in Listing 10.6. Very often, the cause of problems like those in Listing 10.5 is the use of synchronized methods instead of smaller synchronized blocks for reasons of compact syntax or simplicity rather than because the entire method must be guarded by a lock. (As a bonus, shrinking the synchronized block may also improve scalability; see Section 11.4.1 for advice on sizing synchronized blocks.)

Strive to use open calls throughout your program. Programs that rely on open calls are far easier to analyze for deadlock-freedom than those that allow calls to alien methods with locks held.

Restructuring a synchronized block to allow open calls can sometimes have undesirable consequences, since it takes an operation that was atomic and makes it not atomic. In many cases, the loss of atomicity is perfectly acceptable; there's no reason that updating a taxi's location and notifying the dispatcher that it is ready for a new destination need be an atomic operation. In other cases, the loss of atomicity is noticeable but the semantic changes are still acceptable. In the deadlock-prone version, getImage produces a complete snapshot of the fleet locations at that instant; in the refactored version, it fetches the location of each taxi at slightly different times.
In some cases, however, the loss of atomicity is a problem, and here you will have to use another technique to achieve atomicity. One such technique is to structure a concurrent object so that only one thread can execute the code path following the open call. For example, when shutting down a service, you may want to wait for in-progress operations to complete and then release resources used by the service. Holding the service lock while waiting for operations to complete is inherently deadlock-prone, but releasing the service lock before the service is shut down may let other threads start new operations. The solution is to hold the lock long enough to update the service state to "shutting down" so that other threads wanting to start new operations (including shutting down the service) see that the service is unavailable, and do not try. You can then wait for shutdown to complete, knowing that only the shutdown thread has access to the service state after the open call completes. Thus, rather than using locking to keep the other threads out of a critical section of code, this technique relies on constructing protocols so that other threads don't try to get in.
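A minimal sketch of that protocol (names like tryStartOperation are illustrative): the lock is held only to flip the state and to wait (and intrinsic wait releases it while waiting); the actual resource release happens as an open call:

public class ShutdownProtocol {
    private boolean isShutdown;      // guarded by "this"
    private int activeOperations;    // guarded by "this"

    public synchronized boolean tryStartOperation() {
        if (isShutdown)
            return false;            // refuse new work once shutdown begins
        activeOperations++;
        return true;
    }

    public synchronized void operationComplete() {
        if (--activeOperations == 0)
            notifyAll();
    }

    public void shutdown() throws InterruptedException {
        synchronized (this) {
            isShutdown = true;       // hold the lock only to flip the state
            while (activeOperations > 0)
                wait();              // releases the lock while waiting
        }
        releaseResources();          // open call: no locks held
    }

    private void releaseResources() { /* placeholder */ }
}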
10.1.5. Resource Deadlocks
Just as threads can deadlock when they are each waiting for a lock that the other holds and will not release, they can also deadlock when waiting for resources.
Listing 10.6. Using Open Calls to Avoid Deadlock Between Cooperating Objects.
@ThreadSafe
class Taxi {
    @GuardedBy("this") private Point location, destination;
    private final Dispatcher dispatcher;
    ...
    public synchronized Point getLocation() {
        return location;
    }

    public void setLocation(Point location) {
        boolean reachedDestination;
        synchronized (this) {
            this.location = location;
            reachedDestination = location.equals(destination);
        }
        if (reachedDestination)
            dispatcher.notifyAvailable(this);
    }
}

@ThreadSafe
class Dispatcher {
    @GuardedBy("this") private final Set<Taxi> taxis;
    @GuardedBy("this") private final Set<Taxi> availableTaxis;
    ...
    public synchronized void notifyAvailable(Taxi taxi) {
        availableTaxis.add(taxi);
    }

    public Image getImage() {
        Set<Taxi> copy;
        synchronized (this) {
            copy = new HashSet<Taxi>(taxis);
        }
        Image image = new Image();
        for (Taxi t : copy)
            image.drawMarker(t.getLocation());
        return image;
    }
}
Say you have two pooled resources, such as connection pools for two different databases. Resource pools are usually implemented with semaphores (see Section 5.5.3) to facilitate blocking when the pool is empty. If a task requires connections to both databases and the two resources are not always requested in the same order, thread A could be holding a connection to database D1 while waiting for a connection to database D2, and thread B could be holding a connection to D2 while waiting for a connection to D1. (The larger the pools are, the less likely this is to occur; if each pool has N connections, deadlock requires N sets of cyclically waiting threads and a lot of unlucky timing.)
Another form of resource-based deadlock is thread-starvation deadlock. We saw an example of this hazard in Section 8.1.1, where a task that submits a task and waits for its result executes in a single-threaded Executor. In that case, the first task will wait forever, permanently stalling that task and all others waiting to execute in that Executor. Tasks that wait for the results of other tasks are the primary source of thread-starvation deadlock; bounded pools and interdependent tasks do not mix well.
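A minimal sketch of the hazard just described: the outer task occupies the single-threaded Executor's only thread, so the inner task it waits for can never run:

public class StarvationDeadlock {
    static final ExecutorService exec = Executors.newSingleThreadExecutor();

    public static void main(String[] args) throws Exception {
        Future<String> outer = exec.submit(new Callable<String>() {
            public String call() throws Exception {
                // The pool's only thread is busy running this task,
                // so the inner task is queued but never started
                Future<String> inner = exec.submit(new Callable<String>() {
                    public String call() { return "inner"; }
                });
                return inner.get();           // blocks forever
            }
        });
        System.out.println(outer.get());      // never reached
    }
}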
10.2. Avoiding and Diagnosing Deadlocks
A program that never acquires more than one lock at a time cannot experience lock-ordering deadlock. Of course, this is not always practical, but if you can get away with it, it's a lot less work. If you must acquire multiple locks, lock ordering must be a part of your design: try to minimize the number of potential locking interactions, and follow and document a lock-ordering protocol for locks that may be acquired together.

In programs that use fine-grained locking, audit your code for deadlock freedom using a two-part strategy: first, identify where multiple locks could be acquired (try to make this a small set), and then perform a global analysis of all such instances to ensure that lock ordering is consistent across your entire program. Using open calls wherever possible simplifies this analysis substantially. With no non-open calls, finding instances where multiple locks are acquired is fairly easy, either by code review or by automated bytecode or source code analysis.
10.2.1. Timed Lock Attempts
Another technique for detecting and recovering from deadlocks is to use the timed tryLock feature of the explicit Lock classes (see Chapter 13) instead of intrinsic locking. Where intrinsic locks wait forever if they cannot acquire the lock, explicit locks let you specify a timeout after which tryLock returns failure. By using a timeout that is much longer than you expect acquiring the lock to take, you can regain control when something unexpected happens. (Listing 13.3 on page 280 shows an alternative implementation of transferMoney using the polled tryLock with retries for probabilistic deadlock avoidance.)
When a timed lock attempt fails, you do not necessarily know why. Maybe there was a deadlock; maybe a thread erroneously entered an infinite loop while holding that lock; or maybe some activity is just running a lot slower than you expected. Still, at least you have the opportunity to record that your attempt failed, log any useful information about what you were trying to do, and restart the computation somewhat more gracefully than killing the entire process.
Using timed lock acquisition to acquire multiple locks can be effective against deadlock even when timed locking is not used consistently throughout the program. If a lock acquisition times out, you can release the locks, back off and wait for a while, and try again, possibly clearing the deadlock condition and allowing the program to recover. (This technique works only when the two locks are acquired together; if multiple locks are acquired due to the nesting of method calls, you cannot just release the outer lock, even if you know you hold it.)
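A minimal sketch of this retry pattern, in the spirit of the book's Listing 13.3; doTransfer and the backoff interval are illustrative:

public class TimedLocking {
    private static final Random rnd = new Random();

    // Tries to take both locks within the time budget; backs off and
    // retries if it cannot, rather than risking deadlock
    public boolean transferWithTimeout(Lock fromLock, Lock toLock,
                                       long timeout, TimeUnit unit)
            throws InterruptedException {
        long stopTime = System.nanoTime() + unit.toNanos(timeout);
        while (System.nanoTime() < stopTime) {
            if (fromLock.tryLock()) {
                try {
                    if (toLock.tryLock()) {
                        try {
                            doTransfer();
                            return true;
                        } finally {
                            toLock.unlock();
                        }
                    }
                } finally {
                    fromLock.unlock();
                }
            }
            // Release everything, wait a random interval, and retry so
            // two competing threads do not fail in lockstep forever
            TimeUnit.MILLISECONDS.sleep(rnd.nextInt(10));
        }
        return false;   // gave up; caller can log the failure and recover
    }

    private void doTransfer() { /* placeholder */ }
}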
10.2.2. Deadlock Analysis with Thread Dumps
While preventing deadlocks is mostly your problem, the JVM can help identify them when they do happen using thread dumps. A thread dump includes a stack trace for each running thread, similar to the stack trace that accompanies an exception. Thread dumps also include locking information, such as which locks are held by each thread, in which stack frame they were acquired, and which lock a blocked thread is waiting to acquire.[4] Before generating a thread dump, the JVM searches the is-waiting-for graph for cycles to find deadlocks. If it finds one, it includes deadlock information identifying which locks and threads are involved, and where in the program the offending lock acquisitions are.

[4] This information is useful for debugging even when you don't have a deadlock; periodically triggering thread dumps lets you observe your program's locking behavior.

To trigger a thread dump, you can send the JVM process a SIGQUIT signal (kill -3) on Unix platforms, or press the Ctrl-\ key on Unix or Ctrl-Break on Windows platforms. Many IDEs can request a thread dump as well.
If you are using the explicit Lock classes instead of intrinsic locking, Java 5.0 has no support for associating Lock information with the thread dump; explicit Locks do not show up at all in thread dumps. Java 6 does include thread dump support and deadlock detection with explicit Locks, but the information on where Locks are acquired is necessarily less precise than for intrinsic locks. Intrinsic locks are associated with the stack frame in which they were acquired; explicit Locks are associated only with the acquiring thread.
Listing 10.7 shows portions of a thread dump taken from a production J2EE application. The failure that caused the deadlock involves three components: a J2EE application, a J2EE container, and a JDBC driver, each from different vendors. (The names have been changed to protect the guilty.) All three were commercial products that had been through extensive testing cycles; each had a bug that was harmless until they all interacted and caused a fatal server failure.
We've shown only the portion of the thread dump relevant to identifying the deadlock. The JVM has done a lot of work for us in diagnosing the deadlock: which locks are causing the problem, which threads are involved, which other locks they hold, and whether other threads are being indirectly inconvenienced. One thread holds the lock on the MumbleDBConnection and is waiting to acquire the lock on the MumbleDBCallableStatement; the other holds the lock on the MumbleDBCallableStatement and is waiting for the lock on the MumbleDBConnection.

Listing 10.7. Portion of Thread Dump After Deadlock.
Found one Java-level deadlock:
=============================
"ApplicationServerThread":
  waiting to lock monitor 0x080f0cdc (a MumbleDBConnection),
  which is held by "ApplicationServerThread"
"ApplicationServerThread":
  waiting to lock monitor 0x080f0ed4 (a MumbleDBCallableStatement),
  which is held by "ApplicationServerThread"

Java stack information for the threads listed above:
"ApplicationServerThread":
  at MumbleDBConnection.remove_statement
  - waiting to lock <0x650f7f30> (a MumbleDBConnection)
  at MumbleDBStatement.close
  - locked <0x6024ffb0> (a MumbleDBCallableStatement)
  ...

"ApplicationServerThread":
  at MumbleDBCallableStatement.sendBatch
  - waiting to lock <0x6024ffb0> (a MumbleDBCallableStatement)
  at MumbleDBConnection.commit
  - locked <0x650f7f30> (a MumbleDBConnection)
  ...
The JDBC driver being used here clearly has a lock-ordering bug: different call chains through the JDBC driver acquire multiple locks in different orders. But this problem would not have manifested itself were it not for another bug: multiple threads were trying to use the same JDBC Connection at the same time. This was not how the application was supposed to work; the developers were surprised to see the same Connection used concurrently by two threads. There's nothing in the JDBC specification that requires a Connection to be thread-safe, and it is common to confine use of a Connection to a single thread, as was intended here. This vendor tried to deliver a thread-safe JDBC driver, as evidenced by the synchronization on multiple JDBC objects within the driver code. Unfortunately, because the vendor did not take lock ordering into account, the driver was prone to deadlock, but it was only the interaction of the deadlock-prone driver and the incorrect Connection sharing by the application that disclosed the problem. Because neither bug was fatal in isolation, both persisted despite extensive testing.
10.3. Other Liveness Hazards
While deadlock is the most widely encountered liveness hazard, there are several other liveness hazards you may encounter in concurrent programs including starvation, missed signals, and livelock. (Missed signals are covered in Section 14.2.3.)
10.3.1. Starvation
Starvation occurs when a thread is perpetually denied access to resources it needs in order to make progress; the most commonly starved resource is CPU cycles. Starvation in Java applications can be caused by inappropriate use of thread priorities. It can also be caused by executing nonterminating constructs (infinite loops or resource waits that do not terminate) with a lock held, since other threads that need that lock will never be able to acquire it.
The thread priorities defined in the Thread API are merely scheduling hints. The Thread API defines ten priority levels that the JVM can map to operating system scheduling priorities as it sees fit. This mapping is platform-specific, so two Java priorities can map to the same OS priority on one system and different OS priorities on another. Some operating systems have fewer than ten priority levels, in which case multiple Java priorities map to the same OS priority.
Operating system schedulers go to great lengths to provide scheduling fairness and liveness beyond that required by the Java Language Specification. In most Java applications, all application threads have the same priority, Thread.NORM_PRIORITY. The thread priority mechanism is a blunt instrument, and it's not always obvious what effect changing priorities will have; boosting a thread's priority might do nothing or might always cause one thread to be scheduled in preference to the other, causing starvation.
It is generally wise to resist the temptation to tweak thread priorities. As soon as you start modifying priorities, the behavior of your application becomes platform-specific and you introduce the risk of starvation. You can often spot a program that is trying to recover from priority tweaking or other responsiveness problems by the presence of Thread.sleep or Thread.yield calls in odd places, in an attempt to give more time to lower-priority threads.[5]

[5] The semantics of Thread.yield (and Thread.sleep(0)) are undefined [JLS 17.9]; the JVM is free to implement them as no-ops or treat them as scheduling hints. In particular, they are not required to have the semantics of sleep(0) on Unix systems (put the current thread at the end of the run queue for that priority, yielding to other threads of the same priority), though some JVMs implement yield in this way.

Avoid the temptation to use thread priorities, since they increase platform dependence and can cause liveness problems. Most concurrent applications can use the default priority for all threads.
10.3.2. Poor Responsiveness
One step removed from starvation is poor responsiveness, which is not uncommon in GUI applications using background threads. Chapter 9 developed a framework for offloading long-running tasks onto background threads so as not to freeze the user interface. CPU-intensive background tasks can still affect responsiveness because they can compete for CPU cycles with the event thread. This is one case where altering thread priorities makes sense: when compute-intensive background computations would affect responsiveness. If the work done by the other threads truly is background work, lowering their priority can make the foreground tasks more responsive.
Poor responsiveness can also be caused by poor lock management. If a thread holds a lock for a long time (perhaps while iterating a large collection and performing substantial work for each element), other threads that need to access that collection may have to wait a very long time.
10.3.3. Livelock
Llvelock ls a form of llveness fallure ln whlch a Lhread, whlle noL blocked, sLlll cannoL make progress because lL keeps
reLrylng an operaLlon LhaL wlll always fall. Llvelock ofLen occurs ln LransacLlonal messaglng appllcaLlons, where Lhe
messaglng lnfrasLrucLure rolls back a LransacLlon lf a message cannoL be processed successfully, and puLs lL back aL Lhe
head of Lhe queue. lf a bug ln Lhe message handler for a parLlcular Lype of message causes lL Lo fall, every Llme Lhe
message ls dequeued and passed Lo Lhe buggy handler, Lhe LransacLlon ls rolled back. Slnce Lhe message ls now back aL
Lhe head of Lhe queue, Lhe handler ls called over and over wlLh Lhe same resulL. (1hls ls someLlmes called Lhe polson
message problem.) 1he message handllng Lhread ls noL blocked, buL lL wlll never make progress elLher. 1hls form of
llvelock ofLen comes from overeager errorrecovery code LhaL mlsLakenly LreaLs an unrecoverable error as a recoverable
one.
Llvelock can also occur when mulLlple cooperaLlng Lhreads change Lhelr sLaLe ln response Lo Lhe oLhers ln such a way
LhaL no Lhread can ever make progress. 1hls ls slmllar Lo whaL happens when Lwo overly pollLe people are walklng ln
opposlLe dlrecLlons ln a hallway: each sLeps ouL of Lhe oLher's way, and now Lhey are agaln ln each oLher's way. So Lhey
boLh sLep aslde agaln, and agaln, and agaln. . .
The solution for this variety of livelock is to introduce some randomness into the retry mechanism. For example, when two stations in an Ethernet network try to send a packet on the shared carrier at the same time, the packets collide. The stations detect the collision, and each tries to send its packet again later. If they each retry exactly one second later, they collide over and over, and neither packet ever goes out, even if there is plenty of available bandwidth. To avoid this, we make each wait an amount of time that includes a random component. (The Ethernet protocol also includes exponential backoff after repeated collisions, reducing both congestion and the risk of repeated failure with multiple colliding stations.) Retrying with random waits and backoffs can be equally effective for avoiding livelock in concurrent applications.
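
To make the remedy concrete, here is a minimal sketch of retry with randomized exponential backoff (my illustration, not code from the text; the Callable-based interface, the 10 ms seed, and the doubling factor are all assumptions):

import java.util.Random;
import java.util.concurrent.Callable;

public class RetryWithBackoff {
    private static final Random random = new Random();

    // Retries an operation that reports failure by returning false, sleeping
    // for a random, exponentially growing interval between attempts so that
    // competing threads fall out of lockstep instead of colliding forever.
    public static boolean retry(Callable<Boolean> operation, int maxAttempts)
            throws Exception {
        long backoff = 10;   // initial backoff in milliseconds (arbitrary)
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            if (operation.call())
                return true;
            // The random component is what defeats livelock; without it,
            // colliding threads would retry in perfect synchrony.
            Thread.sleep(backoff / 2 + random.nextInt((int) backoff));
            backoff *= 2;    // exponential backoff also reduces congestion
        }
        return false;
    }
}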
C(55")+
Liveness failures are a serious problem because there is no way to recover from them short of aborting the application. The most common form of liveness failure is lock-ordering deadlock. Avoiding lock-ordering deadlock starts at design time: ensure that when threads acquire multiple locks, they do so in a consistent order. The best way to do this is by using open calls throughout your program. This greatly reduces the number of places where multiple locks are held at once, and makes it more obvious where those places are.


Chapter 11. Performance and Scalability
One of the primary reasons to use threads is to improve performance.[1] Using threads can improve resource utilization by letting applications more easily exploit available processing capacity, and can improve responsiveness by letting applications begin processing new tasks immediately while existing tasks are still running.

[1] Some might argue that this is the only reason we put up with the complexity threads introduce.
This chapter explores techniques for analyzing, monitoring, and improving the performance of concurrent programs. Unfortunately, many of the techniques for improving performance also increase complexity, thus increasing the likelihood of safety and liveness failures. Worse, some techniques intended to improve performance are actually counterproductive or trade one sort of performance problem for another. While better performance is often desirable, and improving performance can be very satisfying, safety always comes first. First make your program right, then make it fast, and then only if your performance requirements and measurements tell you it needs to be faster. In designing a concurrent application, squeezing out the last bit of performance is often the least of your concerns.
11.1. Thinking about Performance
Improving performance means doing more work with fewer resources. The meaning of "resources" can vary; for a given activity, some specific resource is usually in shortest supply, whether it is CPU cycles, memory, network bandwidth, I/O bandwidth, database requests, disk space, or any number of other resources. When the performance of an activity is limited by availability of a particular resource, we say it is bound by that resource: CPU-bound, database-bound, etc.
While the goal may be to improve performance overall, using multiple threads always introduces some performance costs compared to the single-threaded approach. These include the overhead associated with coordinating between threads (locking, signaling, and memory synchronization), increased context switching, thread creation and teardown, and scheduling overhead. When threading is employed effectively, these costs are more than made up for by greater throughput, responsiveness, or capacity. On the other hand, a poorly designed concurrent application can perform even worse than a comparable sequential one.[2]

[2] A colleague provided this amusing anecdote: he had been involved in the testing of an expensive and complex application that managed its work via a tunable thread pool. After the system was complete, testing showed that the optimal number of threads for the pool was... 1. This should have been obvious from the outset; the target system was a single-CPU system and the application was almost entirely CPU-bound.
In using concurrency to achieve better performance, we are trying to do two things: utilize the processing resources we have more effectively, and enable our program to exploit additional processing resources if they become available. From a performance monitoring perspective, this means we are looking to keep the CPUs as busy as possible. (Of course, this doesn't mean burning cycles with useless computation; we want to keep the CPUs busy with useful work.) If the program is compute-bound, then we may be able to increase its capacity by adding more processors; if it can't even keep the processors we have busy, adding more won't help. Threading offers a means to keep the CPU(s) "hotter" by decomposing the application so there is always work to be done by an available processor.
11.1.1. Performance Versus Scalability
Application performance can be measured in a number of ways, such as service time, latency, throughput, efficiency, scalability, or capacity. Some of these (service time, latency) are measures of "how fast" a given unit of work can be processed or acknowledged; others (capacity, throughput) are measures of "how much" work can be performed with a given quantity of computing resources.
Scalability describes the ability to improve throughput or capacity when additional computing resources (such as additional CPUs, memory, storage, or I/O bandwidth) are added.
Designing and tuning concurrent applications for scalability can be very different from traditional performance optimization. When tuning for performance, the goal is usually to do the same work with less effort, such as by reusing previously computed results through caching or replacing an O(n^2) algorithm with an O(n log n) one. When tuning for scalability, you are instead trying to find ways to parallelize the problem so you can take advantage of additional processing resources to do more work with more resources.
These two aspects of performance, "how fast" and "how much", are completely separate, and sometimes even at odds with each other. In order to achieve higher scalability or better hardware utilization, we often end up increasing the amount of work done to process each individual task, such as when we divide tasks into multiple "pipelined" subtasks.

Ironically, many of the tricks that improve performance in single-threaded programs are bad for scalability (see Section 11.4.4 for an example).
The familiar three-tier application model, in which presentation, business logic, and persistence are separated and may be handled by different systems, illustrates how improvements in scalability often come at the expense of performance. A monolithic application where presentation, business logic, and persistence are intertwined would almost certainly provide better performance for the first unit of work than would a well-factored multitier implementation distributed over multiple systems. How could it not? The monolithic application would not have the network latency inherent in handing off tasks between tiers, nor would it have to pay the costs inherent in separating a computational process into distinct abstracted layers (such as queuing overhead, coordination overhead, and data copying).
However, when the monolithic system reaches its processing capacity, we could have a serious problem: it may be prohibitively difficult to significantly increase capacity. So we often accept the performance costs of longer service time or greater computing resources used per unit of work so that our application can scale to handle greater load by adding more resources.
Of the various aspects of performance, the "how much" aspects (scalability, throughput, and capacity) are usually of greater concern for server applications than the "how fast" aspects. (For interactive applications, latency tends to be more important, so that users need not wait for indications of progress and wonder what is going on.) This chapter focuses primarily on scalability rather than raw single-threaded performance.
11.1.2. Evaluating Performance Tradeoffs
Nearly all engineering decisions involve some form of tradeoff. Using thicker steel in a bridge span may increase its capacity and safety, but also its construction cost. While software engineering decisions don't usually involve tradeoffs between money and risk to human life, we often have less information with which to make the right tradeoffs. For example, the "quicksort" algorithm is highly efficient for large data sets, but the less sophisticated "bubble sort" is actually more efficient for small data sets. If you are asked to implement an efficient sort routine, you need to know something about the sizes of data sets it will have to process, along with metrics that tell you whether you are trying to optimize average-case time, worst-case time, or predictability. Unfortunately, that information is often not part of the requirements given to the author of a library sort routine. This is one of the reasons why most optimizations are premature: they are often undertaken before a clear set of requirements is available.
Avoid premature optimization. First make it right, then make it fast, if it is not already fast enough.
When making engineering decisions, sometimes you are trading one form of cost for another (service time versus memory consumption); sometimes you are trading cost for safety. Safety doesn't necessarily mean risk to human lives, as it did in the bridge example. Many performance optimizations come at the cost of readability or maintainability: the more "clever" or nonobvious code is, the harder it is to understand and maintain. Sometimes optimizations entail compromising good object-oriented design principles, such as breaking encapsulation; sometimes they involve greater risk of error, because faster algorithms are usually more complicated. (If you can't spot the costs or risks, you probably haven't thought it through carefully enough to proceed.)
Most performance decisions involve multiple variables and are highly situational. Before deciding that one approach is "faster" than another, ask yourself some questions:
What do you mean by "faster"?
Under what conditions will this approach actually be faster? Under light or heavy load? With large or small data sets? Can you support your answer with measurements?
How often are these conditions likely to arise in your situation? Can you support your answer with measurements?
Is this code likely to be used in other situations where the conditions may be different?
What hidden costs, such as increased development or maintenance risk, are you trading for this improved performance? Is this a good tradeoff?
These considerations apply to any performance-related engineering decision, but this is a book about concurrency. Why are we recommending such a conservative approach to optimization? The quest for performance is probably the single greatest source of concurrency bugs. The belief that synchronization was "too slow" led to many clever-looking but dangerous idioms for reducing synchronization (such as double-checked locking, discussed in Section 16.2.4), and is often cited as an excuse for not following the rules regarding synchronization. Because concurrency bugs are among the most difficult to track down and eliminate, however, anything that risks introducing them must be undertaken very carefully.
Worse, when you trade safety for performance, you may get neither. Especially when it comes to concurrency, the intuition of many developers about where a performance problem lies, or which approach will be faster or more scalable, is often incorrect. It is therefore imperative that any performance tuning exercise be accompanied by concrete performance requirements (so you know both when to tune and when to stop tuning) and with a measurement program in place using a realistic configuration and load profile. Measure again after tuning to verify that you've achieved the desired improvements. The safety and maintenance risks associated with many optimizations are bad enough; you don't want to pay these costs if you don't need to, and you definitely don't want to pay them if you don't even get the desired benefit.
Measure, don't guess.
There are sophisticated profiling tools on the market for measuring performance and tracking down performance bottlenecks, but you don't have to spend a lot of money to figure out what your program is doing. For example, the free perfbar application can give you a good picture of how busy the CPUs are, and since your goal is usually to keep the CPUs busy, this is a very good way to evaluate whether you need performance tuning or how effective your tuning has been.
11.2. Amdahl's Law
Some problems can be solved faster with more resources: the more workers available for harvesting crops, the faster the harvest can be completed. Other tasks are fundamentally serial: no number of additional workers will make the crops grow any faster. If one of our primary reasons for using threads is to harness the power of multiple processors, we must also ensure that the problem is amenable to parallel decomposition and that our program effectively exploits this potential for parallelization.
Most concurrent programs have a lot in common with farming, consisting of a mix of parallelizable and serial portions. Amdahl's law describes how much a program can theoretically be sped up by additional computing resources, based on the proportion of parallelizable and serial components. If F is the fraction of the calculation that must be executed serially, then Amdahl's law says that on a machine with N processors, we can achieve a speedup of at most:
Speedup <= 1 / (F + (1 - F) / N)
As N approaches infinity, the maximum speedup converges to 1/F, meaning that a program in which fifty percent of the processing must be executed serially can be sped up only by a factor of two, regardless of how many processors are available, and a program in which ten percent must be executed serially can be sped up by at most a factor of ten. Amdahl's law also quantifies the efficiency cost of serialization. With ten processors, a program with 10% serialization can achieve at most a speedup of 5.3 (at 53% utilization), and with 100 processors it can achieve at most a speedup of 9.2 (at 9% utilization). It takes a lot of inefficiently utilized CPUs to never get to that factor of ten.
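
These figures follow directly from the formula; a small sketch (mine, purely illustrative) reproduces them:

public class AmdahlDemo {
    // Maximum speedup under Amdahl's law for serial fraction f on n processors.
    static double speedup(double f, int n) {
        return 1.0 / (f + (1.0 - f) / n);
    }

    public static void main(String[] args) {
        System.out.println(speedup(0.5, Integer.MAX_VALUE)); // ~2.0
        System.out.println(speedup(0.1, 10));                // ~5.3
        System.out.println(speedup(0.1, 10) / 10);           // ~0.53 utilization
        System.out.println(speedup(0.1, 100));               // ~9.2
        System.out.println(speedup(0.1, 100) / 100);         // ~0.09 utilization
    }
}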
Figure 11.1 shows the maximum possible processor utilization for varying degrees of serial execution and numbers of processors. (Utilization is defined as the speedup divided by the number of processors.) It is clear that as processor counts increase, even a small percentage of serialized execution limits how much throughput can be increased with additional computing resources.

Figure 11.1. Maximum Utilization Under Amdahl's Law for Various Serialization Percentages.


Chapter 6 explored identifying logical boundaries for decomposing applications into tasks. But in order to predict what kind of speedup is possible from running your application on a multiprocessor system, you also need to identify the sources of serialization in your tasks.
Imagine an application where N threads execute doWork in Listing 11.1, fetching tasks from a shared work queue and processing them; assume that tasks do not depend on the results or side effects of other tasks. Ignoring for a moment how the tasks get onto the queue, how well will this application scale as we add processors? At first glance, it may appear that the application is completely parallelizable: tasks do not wait for each other, and the more processors available, the more tasks can be processed concurrently. However, there is a serial component as well: fetching the task from the work queue. The work queue is shared by all the worker threads, and it will require some amount of synchronization to maintain its integrity in the face of concurrent access. If locking is used to guard the state of the queue, then while one thread is dequeuing a task, other threads that need to dequeue their next task must wait, and this is where task processing is serialized.
The processing time of a single task includes not only the time to execute the task Runnable, but also the time to dequeue the task from the shared work queue. If the work queue is a LinkedBlockingQueue, the dequeue operation may block less than with a synchronized LinkedList because LinkedBlockingQueue uses a more scalable algorithm, but accessing any shared data structure fundamentally introduces an element of serialization into a program.
This example also ignores another common source of serialization: result handling. All useful computations produce some sort of result or side effect; if not, they can be eliminated as dead code. Since Runnable provides for no explicit result handling, these tasks must have some sort of side effect, say writing their results to a log file or putting them in a data structure. Log files and result containers are usually shared by multiple worker threads and therefore are also a source of serialization. If instead each thread maintains its own data structure for results that are merged after all the tasks are performed, then the final merge is a source of serialization.

Listing 11.1. Serialized Access to a Task Queue.
public class WorkerThread extends Thread {
    private final BlockingQueue<Runnable> queue;

    public WorkerThread(BlockingQueue<Runnable> queue) {
        this.queue = queue;
    }

    public void run() {
        while (true) {
            try {
                Runnable task = queue.take();
                task.run();
            } catch (InterruptedException e) {
                break; /* Allow thread to exit */
            }
        }
    }
}
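
A hedged usage sketch of the listing above (the demo class, thread count, and task count are my assumptions, not part of the book's example):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class WorkerDemo {
    public static void main(String[] args) throws InterruptedException {
        // The shared queue is the serial component: every take() and put()
        // must synchronize on the queue's internal state.
        BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>();
        int n = Runtime.getRuntime().availableProcessors();
        for (int i = 0; i < n; i++)
            new WorkerThread(queue).start();
        for (int i = 0; i < 1000; i++)
            queue.put(new Runnable() {
                public void run() { /* thread-local computation here */ }
            });
    }
}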

All concurrent applications have some sources of serialization; if you think yours does not, look again.
11.2.1. Example: Serialization Hidden in Frameworks
To see how serialization can be hidden in the structure of an application, we can compare throughput as threads are added and infer differences in serialization based on observed differences in scalability. Figure 11.2 shows a simple application in which multiple threads repeatedly remove an element from a shared Queue and process it, similar to Listing 11.1. The processing step involves only thread-local computation. If a thread finds the queue is empty, it puts a batch of new elements on the queue so that other threads have something to process on their next iteration. Accessing the shared queue clearly entails some degree of serialization, but the processing step is entirely parallelizable since it involves no shared data.
Figure 11.2. Comparing Queue Implementations.


The curves in Figure 11.2 compare throughput for two thread-safe Queue implementations: a LinkedList wrapped with synchronizedList, and a ConcurrentLinkedQueue. The tests were run on an 8-way Sparc V880 system running Solaris. While each run represents the same amount of "work", we can see that merely changing queue implementations can have a big impact on scalability.
The throughput of ConcurrentLinkedQueue continues to improve until it hits the number of processors and then remains mostly constant. On the other hand, the throughput of the synchronized LinkedList shows some improvement up to three threads, but then falls off as synchronization overhead increases. By the time it gets to four or five threads, contention is so heavy that every access to the queue lock is contended and throughput is dominated by context switching.
The difference in throughput comes from differing degrees of serialization between the two queue implementations. The synchronized LinkedList guards the entire queue state with a single lock that is held for the duration of the offer or remove call; ConcurrentLinkedQueue uses a sophisticated nonblocking queue algorithm (see Section 15.4.2) that uses atomic references to update individual link pointers. In one, the entire insertion or removal is serialized; in the other, only updates to individual pointers are serialized.
11.2.2. Applying Amdahl's Law Qualitatively
Amdahl's law quantifies the possible speedup when more computing resources are available, if we can accurately estimate the fraction of execution that is serialized. Although measuring serialization directly can be difficult, Amdahl's law can still be useful without such measurement.
Since our mental models are influenced by our environment, many of us are used to thinking that a multiprocessor system has two or four processors, or maybe (if we've got a big budget) as many as a few dozen, because this is the technology that has been widely available in recent years. But as multicore CPUs become mainstream, systems will have hundreds or even thousands of processors.[3] Algorithms that seem scalable on a four-way system may have hidden scalability bottlenecks that have just not yet been encountered.

[3] Market update: at this writing, Sun is shipping low-end server systems based on the 8-core Niagara processor, and Azul is shipping high-end server systems (96, 192, and 384-way) based on the 24-core Vega processor.
When evaluating an algorithm, thinking "in the limit" about what would happen with hundreds or thousands of processors can offer some insight into where scaling limits might appear. For example, Sections 11.4.2 and 11.4.3 discuss two techniques for reducing lock granularity: lock splitting (splitting one lock into two) and lock striping (splitting one lock into many). Looking at them through the lens of Amdahl's law, we see that splitting a lock in two does not get us very far towards exploiting many processors, but lock striping seems much more promising because the size of the stripe set can be increased as processor count increases. (Of course, performance optimizations should always be considered in light of actual performance requirements; in some cases, splitting a lock in two may be enough to meet the requirements.)
11.3. Costs Introduced by Threads
Single-threaded programs incur neither scheduling nor synchronization overhead, and need not use locks to preserve the consistency of data structures. Scheduling and interthread coordination have performance costs; for threads to offer a performance improvement, the performance benefits of parallelization must outweigh the costs introduced by concurrency.
11.3.1. Context Switching
If the main thread is the only schedulable thread, it will almost never be scheduled out. On the other hand, if there are more runnable threads than CPUs, eventually the OS will preempt one thread so that another can use the CPU. This causes a context switch, which requires saving the execution context of the currently running thread and restoring the execution context of the newly scheduled thread.
Context switches are not free; thread scheduling requires manipulating shared data structures in the OS and JVM. The OS and JVM use the same CPUs your program does; more CPU time spent in JVM and OS code means less is available for your program. But OS and JVM activity is not the only cost of context switches. When a new thread is switched in, the data it needs is unlikely to be in the local processor cache, so a context switch causes a flurry of cache misses, and thus threads run a little more slowly when they are first scheduled. This is one of the reasons that schedulers give each runnable thread a certain minimum time quantum even when many other threads are waiting: it amortizes the cost of the context switch and its consequences over more uninterrupted execution time, improving overall throughput (at some cost to responsiveness).
Listing 11.2. Synchronization that has No Effect. Don't Do this.

synchronized (new Object()) {
    // do something
}
When a thread blocks because it is waiting for a contended lock, the JVM usually suspends the thread and allows it to be switched out. If threads block frequently, they will be unable to use their full scheduling quantum. A program that does more blocking (blocking I/O, waiting for contended locks, or waiting on condition variables) incurs more context switches than one that is CPU-bound, increasing scheduling overhead and reducing throughput. (Nonblocking algorithms can also help reduce context switches; see Chapter 15.)

The actual cost of context switching varies across platforms, but a good rule of thumb is that a context switch costs the equivalent of 5,000 to 10,000 clock cycles, or several microseconds on most current processors.
The vmstat command on Unix systems and the perfmon tool on Windows systems report the number of context switches and the percentage of time spent in the kernel. High kernel usage (over 10%) often indicates heavy scheduling activity, which may be caused by blocking due to I/O or lock contention.
11.3.2. Memory Synchronization
The performance cost of synchronization comes from several sources. The visibility guarantees provided by synchronized and volatile may entail using special instructions called memory barriers that can flush or invalidate caches, flush hardware write buffers, and stall execution pipelines. Memory barriers may also have indirect performance consequences because they inhibit other compiler optimizations; most operations cannot be reordered with memory barriers.
When assessing the performance impact of synchronization, it is important to distinguish between contended and uncontended synchronization. The synchronized mechanism is optimized for the uncontended case (volatile is always uncontended), and at this writing, the performance cost of a "fast-path" uncontended synchronization ranges from 20 to 250 clock cycles for most systems. While this is certainly not zero, the effect of needed, uncontended synchronization is rarely significant in overall application performance, and the alternative involves compromising safety and potentially signing yourself (or your successor) up for some very painful bug hunting later.
Modern JVMs can reduce the cost of incidental synchronization by optimizing away locking that can be proven never to contend. If a lock object is accessible only to the current thread, the JVM is permitted to optimize away a lock acquisition because there is no way another thread could synchronize on the same lock. For example, the lock acquisition in Listing 11.2 can always be eliminated by the JVM.
More sophisticated JVMs can use escape analysis to identify when a local object reference is never published to the heap and is therefore thread-local. In getStoogeNames in Listing 11.3, the only reference to the List is the local variable stooges, and stack-confined variables are automatically thread-local. A naive execution of getStoogeNames would acquire and release the lock on the Vector four times, once for each call to add or toString. However, a smart runtime compiler can inline these calls and then see that stooges and its internal state never escape, and therefore that all four lock acquisitions can be eliminated.[4]

[4] This compiler optimization, called lock elision, is performed by the IBM JVM and is expected in HotSpot as of Java 7.
Listing 11.3. Candidate for Lock Elision.
public String getStoogeNames() {
    List<String> stooges = new Vector<String>();
    stooges.add("Moe");
    stooges.add("Larry");
    stooges.add("Curly");
    return stooges.toString();
}
Even without escape analysis, compilers can also perform lock coarsening, the merging of adjacent synchronized blocks using the same lock. For getStoogeNames, a JVM that performs lock coarsening might combine the three calls to add and the call to toString into a single lock acquisition and release, using heuristics on the relative cost of synchronization versus the instructions inside the synchronized block.[5] Not only does this reduce the synchronization overhead, but it also gives the optimizer a much larger block to work with, likely enabling other optimizations.

[5] A smart dynamic compiler can figure out that this method always returns the same string, and after the first execution recompile getStoogeNames to simply return the value returned by the first execution.
Don't worry excessively about the cost of uncontended synchronization. The basic mechanism is already quite fast, and JVMs can perform additional optimizations that further reduce or eliminate the cost. Instead, focus optimization efforts on areas where lock contention actually occurs.
Synchronization by one thread can also affect the performance of other threads. Synchronization creates traffic on the shared memory bus; this bus has a limited bandwidth and is shared across all processors. If threads must compete for synchronization bandwidth, all threads using synchronization will suffer.[6]

[6] This aspect is sometimes used to argue against the use of nonblocking algorithms without some sort of backoff, because under heavy contention, nonblocking algorithms generate more synchronization traffic than lock-based ones. See Chapter 15.

11.3.3. Blocking
Uncontended synchronization can be handled entirely within the JVM (Bacon et al., 1998); contended synchronization may require OS activity, which adds to the cost. When locking is contended, the losing thread(s) must block. The JVM can implement blocking either via spin-waiting (repeatedly trying to acquire the lock until it succeeds) or by suspending the blocked thread through the operating system. Which is more efficient depends on the relationship between context switch overhead and the time until the lock becomes available; spin-waiting is preferable for short waits and suspension is preferable for long waits. Some JVMs choose between the two adaptively based on profiling data of past wait times, but most just suspend threads waiting for a lock.
Suspending a thread because it could not get a lock, or because it blocked on a condition wait or blocking I/O operation, entails two additional context switches and all the attendant OS and cache activity: the blocked thread is switched out before its quantum has expired, and is then switched back in later after the lock or other resource becomes available. (Blocking due to lock contention also has a cost for the thread holding the lock: when it releases the lock, it must then ask the OS to resume the blocked thread.)
11.4. Reducing Lock Contention
We've seen that serialization hurts scalability and that context switches hurt performance. Contended locking causes both, so reducing lock contention can improve both performance and scalability.

Access to resources guarded by an exclusive lock is serialized: only one thread at a time may access it. Of course, we use locks for good reasons, such as preventing data corruption, but this safety comes at a price. Persistent contention for a lock limits scalability.
The principal threat to scalability in concurrent applications is the exclusive resource lock.
Two factors influence the likelihood of contention for a lock: how often that lock is requested and how long it is held once acquired.[7] If the product of these factors is sufficiently small, then most attempts to acquire the lock will be uncontended, and lock contention will not pose a significant scalability impediment. If, however, the lock is in sufficiently high demand, threads will block waiting for it; in the extreme case, processors will sit idle even though there is plenty of work to do.

[7] This is a corollary of Little's law, a result from queueing theory that says "the average number of customers in a stable system is equal to their average arrival rate multiplied by their average time in the system". (Little, 1961)
There are three ways to reduce lock contention:
Reduce the duration for which locks are held;
Reduce the frequency with which locks are requested; or
Replace exclusive locks with coordination mechanisms that permit greater concurrency.
11.4.1. Narrowing Lock Scope ("Get in, Get Out")
An effective way to reduce the likelihood of contention is to hold locks as briefly as possible. This can be done by moving code that doesn't require the lock out of synchronized blocks, especially for expensive operations and potentially blocking operations such as I/O.
It is easy to see how holding a "hot" lock for too long can limit scalability; we saw an example of this in SynchronizedFactorizer in Chapter 2. If an operation holds a lock for 2 milliseconds and every operation requires that lock, throughput can be no greater than 500 operations per second, no matter how many processors are available. Reducing the time the lock is held to 1 millisecond improves the lock-induced throughput limit to 1000 operations per second.[8]

[8] Actually, this calculation understates the cost of holding locks for too long because it doesn't take into account the context switch overhead generated by increased lock contention.
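
Restated as a back-of-the-envelope bound, with H the time the lock is held per operation:

throughput <= 1 / H
H = 2 ms:  1 / 0.002 s = 500 operations/sec
H = 1 ms:  1 / 0.001 s = 1000 operations/sec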
AttributeStore in Listing 11.4 shows an example of holding a lock longer than necessary. The userLocationMatches method looks up the user's location in a Map and uses regular expression matching to see if the resulting value matches the supplied pattern. The entire userLocationMatches method is synchronized, but the only portion of the code that actually needs the lock is the call to Map.get.

Listing 11.4. Holding a Lock Longer than Necessary.

@ThreadSafe
public class AttributeStore {
    @GuardedBy("this") private final Map<String, String>
            attributes = new HashMap<String, String>();

    public synchronized boolean userLocationMatches(String name,
                                                    String regexp) {
        String key = "users." + name + ".location";
        String location = attributes.get(key);
        if (location == null)
            return false;
        else
            return Pattern.matches(regexp, location);
    }
}
BetterAttributeStore in Listing 11.5 rewrites AttributeStore to reduce significantly the lock duration. The first step is to construct the Map key associated with the user's location, a string of the form users.name.location. This entails instantiating a StringBuilder object, appending several strings to it, and instantiating the result as a String. After the location has been retrieved, the regular expression is matched against the resulting location string. Because constructing the key string and processing the regular expression do not access shared state, they need not be executed with the lock held. BetterAttributeStore factors these steps out of the synchronized block, thus reducing the time the lock is held.
Listing 11.5. Reducing Lock Duration.
@ThreadSafe
public class BetterAttributeStore {
    @GuardedBy("this") private final Map<String, String>
            attributes = new HashMap<String, String>();

    public boolean userLocationMatches(String name, String regexp) {
        String key = "users." + name + ".location";
        String location;
        synchronized (this) {
            location = attributes.get(key);
        }
        if (location == null)
            return false;
        else
            return Pattern.matches(regexp, location);
    }
}
Reducing the scope of the lock in userLocationMatches substantially reduces the number of instructions that are executed with the lock held. By Amdahl's law, this removes an impediment to scalability because the amount of serialized code is reduced.
Because AttributeStore has only one state variable, attributes, we can improve it further by the technique of delegating thread safety (Section 4.3). By replacing attributes with a thread-safe Map (a Hashtable, synchronizedMap, or ConcurrentHashMap), AttributeStore can delegate all its thread safety obligations to the underlying thread-safe collection. This eliminates the need for explicit synchronization in AttributeStore, reduces the lock scope to the duration of the Map access, and removes the risk that a future maintainer will undermine thread safety by forgetting to acquire the appropriate lock before accessing attributes.
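
A minimal sketch of that delegating version (my illustration; the class name and the setAttribute method are assumptions, not a listing from the book):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.regex.Pattern;

public class DelegatingAttributeStore {
    // Thread safety is delegated entirely to the concurrent map; the class
    // itself needs no synchronized methods or blocks.
    private final ConcurrentMap<String, String> attributes =
            new ConcurrentHashMap<String, String>();

    public void setAttribute(String key, String value) {
        attributes.put(key, value);
    }

    public boolean userLocationMatches(String name, String regexp) {
        // Lock scope shrinks to whatever locking get() performs internally.
        String location = attributes.get("users." + name + ".location");
        return location != null && Pattern.matches(regexp, location);
    }
}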
While shrinking synchronized blocks can improve scalability, a synchronized block can be too small; operations that need to be atomic (such as updating multiple variables that participate in an invariant) must be contained in a single synchronized block. And because the cost of synchronization is nonzero, breaking one synchronized block into multiple synchronized blocks (correctness permitting) at some point becomes counterproductive in terms of performance.[9] The ideal balance is of course platform-dependent, but in practice it makes sense to worry about the size of a synchronized block only when you can move "substantial" computation or blocking operations out of it.

[9] If the JVM performs lock coarsening, it may undo the splitting of synchronized blocks anyway.
11.4.2. Reducing Lock Granularity
The other way to reduce the fraction of time that a lock is held (and therefore the likelihood that it will be contended) is to have threads ask for it less often. This can be accomplished by lock splitting and lock striping, which involve using separate locks to guard multiple independent state variables previously guarded by a single lock. These techniques reduce the granularity at which locking occurs, potentially allowing greater scalability, but using more locks also increases the risk of deadlock.
As a thought experiment, imagine what would happen if there was only one lock for the entire application instead of a separate lock for each object. Then execution of all synchronized blocks, regardless of their lock, would be serialized. With many threads competing for the global lock, the chance that two threads want the lock at the same time increases, resulting in more contention. So if lock requests were instead distributed over a larger set of locks, there would be less contention. Fewer threads would be blocked waiting for locks, thus increasing scalability.
If a lock guards more than one independent state variable, you may be able to improve scalability by splitting it into multiple locks that each guard different variables. This results in each lock being requested less often.
ServerStatus in Listing 11.6 shows a portion of the monitoring interface for a database server that maintains the set of currently logged-on users and the set of currently executing queries. As a user logs on or off or query execution begins or ends, the ServerStatus object is updated by calling the appropriate add or remove method. The two types of information are completely independent; ServerStatus could even be split into two separate classes with no loss of functionality.
Instead of guarding both users and queries with the ServerStatus lock, we can instead guard each with a separate lock, as shown in Listing 11.7. After splitting the lock, each new finer-grained lock will see less locking traffic than the original coarser lock would have. (Delegating to a thread-safe Set implementation for users and queries instead of using explicit synchronization would implicitly provide lock splitting, as each Set would use a different lock to guard its state.)
Splitting a lock into two offers the greatest possibility for improvement when the lock is experiencing moderate but not heavy contention. Splitting locks that are experiencing little contention yields little net improvement in performance or throughput, although it might increase the load threshold at which performance starts to degrade due to contention. Splitting locks experiencing moderate contention might actually turn them into mostly uncontended locks, which is the most desirable outcome for both performance and scalability.
Listing 11.6. Candidate for Lock Splitting.
@ThreadSafe
public class ServerStatus {
    @GuardedBy("this") public final Set<String> users;
    @GuardedBy("this") public final Set<String> queries;
    ...
    public synchronized void addUser(String u) { users.add(u); }
    public synchronized void addQuery(String q) { queries.add(q); }
    public synchronized void removeUser(String u) {
        users.remove(u);
    }
    public synchronized void removeQuery(String q) {
        queries.remove(q);
    }
}
Listing 11.7. ServerStatus Refactored to Use Split Locks.
@ThreadSafe
public class ServerStatus {
    @GuardedBy("users") public final Set<String> users;
    @GuardedBy("queries") public final Set<String> queries;
    ...
    public void addUser(String u) {
        synchronized (users) {
            users.add(u);
        }
    }

    public void addQuery(String q) {
        synchronized (queries) {
            queries.add(q);
        }
    }
    // remove methods similarly refactored to use split locks
}
11.4.3. Lock Striping
Splitting a heavily contended lock into two is likely to result in two heavily contended locks. While this will produce a small scalability improvement by enabling two threads to execute concurrently instead of one, it still does not dramatically improve prospects for concurrency on a system with many processors. The lock splitting example in the ServerStatus classes does not offer any obvious opportunity for splitting the locks further.
Lock splitting can sometimes be extended to partition locking on a variable-sized set of independent objects, in which case it is called lock striping. For example, the implementation of ConcurrentHashMap uses an array of 16 locks, each of which guards 1/16 of the hash buckets; bucket N is guarded by lock N mod 16. Assuming the hash function provides reasonable spreading characteristics and keys are accessed uniformly, this should reduce the demand for any given lock by approximately a factor of 16. It is this technique that enables ConcurrentHashMap to support up to 16 concurrent writers. (The number of locks could be increased to provide even better concurrency under heavy access on high-processor-count systems, but the number of stripes should be increased beyond the default of 16 only when you have evidence that concurrent writers are generating enough contention to warrant raising the limit.)
One of the downsides of lock striping is that locking the collection for exclusive access is more difficult and costly than with a single lock. Usually an operation can be performed by acquiring at most one lock, but occasionally you need to lock the entire collection, as when ConcurrentHashMap needs to expand the map and rehash the values into a larger set of buckets. This is typically done by acquiring all of the locks in the stripe set.[10]

[10] The only way to acquire an arbitrary set of intrinsic locks is via recursion.
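
To make the footnote concrete, here is a hedged sketch (mine, not the book's) of acquiring an entire stripe set by recursion, as it might appear inside a striped collection like the StripedMap shown below; withAllLocks is an illustrative name:

// Runs the action while holding locks[from] through locks[locks.length-1].
// Intrinsic locks can only be acquired by entering synchronized blocks, so
// holding a variable number of them requires nesting the blocks recursively.
private void withAllLocks(int from, Runnable action) {
    if (from == locks.length)
        action.run();
    else
        synchronized (locks[from]) {
            withAllLocks(from + 1, action);
        }
}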
StripedMap in Listing 11.8 illustrates implementing a hash-based map using lock striping. There are N_LOCKS locks, each guarding a subset of the buckets. Most methods, like get, need acquire only a single bucket lock. Some methods may need to acquire all the locks but, as in the implementation for clear, may not need to acquire them all simultaneously.[11]

[11] Clearing the Map in this way is not atomic, so there is not necessarily a time when the StripedMap is actually empty if other threads are concurrently adding elements; making the operation atomic would require acquiring all the locks at once. However, for concurrent collections that clients typically cannot lock for exclusive access, the result of methods like size or isEmpty may be out of date by the time they return anyway, so this behavior, while perhaps somewhat surprising, is usually acceptable.
11.4.4. Avoiding Hot Fields
Lock splitting and lock striping can improve scalability because they enable different threads to operate on different data (or different portions of the same data structure) without interfering with each other. A program that would benefit from lock splitting necessarily exhibits contention for a lock more often than for the data guarded by that lock. If a lock guards two independent variables X and Y, and thread A wants to access X while B wants to access Y (as would be the case if one thread called addUser while another called addQuery in ServerStatus), then the two threads are not contending for any data, even though they are contending for a lock.

Listing 11.8. Hash-based Map Using Lock Striping.
@ThreadSafe
public class StripedMap {
    // Synchronization policy: buckets[n] guarded by locks[n%N_LOCKS]
    private static final int N_LOCKS = 16;
    private final Node[] buckets;
    private final Object[] locks;

    private static class Node { ... }

    public StripedMap(int numBuckets) {
        buckets = new Node[numBuckets];
        locks = new Object[N_LOCKS];
        for (int i = 0; i < N_LOCKS; i++)
            locks[i] = new Object();
    }

    private final int hash(Object key) {
        return Math.abs(key.hashCode() % buckets.length);
    }

    public Object get(Object key) {
        int hash = hash(key);
        synchronized (locks[hash % N_LOCKS]) {
            for (Node m = buckets[hash]; m != null; m = m.next)
                if (m.key.equals(key))
                    return m.value;
        }
        return null;
    }

    public void clear() {
        for (int i = 0; i < buckets.length; i++) {
            synchronized (locks[i % N_LOCKS]) {
                buckets[i] = null;
            }
        }
    }
    ...
}
Lock granularity cannot be reduced when there are variables that are required for every operation. This is yet another area where raw performance and scalability are often at odds with each other; common optimizations such as caching frequently computed values can introduce "hot fields" that limit scalability.
If you were implementing HashMap, you would have a choice of how size computes the number of entries in the Map. The simplest approach is to count the number of entries every time it is called. A common optimization is to update a separate counter as entries are added or removed; this slightly increases the cost of a put or remove operation to keep the counter up to date, but reduces the cost of the size operation from O(n) to O(1).
Keeping a separate count to speed up operations like size and isEmpty works fine for a single-threaded or fully synchronized implementation, but makes it much harder to improve the scalability of the implementation because every operation that modifies the map must now update the shared counter. Even if you use lock striping for the hash chains, synchronizing access to the counter reintroduces the scalability problems of exclusive locking. What looked like a performance optimization, caching the results of the size operation, has turned into a scalability liability. In this case, the counter is called a hot field because every mutative operation needs to access it.

ConcurrentHashMap avoids this problem by having size enumerate the stripes and add up the number of elements in each stripe, instead of maintaining a global count. To avoid enumerating every element, ConcurrentHashMap maintains a separate count field for each stripe, also guarded by the stripe lock.[12]
[12] If size is called frequently compared to mutative operations, striped data structures can optimize for this by caching the collection size in a volatile whenever size is called and invalidating the cache (setting it to -1) whenever the collection is modified. If the cached value is nonnegative on entry to size, it is accurate and can be returned; otherwise it is recomputed.
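
A hedged sketch of the per-stripe counting idea as it might be grafted onto the StripedMap of Listing 11.8 (the counts array and adjustCount are my illustration, not the actual ConcurrentHashMap internals):

// Per-stripe element counts; counts[i] is guarded by locks[i], the same
// lock that guards that stripe's buckets.
private final int[] counts = new int[N_LOCKS];

// Called with the stripe lock held whenever an entry is added or removed.
private void adjustCount(int stripe, int delta) {
    counts[stripe] += delta;
}

// size() visits the stripes one at a time; there is no single hot counter
// that every put and remove must touch.
public int size() {
    int sum = 0;
    for (int i = 0; i < N_LOCKS; i++)
        synchronized (locks[i]) {
            sum += counts[i];
        }
    return sum;
}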
11.4.5. Alternatives to Exclusive Locks
A third technique for mitigating the effect of lock contention is to forego the use of exclusive locks in favor of a more concurrency-friendly means of managing shared state. These include using the concurrent collections, read-write locks, immutable objects and atomic variables.
ReadWriteLock (see Chapter 13) enforces a multiple-reader, single-writer locking discipline: more than one reader can access the shared resource concurrently so long as none of them wants to modify it, but writers must acquire the lock exclusively. For read-mostly data structures, ReadWriteLock can offer greater concurrency than exclusive locking; for read-only data structures, immutability can eliminate the need for locking entirely.
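
A minimal read-write-lock sketch (my illustration; Chapter 13 covers the real API in detail), guarding a plain HashMap so that readers can proceed concurrently:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ReadMostlyCache<K, V> {
    private final Map<K, V> map = new HashMap<K, V>();
    private final ReadWriteLock lock = new ReentrantReadWriteLock();

    public V get(K key) {
        lock.readLock().lock();          // many readers may hold this at once
        try { return map.get(key); }
        finally { lock.readLock().unlock(); }
    }

    public void put(K key, V value) {
        lock.writeLock().lock();         // writers get exclusive access
        try { map.put(key, value); }
        finally { lock.writeLock().unlock(); }
    }
}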
Atomic variables (see Chapter 15) offer a means of reducing the cost of updating "hot fields" such as statistics counters, sequence generators, or the reference to the first node in a linked data structure. (We used AtomicLong to maintain the hit counter in the servlet examples in Chapter 2.) The atomic variable classes provide very fine-grained (and therefore more scalable) atomic operations on integers or object references, and are implemented using low-level concurrency primitives (such as compare-and-swap) provided by most modern processors. If your class has a small number of hot fields that do not participate in invariants with other variables, replacing them with atomic variables may improve scalability. (Changing your algorithm to have fewer hot fields might improve scalability even more; atomic variables reduce the cost of updating hot fields, but they don't eliminate it.)
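
For instance, a hedged sketch of replacing a hot counter field with an atomic variable (an illustrative HitCounter, not a listing from the book):

import java.util.concurrent.atomic.AtomicLong;

public class HitCounter {
    // A hot field: every request touches it. AtomicLong turns the update
    // into a single compare-and-swap rather than a lock acquisition.
    private final AtomicLong hits = new AtomicLong();

    public void recordHit() { hits.incrementAndGet(); }

    public long getHits() { return hits.get(); }
}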
11.4.6. Monitoring CPU Utilization
When testing for scalability, the goal is usually to keep the processors fully utilized. Tools like vmstat and mpstat on Unix systems or perfmon on Windows systems can tell you just how "hot" the processors are running.
If the CPUs are asymmetrically utilized (some CPUs are running hot but others are not), your first goal should be to find increased parallelism in your program. Asymmetric utilization indicates that most of the computation is going on in a small set of threads, and your application will not be able to take advantage of additional processors.
If the CPUs are not fully utilized, you need to figure out why. There are several likely causes:

Insufficient load. It may be that the application being tested is just not subjected to enough load. You can test for this by increasing the load and measuring changes in utilization, response time, or service time. Generating enough load to saturate an application can require substantial computer power; the problem may be that the client systems, not the system being tested, are running at capacity.

I/O-bound. You can determine whether an application is disk-bound using iostat or perfmon, and whether it is bandwidth-limited by monitoring traffic levels on your network.

Externally bound. If your application depends on external services such as a database or web service, the bottleneck may not be in your code. You can test for this by using a profiler or database administration tools to determine how much time is being spent waiting for answers from the external service.

Lock contention. Profiling tools can tell you how much lock contention your application is experiencing and which locks are "hot". You can often get the same information without a profiler through random sampling, triggering a few thread dumps and looking for threads contending for locks. If a thread is blocked waiting for a lock, the appropriate stack frame in the thread dump indicates "waiting to lock monitor ...". Locks that are mostly uncontended rarely show up in a thread dump; a heavily contended lock will almost always have at least one thread waiting to acquire it and so will frequently appear in thread dumps.
If your application is keeping the CPUs sufficiently hot, you can use monitoring tools to infer whether it would benefit from additional CPUs. A program with only four threads may be able to keep a 4-way system fully utilized, but is unlikely to see a performance boost if moved to an 8-way system, since there would need to be waiting runnable threads to take advantage of the additional processors. (You may also be able to reconfigure the program to divide its workload over more threads, such as adjusting a thread pool size.) One of the columns reported by vmstat is the number of threads that are runnable but not currently running because a CPU is not available; if CPU utilization is high and there are always runnable threads waiting for a CPU, your application would probably benefit from more processors.
11.4.7. Just Say No to Object Pooling
In early JVM versions, object allocation and garbage collection were slow,[13] but their performance has improved substantially since then. In fact, allocation in Java is now faster than malloc is in C: the common code path for new Object in HotSpot 1.4.x and 5.0 is approximately ten machine instructions.

[13] As was everything else: synchronization, graphics, JVM startup, reflection; predictably so in the first version of an experimental technology.
To work around "slow" object lifecycles, many developers turned to object pooling, where objects are recycled instead of being garbage collected and allocated anew when needed. Even taking into account its reduced garbage collection overhead, object pooling has been shown to be a performance loss[14] for all but the most expensive objects (and a serious loss for light- and medium-weight objects) in single-threaded programs (Click, 2005).

[14] In addition to being a loss in terms of CPU cycles, object pooling has a number of other problems, among them the challenge of setting pool sizes correctly (too small, and pooling has no effect; too large, and it puts pressure on the garbage collector, retaining memory that could be used more effectively for something else); the risk that an object will not be properly reset to its newly allocated state, introducing subtle bugs; the risk that a thread will return an object to the pool but continue using it; and that it makes more work for generational garbage collectors by encouraging a pattern of old-to-young references.
In concurrent applications, pooling fares even worse. When threads allocate new objects, very little inter-thread coordination is required, as allocators typically use thread-local allocation blocks to eliminate most synchronization on heap data structures. But if those threads instead request an object from a pool, some synchronization is necessary to coordinate access to the pool data structure, creating the possibility that a thread will block. Because blocking a thread due to lock contention is hundreds of times more expensive than an allocation, even a small amount of pool-induced contention would be a scalability bottleneck. (Even an uncontended synchronization is usually more expensive than allocating an object.) This is yet another technique intended as a performance optimization but that turned into a scalability hazard. Pooling has its uses,[15] but is of limited utility as a performance optimization.

[15] In constrained environments, such as some J2ME or RTSJ targets, object pooling may still be required for effective memory management or to manage responsiveness.
Allocating objects is usually cheaper than synchronizing.
11.5. Example: Comparing Map Performance
The single-threaded performance of ConcurrentHashMap is slightly better than that of a synchronized HashMap, but it is in concurrent use that it really shines. The implementation of ConcurrentHashMap assumes the most common operation is retrieving a value that already exists, and is therefore optimized to provide highest performance and concurrency for successful get operations.
The major scalability impediment for the synchronized Map implementations is that there is a single lock for the entire map, so only one thread can access the map at a time. On the other hand, ConcurrentHashMap does no locking for most successful read operations, and uses lock striping for write operations and those few read operations that do require locking. As a result, multiple threads can access the Map concurrently without blocking.
Figure 11.3 illustrates the differences in scalability between several Map implementations: ConcurrentHashMap, ConcurrentSkipListMap, and HashMap and TreeMap wrapped with synchronizedMap. The first two are thread-safe by design; the latter two are made thread-safe by the synchronized wrapper. In each run, N threads concurrently execute a tight loop that selects a random key and attempts to retrieve the value corresponding to that key. If the value is not present, it is added to the Map with probability p = .6, and if it is present, is removed with probability p = .02. The tests were run under a pre-release build of Java 6 on an 8-way Sparc V880, and the graph displays throughput normalized to the one-thread case for ConcurrentHashMap. (The scalability gap between the concurrent and synchronized collections is even larger on Java 5.0.)
The data for ConcurrentHashMap and ConcurrentSkipListMap shows that they scale well to large numbers of threads; throughput continues to improve as threads are added. While the numbers of threads in Figure 11.3 may not seem large, this test program generates more contention per thread than a typical application because it does little other than pound on the Map; a real program would do additional thread-local work in each iteration.
Figure 11.3. Comparing Scalability of Map Implementations.



The numbers for the synchronized collections are not as encouraging. Performance for the one-thread case is comparable to ConcurrentHashMap, but once the load transitions from mostly uncontended to mostly contended, which happens here at two threads, the synchronized collections suffer badly. This is common behavior for code whose scalability is limited by lock contention. So long as contention is low, time per operation is dominated by the time to actually do the work and throughput may improve as threads are added. Once contention becomes significant, time per operation is dominated by context switch and scheduling delays, and adding more threads has little effect on throughput.
11.6. Reducing Context Switch Overhead
Many tasks involve operations that may block; transitioning between the running and blocked states entails a context switch. One source of blocking in server applications is generating log messages in the course of processing requests; to illustrate how throughput can be improved by reducing context switches, we'll analyze the scheduling behavior of two logging approaches.

Most logging frameworks are thin wrappers around println; when you have something to log, just write it out right then and there. Another approach was shown in LogWriter on page 152: the logging is performed in a dedicated background thread instead of by the requesting thread. From the developer's perspective, both approaches are roughly equivalent. But there may be a difference in performance, depending on the volume of logging activity, how many threads are doing logging, and other factors such as the cost of context switching.[16]

[16] Building a logger that moves the I/O to another thread may improve performance, but it also introduces a number of design complications, such as interruption (what happens if a thread blocked in a logging operation is interrupted?), service guarantees (does the logger guarantee that a successfully queued log message will be logged prior to service shutdown?), saturation policy (what happens when the producers log messages faster than the logger thread can handle them?), and service lifecycle (how do we shut down the logger, and how do we communicate the service state to producers?).
The service time for a logging operation includes whatever computation is associated with the I/O stream classes; if the I/O operation blocks, it also includes the duration for which the thread is blocked. The operating system will deschedule the blocked thread until the I/O completes, and probably a little longer. When the I/O completes, other threads are probably active and will be allowed to finish out their scheduling quanta, and threads may already be waiting ahead of us on the scheduling queue, further adding to service time. Alternatively, if multiple threads are logging simultaneously, there may be contention for the output stream lock, in which case the result is the same as with blocking I/O: the thread blocks waiting for the lock and gets switched out. Inline logging involves I/O and locking, which can lead to increased context switching and therefore increased service times.
Increasing request service time is undesirable for several reasons. First, service time affects quality of service: longer service times mean someone is waiting longer for a result. But more significantly, longer service times in this case mean more lock contention. The "get in, get out" principle of Section 11.4.1 tells us that we should hold locks as briefly as possible, because the longer a lock is held, the more likely that lock will be contended. If a thread blocks waiting for I/O while holding a lock, another thread is more likely to want the lock while the first thread is holding it. Concurrent systems perform much better when most lock acquisitions are uncontended, because contended lock acquisition means more context switches. A coding style that encourages more context switches thus yields lower overall throughput.
Moving the I/O out of the request-processing thread is likely to shorten the mean service time for request processing. Threads calling log no longer block waiting for the output stream lock or for I/O to complete; they need only queue the message and can then return to their task. On the other hand, we've introduced the possibility of contention for the message queue, but the put operation is lighter-weight than the logging I/O (which might require system calls) and so is less likely to block in actual use (as long as the queue is not full). Because the request thread is now less likely to block, it is less likely to be context-switched out in the middle of a request. What we've done is turned a complicated and uncertain code path involving I/O and possible lock contention into a straight-line code path.
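As a reminder of the shape of the background-thread approach, here is a minimal sketch of a queue-based logger; the class name, queue capacity, and shutdown handling are simplifying assumptions, and footnote [16] lists the design issues a real implementation must address:

// Minimal sketch of a queue-based background logger; callers only enqueue,
// and all I/O happens on the single logger thread. Names are illustrative.
import java.io.PrintWriter;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

class BackgroundLogger {
    private final BlockingQueue<String> queue =
        new LinkedBlockingQueue<String>(1000);  // bounded: applies backpressure when full
    private final PrintWriter writer;

    BackgroundLogger(PrintWriter writer) {
        this.writer = writer;
        Thread logger = new Thread(new Runnable() {
            public void run() {
                try {
                    while (true)
                        BackgroundLogger.this.writer.println(queue.take());
                } catch (InterruptedException e) { }  // shutdown omitted; see [16]
            }
        });
        logger.start();
    }

    public void log(String msg) throws InterruptedException {
        queue.put(msg);  // lightweight compared to the logging I/O
    }
}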
To some extent, we are just moving the work around, moving the I/O to a thread where its cost isn't perceived by the user (which may in itself be a win). But by moving all the logging I/O to a single thread, we also eliminate the chance of contention for the output stream and thus eliminate a source of blocking. This improves overall throughput because fewer resources are consumed in scheduling, context switching, and lock management.

Moving the I/O from many request-processing threads to a single logger thread is similar to the difference between a bucket brigade and a collection of individuals fighting a fire. In the "hundred guys running around with buckets" approach, you have a greater chance of contention at the water source and at the fire (resulting in overall less water
delivered to the fire), plus greater inefficiency because each worker is continuously switching modes (filling, running, dumping, running, etc.). In the bucket-brigade approach, the flow of water from the source to the burning building is constant, less energy is expended transporting the water to the fire, and each worker focuses on doing one job continuously. Just as interruptions are disruptive and productivity-reducing to humans, blocking and context switching are disruptive to threads.
C(55")+
Because one of the most common reasons to use threads is to exploit multiple processors, in discussing the performance of concurrent applications, we are usually more concerned with throughput or scalability than we are with raw service time. Amdahl's law tells us that the scalability of an application is driven by the proportion of code that must be executed serially. Since the primary source of serialization in Java programs is the exclusive resource lock, scalability can often be improved by spending less time holding locks, either by reducing lock granularity, reducing the duration for which locks are held, or replacing exclusive locks with nonexclusive or nonblocking alternatives.
Chapter 12. Testing Concurrent Programs
Concurrent programs employ similar design principles and patterns to sequential programs. The difference is that concurrent programs have a degree of nondeterminism that sequential programs do not, increasing the number of potential interactions and failure modes that must be planned for and analyzed.

Similarly, testing concurrent programs uses and extends ideas from testing sequential ones. The same techniques for testing correctness and performance in sequential programs can be applied to concurrent programs, but with concurrent programs the space of things that can go wrong is much larger. The major challenge in constructing tests for concurrent programs is that potential failures may be rare probabilistic occurrences rather than deterministic ones; tests that disclose such failures must be more extensive and run for longer than typical sequential tests.
Most tests of concurrent classes fall into one or both of the classic categories of safety and liveness. In Chapter 1, we defined safety as "nothing bad ever happens" and liveness as "something good eventually happens".
Tests of safety, which verify that a class's behavior conforms to its specification, usually take the form of testing invariants. For example, in a linked list implementation that caches the size of the list every time it is modified, one safety test would be to compare the cached count against the actual number of elements in the list. In a single-threaded program this is easy, since the list contents do not change while you are testing its properties. But in a concurrent program, such a test is likely to be fraught with races unless you can observe the count field and count the elements in a single atomic operation. This can be done by locking the list for exclusive access, employing some sort of "atomic snapshot" feature provided by the implementation, or by using "test points" provided by the implementation that let tests assert invariants or execute test code atomically.
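For instance, a minimal sketch of the first option, assuming a hypothetical SizeCachingList whose intrinsic lock guards both the cached count and the elements (the class and its methods are illustrative, not a real API):

// Hypothetical sketch: lock the list so that reading the cached count and
// counting the elements happen as a single atomic action.
void assertSizeInvariant(SizeCachingList<?> list) {
    synchronized (list) {       // assumes the list guards its state with its own lock
        int counted = 0;
        for (Object o : list)   // assumes the list is Iterable
            counted++;
        assertEquals(list.cachedSize(), counted);
    }
}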
In this book, we've used timing diagrams to depict "unlucky" interactions that could cause failures in incorrectly constructed classes; test programs attempt to search enough of the state space that such bad luck eventually occurs. Unfortunately, test code can introduce timing or synchronization artifacts that can mask bugs that might otherwise manifest themselves.[1]

[1] Bugs that disappear when you add debugging or test code are playfully called Heisenbugs.
Liveness properties present their own testing challenges. Liveness tests include tests of progress and nonprogress, which are hard to quantify: how do you verify that a method is blocking and not merely running slowly? Similarly, how do you test that an algorithm does not deadlock? How long should you wait before you declare it to have failed?

Related to liveness tests are performance tests. Performance can be measured in a number of ways, including:

Throughput: the rate at which a set of concurrent tasks is completed;

Responsiveness: the delay between a request for and completion of some action (also called latency); or

Scalability: the improvement in throughput (or lack thereof) as more resources (usually CPUs) are made available.
12.1. Testing for Correctness
Developing unit tests for a concurrent class starts with the same analysis as for a sequential class: identifying invariants and postconditions that are amenable to mechanical checking. If you are lucky, many of these are present in the specification; the rest of the time, writing tests is an adventure in iterative specification discovery.
As a concrete illustration, we're going to build a set of test cases for a bounded buffer. Listing 12.1 shows our BoundedBuffer implementation, using Semaphore to implement the required bounding and blocking.

BoundedBuffer implements a fixed-length array-based queue with blocking put and take methods controlled by a pair of counting semaphores. The availableItems semaphore represents the number of elements that can be removed from the buffer, and is initially zero (since the buffer is initially empty). Similarly, availableSpaces represents how many items can be inserted into the buffer, and is initialized to the size of the buffer.

A take operation first requires that a permit be obtained from availableItems. This succeeds immediately if the buffer is nonempty, and otherwise blocks until the buffer becomes nonempty. Once a permit is obtained, the next element from the buffer is removed and a permit is released to the availableSpaces semaphore.[2] The put operation is defined conversely, so that on exit from either the put or take methods, the sum of the counts of both semaphores always equals the bound. (In practice, if you need a bounded buffer you should use ArrayBlockingQueue or LinkedBlockingQueue rather than rolling your own, but the technique used here illustrates how insertions and removals can be controlled in other data structures as well.)
[2] In a counting semaphore, the permits are not represented explicitly or associated with an owning thread; a release operation creates a permit and an acquire operation consumes one.
Listing 12.1. Bounded Buffer Using Semaphore.
@ThreadSafe
public class BoundedBuffer<E> {
    private final Semaphore availableItems, availableSpaces;
    @GuardedBy("this") private final E[] items;
    @GuardedBy("this") private int putPosition = 0, takePosition = 0;

    public BoundedBuffer(int capacity) {
        availableItems = new Semaphore(0);
        availableSpaces = new Semaphore(capacity);
        items = (E[]) new Object[capacity];
    }

    public boolean isEmpty() {
        return availableItems.availablePermits() == 0;
    }

    public boolean isFull() {
        return availableSpaces.availablePermits() == 0;
    }

    public void put(E x) throws InterruptedException {
        availableSpaces.acquire();
        doInsert(x);
        availableItems.release();
    }

    public E take() throws InterruptedException {
        availableItems.acquire();
        E item = doExtract();
        availableSpaces.release();
        return item;
    }

    private synchronized void doInsert(E x) {
        int i = putPosition;
        items[i] = x;
        putPosition = (++i == items.length) ? 0 : i;
    }

    private synchronized E doExtract() {
        int i = takePosition;
        E x = items[i];
        items[i] = null;
        takePosition = (++i == items.length) ? 0 : i;
        return x;
    }
}
12.1.1. Basic Unit Tests
The most basic unit tests for BoundedBuffer are similar to what we'd use in a sequential context: create a bounded buffer, call its methods, and assert postconditions and invariants. Some invariants that quickly come to mind are that a freshly created buffer should identify itself as empty, and also as not full. A similar but slightly more complicated safety test is to insert N elements into a buffer with capacity N (which should succeed without blocking), and test that the buffer recognizes that it is full (and not empty). JUnit test methods for these properties are shown in Listing 12.2.
Listing 12.2. Basic Unit Tests for BoundedBuffer.
class BoundedBufferTest extends TestCase {
    void testIsEmptyWhenConstructed() {
        BoundedBuffer<Integer> bb = new BoundedBuffer<Integer>(10);
        assertTrue(bb.isEmpty());
        assertFalse(bb.isFull());
    }

    void testIsFullAfterPuts() throws InterruptedException {
        BoundedBuffer<Integer> bb = new BoundedBuffer<Integer>(10);
        for (int i = 0; i < 10; i++)
            bb.put(i);
        assertTrue(bb.isFull());
        assertFalse(bb.isEmpty());
    }
}
These simple test methods are entirely sequential. Including a set of sequential tests in your test suite is often helpful, since they can disclose when a problem is not related to concurrency issues before you start looking for data races.
12.1.2. Testing Blocking Operations
Tests of essential concurrency properties require introducing more than one thread. Most testing frameworks are not very concurrency-friendly: they rarely include facilities to create threads or monitor them to ensure that they do not die unexpectedly. If a helper thread created by a test case discovers a failure, the framework usually does not know with which test the thread is associated, so some work may be required to relay success or failure information back to the main test runner thread so it can be reported.
For the conformance tests for java.util.concurrent, it was important that failures be clearly associated with a specific test. Hence the JSR 166 Expert Group created a base class[3] that provided methods to relay and report failures during tearDown, following the convention that every test must wait until all the threads it created terminate. You may not need to go to such lengths; the key requirements are that it be clear whether the tests passed and that failure information is reported somewhere for use in diagnosing the problem.

[3] http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/test/tck/JSR166TestCase.java
If a method is supposed to block under certain conditions, then a test for that behavior should succeed only if the thread does not proceed. Testing that a method blocks is similar to testing that a method throws an exception; if the method returns normally, the test has failed.
Testing that a method blocks introduces an additional complication: once the method successfully blocks, you have to convince it somehow to unblock. The obvious way to do this is via interruption: start a blocking activity in a separate thread, wait until the thread blocks, interrupt it, and then assert that the blocking operation completed. Of course, this requires your blocking methods to respond to interruption by returning early or throwing InterruptedException.
The "wait until the thread blocks" part is easier said than done; in practice, you have to make an arbitrary decision about how long the few instructions being executed could possibly take, and wait longer than that. You should be prepared to increase this value if you are wrong (in which case you will see spurious test failures).
Listing 12.3 shows an approach to testing blocking operations. It creates a "taker" thread that attempts to take an element from an empty buffer. If take succeeds, it registers failure. The test runner thread starts the taker thread, waits a long time, and then interrupts it. If the taker thread has correctly blocked in the take operation, it will throw InterruptedException, and the catch block for this exception treats this as success and lets the thread exit. The main test runner thread then attempts to join with the taker thread and verifies that the join returned successfully by calling Thread.isAlive; if the taker thread responded to the interrupt, the join should complete quickly.
The timed join ensures that the test completes even if take gets stuck in some unexpected way. This test method tests several properties of take: not only that it blocks but that, when interrupted, it throws InterruptedException. This is one of the few cases in which it is appropriate to subclass Thread explicitly instead of using a Runnable in a pool: in order to test proper termination with join. The same approach can be used to test that the taker thread unblocks after an element is placed in the queue by the main thread.
It is tempting to use Thread.getState to verify that the thread is actually blocked on a condition wait, but this approach is not reliable. There is nothing that requires a blocked thread ever to enter the WAITING or TIMED_WAITING states, since the JVM can choose to implement blocking by spin-waiting instead. Similarly, because spurious wakeups from Object.wait or Condition.await are permitted (see Chapter 14), a thread in the WAITING or TIMED_WAITING state may temporarily transition to RUNNABLE even if the condition for which it is waiting is not yet true. Even ignoring these implementation options, it may take some time for the target thread to settle into a blocking state. The result of Thread.getState should not be used for concurrency control, and is of limited usefulness for testing; its primary utility is as a source of debugging information.
Listing 12.3. Testing Blocking and Responsiveness to Interruption.
void testTakeBlocksWhenEmpty() {
    final BoundedBuffer<Integer> bb = new BoundedBuffer<Integer>(10);
    Thread taker = new Thread() {
        public void run() {
            try {
                int unused = bb.take();
                fail(); // if we get here, it's an error
            } catch (InterruptedException success) { }
        }};
    try {
        taker.start();
        Thread.sleep(LOCKUP_DETECT_TIMEOUT);
        taker.interrupt();
        taker.join(LOCKUP_DETECT_TIMEOUT);
        assertFalse(taker.isAlive());
    } catch (Exception unexpected) {
        fail();
    }
}
12.1.3. Testing Safety
The tests in Listings 12.2 and 12.3 test important properties of the bounded buffer, but are unlikely to disclose errors stemming from data races. To test that a concurrent class performs correctly under unpredictable concurrent access, we need to set up multiple threads performing put and take operations over some amount of time and then somehow test that nothing went wrong.
Constructing tests to disclose safety errors in concurrent classes is a chicken-and-egg problem: the test programs themselves are concurrent programs. Developing good concurrent tests can be more difficult than developing the classes they test.
The challenge in constructing effective safety tests for concurrent classes is identifying easily checked properties that will, with high probability, fail if something goes wrong, while at the same time not letting the failure-auditing code limit concurrency artificially. It is best if checking the test property does not require any synchronization.
One approach that works well with classes used in producer-consumer designs (like BoundedBuffer) is to check that everything put into a queue or buffer comes out of it, and that nothing else does. A naive implementation of this approach would insert the element into a "shadow" list when it is put on the queue, remove it from the list when it is removed from the queue, and assert that the shadow list is empty when the test has finished. But this approach would distort the scheduling of the test threads because modifying the shadow list would require synchronization and possibly blocking.
A better approach is to compute checksums of the elements that are enqueued and dequeued using an order-sensitive checksum function, and compare them. If they match, the test passes. This approach works best when there is a single producer putting elements into the buffer and a single consumer taking them out, because it can test not only that the right elements (probably) came out but that they came out in the right order.
Extending this approach to a multiple-producer, multiple-consumer situation requires using a checksum function that is insensitive to the order in which the elements are combined, so that multiple checksums can be combined after the test. Otherwise, synchronizing access to a shared checksum field could become a concurrency bottleneck or distort the timing of the test. (Any commutative operation, such as addition or XOR, meets these requirements.)
To ensure that your test actually tests what you think it does, it is important that the checksums themselves not be guessable by the compiler. It would be a bad idea to use consecutive integers as your test data because then the result would always be the same, and a smart compiler could conceivably just precompute it.
To avoid this problem, test data should be generated randomly, but many otherwise effective tests are compromised by a poor choice of random number generator (RNG). Random number generation can create couplings between classes and timing artifacts because most random number generator classes are thread-safe and therefore introduce additional synchronization.[4] Giving each thread its own RNG allows a non-thread-safe RNG to be used.
[4] Many benchmarks are, unbeknownst to their developers or users, simply tests of how great a concurrency bottleneck the RNG is.
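A minimal sketch of the per-thread RNG idea (on Java 7 and later, java.util.concurrent.ThreadLocalRandom provides this directly):

// Sketch: each thread lazily gets its own Random instance, so no
// synchronization is shared between threads.
private static final ThreadLocal<java.util.Random> localRandom =
    new ThreadLocal<java.util.Random>() {
        protected java.util.Random initialValue() {
            // seed from thread identity and the clock so threads differ
            return new java.util.Random(
                Thread.currentThread().getId() ^ System.nanoTime());
        }
    };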
Rather than using a general-purpose RNG, it is better to use simple pseudorandom functions. You don't need high-quality randomness; all you need is enough randomness to ensure the numbers change from run to run. The xorShift function in Listing 12.4 (Marsaglia, 2003) is among the cheapest medium-quality random number functions. Starting it
off with values based on hashCode and nanoTime makes the sums both unguessable and almost always different for each run.
Listing 12.4. Medium-Quality Random Number Generator Suitable for Testing.
static int xorShift(int y) {
    y ^= (y << 6);
    y ^= (y >>> 21);
    y ^= (y << 7);
    return y;
}
PutTakeTest in Listings 12.5 and 12.6 starts N producer threads that generate elements and enqueue them, and N consumer threads that dequeue them. Each thread updates the checksum of the elements as they go in or out, using a per-thread checksum that is combined at the end of the test run so as to add no more synchronization or contention than required to test the buffer.
Depending on your platform, creating and starting a thread can be a moderately heavyweight operation. If your thread is short-running and you start a number of threads in a loop, the threads run sequentially rather than concurrently in the worst case. Even in the not-quite-worst case, the fact that the first thread has a head start on the others means that you may get fewer interleavings than expected: the first thread runs by itself for some amount of time, and then the first two threads run concurrently for some amount of time, and only eventually are all the threads running concurrently. (The same thing happens at the end of the run: the threads that got a head start also finish early.)
We presented a technique for mitigating this problem in Section 5.5.1, using a CountDownLatch as a starting gate and another as a finish gate. Another way to get the same effect is to use a CyclicBarrier, initialized with the number of worker threads plus one, and have the worker threads and the test driver wait at the barrier at the beginning and end of their run. This ensures that all threads are up and running before any start working. PutTakeTest uses this technique to coordinate starting and stopping the worker threads, creating more potential concurrent interleavings. We still can't guarantee that the scheduler won't run each thread to completion sequentially, but making the runs long enough reduces the extent to which scheduling distorts our results.
The final trick employed by PutTakeTest is to use a deterministic termination criterion so that no additional inter-thread coordination is needed to figure out when the test is finished. The test method starts exactly as many producers as consumers and each of them puts or takes the same number of elements, so the total number of items added and removed is the same.
Tests like PutTakeTest tend to be good at finding safety violations. For example, a common error in implementing semaphore-controlled buffers is to forget that the code actually doing the insertion and extraction requires mutual exclusion (using synchronized or ReentrantLock). A sample run of PutTakeTest with a version of BoundedBuffer that omits making doInsert and doExtract synchronized fails fairly quickly. Running PutTakeTest with a few dozen threads iterating a few million times on buffers of various capacity on various systems increases our confidence about the lack of data corruption in put and take.
Tests should be run on multiprocessor systems to increase the diversity of potential interleavings. However, having more than a few CPUs does not necessarily make tests more effective. To maximize the chance of detecting timing-sensitive data races, there should be more active threads than CPUs, so that at any given time some threads are running and some are switched out, thus reducing the predictability of interactions between threads.
Listing 12.5. Producer-Consumer Test Program for BoundedBuffer.
public class PutTakeTest {
    private static final ExecutorService pool
        = Executors.newCachedThreadPool();
    private final AtomicInteger putSum = new AtomicInteger(0);
    private final AtomicInteger takeSum = new AtomicInteger(0);
    private final CyclicBarrier barrier;
    private final BoundedBuffer<Integer> bb;
    private final int nTrials, nPairs;

    public static void main(String[] args) {
        new PutTakeTest(10, 10, 100000).test(); // sample parameters
        pool.shutdown();
    }

    PutTakeTest(int capacity, int npairs, int ntrials) {
        this.bb = new BoundedBuffer<Integer>(capacity);
        this.nTrials = ntrials;
        this.nPairs = npairs;
        this.barrier = new CyclicBarrier(npairs * 2 + 1);
    }

    void test() {
        try {
            for (int i = 0; i < nPairs; i++) {
                pool.execute(new Producer());
                pool.execute(new Consumer());
            }
            barrier.await(); // wait for all threads to be ready
            barrier.await(); // wait for all threads to finish
            assertEquals(putSum.get(), takeSum.get());
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    class Producer implements Runnable { /* Listing 12.6 */ }

    class Consumer implements Runnable { /* Listing 12.6 */ }
}
Listing 12.6. Producer and Consumer Classes Used in PutTakeTest.
/* inner classes of PutTakeTest (Listing 12.5) */
class Producer implements Runnable {
    public void run() {
        try {
            int seed = (this.hashCode() ^ (int) System.nanoTime());
            int sum = 0;
            barrier.await();
            for (int i = nTrials; i > 0; --i) {
                bb.put(seed);
                sum += seed;
                seed = xorShift(seed);
            }
            putSum.getAndAdd(sum);
            barrier.await();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}

class Consumer implements Runnable {
    public void run() {
        try {
            barrier.await();
            int sum = 0;
            for (int i = nTrials; i > 0; --i) {
                sum += bb.take();
            }
            takeSum.getAndAdd(sum);
            barrier.await();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
In tests that run until they complete a fixed number of operations, it is possible that the test case will never finish if the code being tested encounters an exception due to a bug. The most common way to handle this is to have the test framework abort tests that do not terminate within a certain amount of time; how long to wait should be determined empirically, and failures must then be analyzed to ensure that the problem wasn't just that you didn't wait long enough. (This problem is not unique to testing concurrent classes; sequential tests must also distinguish between long-running and infinite loops.)
12.1.4. Testing Resource Management
The tests so far have been concerned with a class's adherence to its specification: that it does what it is supposed to do. A secondary aspect to test is that it does not do things it is not supposed to do, such as leak resources. Any object that holds or manages other objects should not continue to maintain references to those objects longer than necessary. Such storage leaks prevent garbage collectors from reclaiming memory (or threads, file handles, sockets, database connections, or other limited resources) and can lead to resource exhaustion and application failure.
Resource management issues are especially important for classes like BoundedBuffer: the entire reason for bounding a buffer is to prevent application failure due to resource exhaustion when producers get too far ahead of consumers. Bounding causes overly productive producers to block rather than continue to create work that will consume more and more memory or other resources.
Undesirable memory retention can be easily tested with heap-inspection tools that measure application memory usage; a variety of commercial and open-source heap-profiling tools can do this. The testLeak method in Listing 12.7 contains placeholders for a heap-inspection tool to snapshot the heap, which forces a garbage collection[5] and then records information about the heap size and memory usage.

[5] Technically, it is impossible to force a garbage collection; System.gc only suggests to the JVM that this might be a good time to perform a garbage collection. HotSpot can be instructed to ignore System.gc calls with -XX:+DisableExplicitGC.
The testLeak method inserts several large objects into a bounded buffer and then removes them; memory usage at heap snapshot #2 should be approximately the same as at heap snapshot #1. On the other hand, if doExtract forgot to null out the reference to the returned element (items[i]=null), the reported memory usage at the two snapshots would definitely not be the same. (This is one of the few times where explicit nulling is necessary; most of the time, it is either not helpful or actually harmful [EJ Item 5].)
12.1.5. Using Callbacks
Callbacks to client-provided code can be helpful in constructing test cases; callbacks are often made at known points in an object's lifecycle that are good opportunities to assert invariants. For example, ThreadPoolExecutor makes calls to the task Runnables and to the ThreadFactory.
Listing 12.7. Testing for Resource Leaks.
class Big { double[] data = new double[100000]; }

void testLeak() throws InterruptedException {
    BoundedBuffer<Big> bb = new BoundedBuffer<Big>(CAPACITY);
    int heapSize1 = /* snapshot heap */;
    for (int i = 0; i < CAPACITY; i++)
        bb.put(new Big());
    for (int i = 0; i < CAPACITY; i++)
        bb.take();
    int heapSize2 = /* snapshot heap */;
    assertTrue(Math.abs(heapSize1 - heapSize2) < THRESHOLD);
}
Testing a thread pool involves testing a number of elements of execution policy: that additional threads are created when they are supposed to, but not when they are not supposed to; that idle threads get reaped when they are supposed to, etc. Constructing a comprehensive test suite that covers all the possibilities is a major effort, but many of them can be tested fairly simply individually.
We can instrument thread creation by using a custom thread factory. TestingThreadFactory in Listing 12.8 maintains a count of created threads; test cases can then verify the number of threads created during a test run. TestingThreadFactory could be extended to return a custom Thread that also records when the thread terminates, so that test cases can verify that threads are reaped in accordance with the execution policy; a sketch of such an extension follows Listing 12.8.
Listing 12.8. Thread Factory for Testing ThreadPoolExecutor.
class TestingThreadFactory implements ThreadFactory {
    public final AtomicInteger numCreated = new AtomicInteger();
    private final ThreadFactory factory
        = Executors.defaultThreadFactory();

    public Thread newThread(Runnable r) {
        numCreated.incrementAndGet();
        return factory.newThread(r);
    }
}
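A sketch of the extension mentioned above might wrap the created threads so they count their own termination (the class and field names here are illustrative assumptions):

// Sketch: a factory whose threads record termination, so tests can verify
// that idle threads are reaped according to the execution policy.
class TerminationTrackingThreadFactory implements ThreadFactory {
    public final AtomicInteger numCreated = new AtomicInteger();
    public final AtomicInteger numTerminated = new AtomicInteger();

    public Thread newThread(Runnable r) {
        numCreated.incrementAndGet();
        return new Thread(r) {
            public void run() {
                try {
                    super.run();                     // run the submitted work
                } finally {
                    numTerminated.incrementAndGet(); // executes as the thread exits
                }
            }
        };
    }
}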
If the core pool size is smaller than the maximum size, the thread pool should grow as demand for execution increases. Submitting long-running tasks to the pool makes the number of executing tasks stay constant for long enough to make a few assertions, such as testing that the pool is expanded as expected, as shown in Listing 12.9.
Listing 12.9. Test Method to Verify Thread Pool Expansion.
public void testPoolExpansion() throws InterruptedException {
    int MAX_SIZE = 10;
    ExecutorService exec = Executors.newFixedThreadPool(MAX_SIZE);

    for (int i = 0; i < 10 * MAX_SIZE; i++)
        exec.execute(new Runnable() {
            public void run() {
                try {
                    Thread.sleep(Long.MAX_VALUE);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        });
    for (int i = 0;
         i < 20 && threadFactory.numCreated.get() < MAX_SIZE;
         i++)
        Thread.sleep(100);
    assertEquals(threadFactory.numCreated.get(), MAX_SIZE);
    exec.shutdownNow();
}
12.1.6. Generating More Interleavings
Since many of the potential failures in concurrent code are low-probability events, testing for concurrency errors is a numbers game, but there are some things you can do to improve your chances. We've already mentioned how running on multiprocessor systems with fewer processors than active threads can generate more interleavings than either a single-processor system or one with many processors. Similarly, testing on a variety of systems with different processor counts, operating systems, and processor architectures can disclose problems that might not occur on all systems.
A useful trick for increasing the number of interleavings, and therefore more effectively exploring the state space of your programs, is to use Thread.yield to encourage more context switches during operations that access shared state. (The effectiveness of this technique is platform-specific, since the JVM is free to treat Thread.yield as a no-op [JLS 17.9]; using a short but nonzero sleep would be slower but more reliable.) The method in Listing 12.10 transfers credits from one account to another; between the two update operations, invariants such as "sum of all accounts equals zero" do not hold. By sometimes yielding in the middle of an operation, you may activate timing-sensitive bugs in code that does not use adequate synchronization to access state. The inconvenience of adding these calls for testing and removing them for production can be reduced by adding them using aspect-oriented programming (AOP) tools.
Listing 12.10. Using Thread.yield to Generate More Interleavings.
public synchronized void transferCredits(Account from,
                                         Account to,
                                         int amount) {
    from.setBalance(from.getBalance() - amount);
    if (random.nextInt(1000) > THRESHOLD)
        Thread.yield();
    to.setBalance(to.getBalance() + amount);
}
12.2. Testing for Performance
Performance tests are often extended versions of functionality tests. In fact, it is almost always worthwhile to include some basic functionality testing within performance tests to ensure that you are not testing the performance of broken code.
While there is definitely overlap between performance and functionality tests, they have different goals. Performance tests seek to measure end-to-end performance metrics for representative use cases. Picking a reasonable set of usage scenarios is not always easy; ideally, tests should reflect how the objects being tested are actually used in your application.
In some cases an appropriate test scenario is obvious. Bounded buffers are nearly always used in producer-consumer designs, so it is sensible to measure the throughput of producers feeding data to consumers. We can easily extend PutTakeTest to become a performance test for this scenario.
A common secondary goal of performance testing is to select sizings empirically for various bounds: numbers of threads, buffer capacities, and so on. While these values might turn out to be sensitive enough to platform characteristics (such as processor type or even processor stepping level, number of CPUs, or memory size) to require dynamic configuration, it is equally common that reasonable choices for these values work well across a wide range of systems.
12.2.1. Extending PutTakeTest to Add Timing
The primary extension we have to make to PutTakeTest is to measure the time taken for a run. Rather than attempting to measure the time for a single operation, we get a more accurate measure by timing the entire run and dividing by the number of operations to get a per-operation time. We are already using a CyclicBarrier to start and stop the worker threads, so we can extend this by using a barrier action that measures the start and end time, as shown in Listing 12.11. We can modify the initialization of the barrier to use this barrier action by using the constructor for CyclicBarrier that accepts a barrier action:
Listing 12.11. Barrier-based Timer.
this.timer = new BarrierTimer();
this.barrier = new CyclicBarrier(npairs * 2 + 1, timer);

public class BarrierTimer implements Runnable {
    private boolean started;
    private long startTime, endTime;

    public synchronized void run() {
        long t = System.nanoTime();
        if (!started) {
            started = true;
            startTime = t;
        } else
            endTime = t;
    }

    public synchronized void clear() {
        started = false;
    }

    public synchronized long getTime() {
        return endTime - startTime;
    }
}
The modified test method using the barrier-based timer is shown in Listing 12.12.
We can learn several things from running TimedPutTakeTest. One is the throughput of the producer-consumer handoff operation for various combinations of parameters; another is how the bounded buffer scales with different numbers of threads; a third is how we might select the bound size. Answering these questions requires running the test for various combinations of parameters, so we'll need a main test driver, shown in Listing 12.13.
Figure 12.1 shows some sample results on a 4-way machine, using buffer capacities of 1, 10, 100, and 1000. We see immediately that a buffer size of one causes very poor throughput; this is because each thread can make only a tiny bit of progress before blocking and waiting for another thread. Increasing buffer size to ten helps dramatically, but increases past ten offer diminishing returns.
Figure 12.1. TimedPutTakeTest with Various Buffer Capacities.
It may be somewhat puzzling at first that adding a lot more threads degrades performance only slightly. The reason is hard to see from the data, but easy to see on a CPU performance meter such as perfbar while the test is running: even with many threads, not much computation is going on, and most of it is spent blocking and unblocking threads. So there is plenty of CPU slack for more threads to do the same thing without hurting performance very much.
However, be careful about concluding from this data that you can always add more threads to a producer-consumer program that uses a bounded buffer. This test is fairly artificial in how it simulates the application: the producers do almost no work to generate the item placed on the queue, and the consumers do almost no work with the item retrieved. If the worker threads in a real producer-consumer application do some nontrivial work to produce and consume items (as is generally the case), then this slack would disappear and the effects of having too many threads could be very noticeable. The primary purpose of this test is to measure what constraints the producer-consumer handoff via the bounded buffer imposes on overall throughput.
Listing 12.12. Testing with a Barrier-based Timer.
public void test() {
    try {
        timer.clear();
        for (int i = 0; i < nPairs; i++) {
            pool.execute(new Producer());
            pool.execute(new Consumer());
        }
        barrier.await();
        barrier.await();
        long nsPerItem = timer.getTime() / (nPairs * (long) nTrials);
        System.out.print("Throughput: " + nsPerItem + " ns/item");
        assertEquals(putSum.get(), takeSum.get());
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Listing 12.13. Driver Program for TimedPutTakeTest.
public static void main(String[] args) throws Exception {
    int tpt = 100000; // trials per thread
    for (int cap = 1; cap <= 1000; cap *= 10) {
        System.out.println("Capacity: " + cap);
        for (int pairs = 1; pairs <= 128; pairs *= 2) {
            TimedPutTakeTest t =
                new TimedPutTakeTest(cap, pairs, tpt);
            System.out.print("Pairs: " + pairs + "\t");
            t.test();
            System.out.print("\t");
            Thread.sleep(1000);
            t.test();
            System.out.println();
            Thread.sleep(1000);
        }
    }
    pool.shutdown();
}
12.2.2. Comparing Multiple Algorithms
While BoundedBuffer is a fairly solid implementation that performs reasonably well, it turns out to be no match for either ArrayBlockingQueue or LinkedBlockingQueue (which explains why this buffer algorithm wasn't selected for inclusion in the class library). The java.util.concurrent algorithms have been selected and tuned, in part using tests just like those described here, to be as efficient as we know how to make them, while still offering a wide range of functionality.[6] The main reason BoundedBuffer fares poorly is that put and take each have multiple operations that could encounter contention: acquire a semaphore, acquire a lock, release a semaphore. Other implementation approaches have fewer points at which they might contend with another thread.

[6] You might be able to outperform them if you both are a concurrency expert and can give up some of the provided functionality.
Figure 12.2 shows comparative throughput on a dual hyperthreaded machine for all three classes with 256-element buffers, using a variant of TimedPutTakeTest. This test suggests that LinkedBlockingQueue scales better than ArrayBlockingQueue. This may seem odd at first: a linked queue must allocate a link node object for each insertion, and hence seems to be doing more work than the array-based queue. However, even though it has more allocation and GC overhead, a linked queue allows more concurrent access by puts and takes than an array-based queue because the best linked queue algorithms allow the head and tail to be updated independently. Because allocation is usually thread-local, algorithms that can reduce contention by doing more allocation usually scale better. (This is another instance in which intuition based on traditional performance tuning runs counter to what is needed for scalability.)
Figure 12.2. Comparing Blocking Queue Implementations.
12.2.3. Measuring Responsiveness
So far we have focused on measuring throughput, which is usually the most important performance metric for concurrent programs. But sometimes it is more important to know how long an individual action might take to complete, and in this case we want to measure the variance of service time. Sometimes it makes sense to allow a longer average service time if it lets us obtain a smaller variance; predictability is a valuable performance characteristic too. Measuring variance allows us to estimate the answers to quality-of-service questions like "What percentage of operations will succeed in under 100 milliseconds?"
Histograms of task completion times are normally the best way to visualize variance in service time. Variances are only slightly more difficult to measure than averages: you need to keep track of per-task completion times in addition to aggregate completion time. Since timer granularity can be a factor in measuring individual task time (an individual task may take less than or close to the smallest "timer tick", which would distort measurements of task duration), to avoid measurement artifacts we can measure the run time of small batches of put and take operations instead.
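A minimal sketch of the batching idea (the batch size is an arbitrary illustrative choice):

// Sketch: record one timing sample per batch of operations rather than per
// operation, so individual samples are well above timer granularity.
static final int BATCH_SIZE = 100;   // illustrative

long timeBatch(BoundedBuffer<Integer> bb) throws InterruptedException {
    long start = System.nanoTime();
    for (int i = 0; i < BATCH_SIZE; i++) {
        bb.put(i);
        bb.take();
    }
    return (System.nanoTime() - start) / BATCH_SIZE; // mean ns per put-take pair
}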
Figure 12.3 shows the per-task completion times of a variant of TimedPutTakeTest using a buffer size of 1000 in which each of 256 concurrent tasks iterates only 1000 items, for nonfair (shaded bars) and fair semaphores (open bars). (Section 13.3 explains fair versus nonfair queuing for locks and semaphores.) Completion times for nonfair semaphores range from 104 to 8,714 ms, a factor of over eighty. It is possible to reduce this range by forcing more fairness in concurrency control; this is easy to do in BoundedBuffer by initializing the semaphores to fair mode. As Figure 12.3 shows, this succeeds in greatly reducing the variance (now ranging only from 38,194 to 38,207 ms), but unfortunately also greatly reduces the throughput. (A longer-running test with more typical kinds of tasks would probably show an even larger throughput reduction.)
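Switching BoundedBuffer to fair mode is a one-line change per semaphore, using the two-argument Semaphore constructor; a sketch of the fair-mode constructor:

// Fair-mode variant of the BoundedBuffer constructor; the boolean argument
// requests FIFO (fair) granting of permits.
public BoundedBuffer(int capacity) {
    availableItems = new Semaphore(0, true);
    availableSpaces = new Semaphore(capacity, true);
    items = (E[]) new Object[capacity];
}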
Figure 12.3. Completion Time Histogram for TimedPutTakeTest with Default (Nonfair) and Fair Semaphores.
We saw before that very small buffer sizes cause heavy context switching and poor throughput even in nonfair mode, because nearly every operation involves a context switch. As an indication that the cost of fairness results primarily from blocking threads, we can rerun this test with a buffer size of one and see that nonfair semaphores now perform comparably to fair semaphores. Figure 12.4 shows that fairness doesn't make the average much worse or the variance much better in this case.
Figure 12.4. Completion Time Histogram for TimedPutTakeTest with Single-Item Buffers.
So, unless threads are continually blocking anyway because of tight synchronization requirements, nonfair semaphores provide much better throughput and fair semaphores provide lower variance. Because the results are so dramatically different, Semaphore forces its clients to decide which of the two factors to optimize for.
12.3. Avoiding Performance Testing Pitfalls
In theory, developing performance tests is easy: find a typical usage scenario, write a program that executes that scenario many times, and time it. In practice, you have to watch out for a number of coding pitfalls that prevent performance tests from yielding meaningful results.
12.3.1. Garbage Collection
The timing of garbage collection is unpredictable, so there is always the possibility that the garbage collector will run during a measured test run. If a test program performs N iterations and triggers no garbage collection but iteration N + 1 would trigger a garbage collection, a small variation in the size of the run could have a big (but spurious) effect on the measured time per iteration.
There are two strategies for preventing garbage collection from biasing your results. One is to ensure that garbage collection does not run at all during your test (you can invoke the JVM with -verbose:gc to find out); alternatively, you can make sure that the garbage collector runs a number of times during your run so that the test program adequately reflects the cost of ongoing allocation and garbage collection. The latter strategy is often better: it requires a longer test and is more likely to reflect real-world performance.
Most producer-consumer applications involve a fair amount of allocation and garbage collection: producers allocate new objects that are used and discarded by consumers. Running the bounded buffer test for long enough to incur multiple garbage collections yields more accurate results.
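One way to check that a run actually included several collections is to consult the standard GC MXBeans; a minimal sketch (the helper name is an illustrative assumption):

// Sketch: total GC count across all collectors, sampled before and after a
// measured run to confirm the run included multiple collections.
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;

static long totalGcCount() {
    long count = 0;
    for (GarbageCollectorMXBean gc :
             ManagementFactory.getGarbageCollectorMXBeans())
        count += gc.getCollectionCount();  // may be -1 if undefined for a collector
    return count;
}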
12.3.2. Dynamic Compilation
Writing and interpreting performance benchmarks for dynamically compiled languages like Java is far more difficult than for statically compiled languages like C or C++. The HotSpot JVM (and other modern JVMs) uses a combination of bytecode interpretation and dynamic compilation. When a class is first loaded, the JVM executes it by interpreting the bytecode. At some point, if a method is run often enough, the dynamic compiler kicks in and converts it to machine code; when compilation completes, it switches from interpretation to direct execution.
The timing of compilation is unpredictable. Your timing tests should run only after all code has been compiled; there is no value in measuring the speed of the interpreted code since most programs run long enough that all frequently executed code paths are compiled. Allowing the compiler to run during a measured test run can bias test results in two ways: compilation consumes CPU resources, and measuring the run time of a combination of interpreted and compiled code is not a meaningful performance metric. Figure 12.5 shows how this can bias your results. The three timelines represent the execution of the same number of iterations: timeline A represents all interpreted execution, B represents compilation halfway through the run, and C represents compilation early in the run. The point at which compilation runs seriously affects the measured per-operation runtime.[7]

[7] The JVM may choose to perform compilation in the application thread or in the background thread; each can bias timing results in different ways.
Figure 12.5. Results Biased by Dynamic Compilation.
Code may also be decompiled (reverting to interpreted execution) and recompiled for various reasons, such as loading a class that invalidates assumptions made by prior compilations, or gathering sufficient profiling data to decide that a code path should be recompiled with different optimizations.
One way to prevent compilation from biasing your results is to run your program for a long time (at least several minutes) so that compilation and interpreted execution represent a small fraction of the total run time. Another approach is to use an unmeasured "warm-up" run, in which your code is executed enough to be fully compiled when
you actually start timing. On HotSpot, running your program with -XX:+PrintCompilation prints out a message when dynamic compilation runs, so you can verify that this is prior to, rather than during, measured test runs.
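A minimal sketch of the warm-up idiom (the run counts are arbitrary illustrative choices):

// Sketch: run the workload unmeasured until compilation has likely settled,
// then time only the subsequent runs. WARMUP_RUNS and MEASURED_RUNS are
// arbitrary assumptions.
static final int WARMUP_RUNS = 5, MEASURED_RUNS = 5;

void runBenchmark(TimedPutTakeTest t) throws Exception {
    for (int i = 0; i < WARMUP_RUNS; i++)
        t.test();                      // results discarded
    long start = System.nanoTime();
    for (int i = 0; i < MEASURED_RUNS; i++)
        t.test();                      // only these runs count
    System.out.println("measured ns: " + (System.nanoTime() - start));
}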
Running the same test several times in the same JVM instance can be used to validate the testing methodology. The first group of results should be discarded as warm-up; seeing inconsistent results in the remaining groups suggests that the test should be examined further to determine why the timing results are not repeatable.
The JVM uses various background threads for housekeeping tasks. When measuring multiple unrelated computationally intensive activities in a single run, it is a good idea to place explicit pauses between the measured trials to give the JVM a chance to catch up with background tasks with minimal interference from measured tasks. (When measuring multiple related activities, however, such as multiple runs of the same test, excluding JVM background tasks in this way may give unrealistically optimistic results.)
12.3.3. Unrealistic Sampling of Code Paths
Runtime compilers use profiling information to help optimize the code being compiled. The JVM is permitted to use information specific to the execution in order to produce better code, which means that compiling method M in one program may generate different code than compiling M in another. In some cases, the JVM may make optimizations based on assumptions that may only be true temporarily, and later back them out by invalidating the compiled code if they become untrue.[8]

[8] For example, the JVM can use monomorphic call transformation to convert a virtual method call to a direct method call if no classes currently loaded override that method, but it invalidates the compiled code if a class is subsequently loaded that overrides the method.
As a result, it is important that your test programs not only adequately approximate the usage patterns of a typical application, but also approximate the set of code paths used by such an application. Otherwise, a dynamic compiler could make special optimizations to a purely single-threaded test program that could not be applied in real applications containing at least occasional parallelism. Therefore, tests of multithreaded performance should normally be mixed with tests of single-threaded performance, even if you want to measure only single-threaded performance. (This issue does not arise in TimedPutTakeTest because even the smallest test case uses two threads.)
12.3.4. Unrealistic Degrees of Contention
Concurrent applications tend to interleave two very different sorts of work: accessing shared data, such as fetching the next task from a shared work queue, and thread-local computation (executing the task, assuming the task itself does not access shared data). Depending on the relative proportions of the two types of work, the application will experience different levels of contention and exhibit different performance and scaling behaviors.
If N threads are fetching tasks from a shared work queue and executing them, and the tasks are compute-intensive and long-running (and do not access shared data very much), there will be almost no contention; throughput is dominated by the availability of CPU resources. On the other hand, if the tasks are very short-lived, there will be a lot of contention for the work queue and throughput is dominated by the cost of synchronization.
To obtain realistic results, concurrent performance tests should try to approximate the thread-local computation done by a typical application in addition to the concurrent coordination under study. If the work done for each task in an application is significantly different in nature or scope from the test program, it is easy to arrive at unwarranted conclusions about where the performance bottlenecks lie. We saw in Section 11.5 that, for lock-based classes such as the synchronized Map implementations, whether access to the lock is mostly contended or mostly uncontended can have a dramatic effect on throughput. The tests in that section do nothing but pound on the Map; even with two threads, all attempts to access the Map are contended. However, if an application did a significant amount of thread-local computation for each time it accesses the shared data structure, the contention level might be low enough to offer good performance. A sketch of one way to mix in such work appears below.
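The sketch reuses xorShift from Listing 12.4 as dummy computation; the amount of work is an illustrative knob, not a recommended value:

// Sketch: pure thread-local computation inserted between operations on the
// shared structure, so the test's contention level better matches a real
// application. LOCAL_WORK is an illustrative assumption.
static final int LOCAL_WORK = 1000;

static int doLocalWork(int seed) {
    for (int i = 0; i < LOCAL_WORK; i++)
        seed = xorShift(seed);   // no shared state touched here
    return seed;
}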
In this regard, TimedPutTakeTest may be a poor model for some applications. Since the worker threads do not do very much, throughput is dominated by coordination overhead, and this is not necessarily the case in all applications that exchange data between producers and consumers via bounded buffers.
12.3.5. Dead Code Elimination
One of the challenges of writing good benchmarks (in any language) is that optimizing compilers are adept at spotting and eliminating dead code: code that has no effect on the outcome. Since benchmarks often don't compute anything, they are an easy target for the optimizer. Most of the time, it is a good thing when the optimizer prunes dead code from a program, but for a benchmark this is a big problem because then you are measuring less execution than you think. If you're lucky, the optimizer will prune away your entire program, and then it will be obvious that your data is bogus. If
you're unlucky, dead-code elimination will just speed up your program by some factor that could be explained by other means.
Dead-code elimination is a problem in benchmarking statically compiled languages too, but detecting that the compiler has eliminated a good chunk of your benchmark is a lot easier because you can look at the machine code and see that a part of your program is missing. With dynamically compiled languages, that information is not easily accessible.
Many microbenchmarks perform much "better" when run with HotSpot's -server compiler than with -client, not just because the server compiler can produce more efficient code, but also because it is more adept at optimizing dead code. Unfortunately, the dead-code elimination that made such short work of your benchmark won't do quite as well with code that actually does something. But you should still prefer -server to -client for both production and testing on multiprocessor systems; you just have to write your tests so that they are not susceptible to dead-code elimination.
Writing effective performance tests requires tricking the optimizer into not optimizing away your benchmark as dead code. This requires every computed result to be used somehow by your program, in a way that does not require synchronization or substantial computation.
In PutTakeTest, we compute the checksum of elements added to and removed from the queue and combine these checksums across all the threads, but this could still be optimized away if we do not actually use the checksum value. We happen to need it to verify the correctness of the algorithm, but you can ensure that a value is used by printing it out. However, you should avoid doing I/O while the test is actually running, so as not to distort the run time measurement.
A cheap trick for preventing a calculation from being optimized away without introducing too much overhead is to compute the hashCode of the field of some derived object, compare it to an arbitrary value such as the current value of System.nanoTime, and print a useless and ignorable message if they happen to match:

    if (foo.x.hashCode() == System.nanoTime())
        System.out.print(" ");
The comparison will rarely succeed, and if it does, its only effect will be to insert a harmless space character into the output. (The print method buffers output until println is called, so in the rare case that hashCode and System.nanoTime are equal no I/O is actually performed.)
Not only should every computed result be used, but results should also be unguessable. Otherwise, a smart dynamic optimizing compiler is allowed to replace actions with precomputed results. We addressed this in the construction of PutTakeTest, but any test program whose input is static data is vulnerable to this optimization.
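Putting both rules together (every result consumed, and the inputs derived from something the runtime cannot predict) might look like the following sketch. Here computationUnderTest stands in for whatever code is being benchmarked; the fold-into-a-checksum idiom is our illustration, not a library API:

static long runBenchmark(int iterations) {
    int seed = (int) System.nanoTime();      // unguessable starting input
    long checksum = 0;
    for (int i = 0; i < iterations; i++) {
        seed = computationUnderTest(seed);   // each result feeds the next input
        checksum += seed;                    // every result folded into a live value
    }
    if (checksum == System.nanoTime())       // almost never true
        System.out.print(" ");               // harmless, ignorable side effect
    return checksum;
}

static int computationUnderTest(int x) {
    return 31 * x + 1;                       // placeholder for the code under test
}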
12.4. Complementary Testing Approaches
While we'd like to believe that an effective testing program should "find all the bugs", this is an unrealistic goal. NASA devotes more of its engineering resources to testing (it is estimated they employ 20 testers for each developer) than any commercial entity could afford to, and the code produced is still not free of defects. In complex programs, no amount of testing can find all coding errors.
The goal of testing is not so much to find errors as it is to increase confidence that the code works as expected. Since it is unrealistic to assume you can find all the bugs, the goal of a quality assurance (QA) plan should be to achieve the greatest possible confidence given the testing resources available. More things can go wrong in a concurrent program than in a sequential one, and therefore more testing is required to achieve the same level of confidence. So far we've focused primarily on techniques for constructing effective unit and performance tests. Testing is critically important for building confidence that concurrent classes behave correctly, but should be only one of the QA methodologies you employ.
Different QA methodologies are more effective at finding some types of defects and less effective at finding others. By employing complementary testing methodologies such as code review and static analysis, you can achieve greater confidence than you could with any single approach.
12.4.1. Code Review
As effective and important as unit and stress tests are for finding concurrency bugs, they are no substitute for rigorous code review by multiple people. (On the other hand, code review is no substitute for testing either.) You can and should design tests to maximize their chances of discovering safety errors, and you should run them frequently, but you should not neglect to have concurrent code reviewed carefully by someone besides its author. Even concurrency experts make mistakes; taking the time to have someone else review the code is almost always worthwhile. Expert concurrent programmers are better at finding subtle races than are most test programs. (Also, platform issues such as JVM implementation details or processor memory models can prevent bugs from showing up on particular hardware or software configurations.) Code review also has other benefits; not only can it find errors, but it often improves the quality of comments describing the implementation details, thus reducing future maintenance cost and risk.
12.4.2. Static Analysis Tools
As of this writing, static analysis tools are rapidly emerging as an effective complement to formal testing and code review. Static code analysis is the process of analyzing code without executing it, and code auditing tools can analyze classes to look for instances of common bug patterns. Static analysis tools such as the open-source FindBugs[9] contain bug-pattern detectors for many common coding errors, many of which can easily be missed by testing or code review.
[9] http://findbugs.sourceforge.net
Static analysis tools produce a list of warnings that must be examined by hand to determine whether they represent actual errors. Historically, tools like lint produced so many false warnings as to scare developers away, but tools like FindBugs have been tuned to produce many fewer false alarms. Static analysis tools are still somewhat primitive (especially in their integration with development tools and lifecycle), but they are already effective enough to be a valuable addition to the testing process.
As of this writing, FindBugs includes detectors for the following concurrency-related bug patterns, and more are being added all the time. (A short sketch illustrating two of them follows the list.)
Inconsistent synchronization. Many objects follow the synchronization policy of guarding all variables with the object's intrinsic lock. If a field is accessed frequently but not always with the this lock held, this may indicate that the synchronization policy is not being consistently followed.
Analysis tools must guess at the synchronization policy because Java classes do not have formal concurrency specifications. In the future, if annotations such as @GuardedBy are standardized, auditing tools could interpret annotations rather than having to guess at the relationship between variables and locks, thus improving the quality of analysis.
Invoking Thread.run. Thread implements Runnable and therefore has a run method. However, it is almost always a mistake to call Thread.run directly; usually the programmer meant to call Thread.start.
Unreleased lock. Unlike intrinsic locks, explicit locks (see Chapter 13) are not automatically released when control exits the scope in which they were acquired. The standard idiom is to release the lock from a finally block; otherwise the lock can remain unreleased in the event of an Exception.
Empty synchronized block. While empty synchronized blocks do have semantics under the Java Memory Model, they are frequently used incorrectly, and there are usually better solutions to whatever problem the developer was trying to solve.
Double-checked locking. Double-checked locking is a broken idiom for reducing synchronization overhead in lazy initialization (see Section 16.2.4) that involves reading a shared mutable field without appropriate synchronization.
Starting a thread from a constructor. Starting a thread from a constructor introduces the risk of subclassing problems, and can allow the this reference to escape the constructor.
Notification errors. The notify and notifyAll methods indicate that an object's state may have changed in a way that would unblock threads that are waiting on the associated condition queue. These methods should be called only when the state associated with the condition queue has changed. A synchronized block that calls notify or notifyAll but does not modify any state is likely to be an error. (See Chapter 14.)
Condition wait errors. When waiting on a condition queue, Object.wait or Condition.await should be called in a loop, with the appropriate lock held, after testing some state predicate (see Chapter 14). Calling Object.wait or Condition.await without the lock held, not in a loop, or without testing some state predicate is almost certainly an error.
Misuse of Lock and Condition. Using a Lock as the lock argument for a synchronized block is likely to be a typo, as is calling Condition.wait instead of await (though the latter would likely be caught in testing, since it would throw an IllegalMonitorStateException the first time it was called).
Sleeping or waiting while holding a lock. Calling Thread.sleep with a lock held can prevent other threads from making progress for a long time and is therefore a potentially serious liveness hazard. Calling Object.wait or Condition.await with two locks held poses a similar hazard.
Spin loops. Code that does nothing but spin (busy wait) checking a field for an expected value can waste CPU time and, if the field is not volatile, is not guaranteed to terminate. Latches or condition waits are often a better technique when waiting for a state transition to occur.
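To make two of these patterns concrete, here is a small illustrative class (our own, not FindBugs output) containing the Thread.run and unreleased-lock mistakes that the corresponding detectors would flag:

import java.util.concurrent.locks.ReentrantLock;

public class BugPatternExamples {
    private final ReentrantLock lock = new ReentrantLock();

    void threadRunMistake() {
        Thread t = new Thread(new Runnable() {
            public void run() { System.out.println("working"); }
        });
        t.run();             // BUG: runs the task in the current thread;
                             // the programmer almost certainly meant t.start()
    }

    void unreleasedLock() {
        lock.lock();
        doSomethingRisky();  // BUG: if this throws, the lock is never released;
        lock.unlock();       // the unlock call belongs in a finally block
    }

    private void doSomethingRisky() { /* stand-in for real work */ }
}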
12.4.3. Aspect-oriented Testing Techniques
As of this writing, aspect-oriented programming (AOP) techniques have only limited applicability to concurrency, because most popular AOP tools do not yet support pointcuts at synchronization points. However, AOP can be applied to assert invariants or some aspects of compliance with synchronization policies. For example, (Laddad, 2003) provides an example of using an aspect to wrap all calls to non-thread-safe Swing methods with the assertion that the call is occurring in the event thread. As it requires no code changes, this technique is easy to apply and can disclose subtle publication and thread-confinement errors.
12.4.4. Profilers and Monitoring Tools
Most commercial profiling tools have some support for threads. They vary in feature set and effectiveness, but can often provide insight into what your program is doing (although profiling tools are usually intrusive and can substantially affect program timing and behavior). Most offer a display showing a timeline for each thread with different colors for the various thread states (runnable, blocked waiting for a lock, blocked waiting for I/O, etc.). Such a display can show how effectively your program is utilizing the available CPU resources, and if it is doing badly, where to look for the cause. (Many profilers also claim features for identifying which locks are causing contention, but in practice these features are often a blunter instrument than is desired for analyzing a program's locking behavior.)
The built-in JMX agent also offers some limited features for monitoring thread behavior. The ThreadInfo class includes the thread's current state and, if the thread is blocked, the lock or condition queue on which it is blocked. If the "thread contention monitoring" feature is enabled (it is disabled by default because of its performance impact), ThreadInfo also includes the number of times that the thread has blocked waiting for a lock or notification, and the cumulative amount of time it has spent waiting.
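A minimal sketch of reading this information through the standard java.lang.management API (assuming Java 6 or later for dumpAllThreads; the output format is our own):

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class ThreadContentionSnapshot {
    public static void main(String[] args) {
        ThreadMXBean mbean = ManagementFactory.getThreadMXBean();
        if (mbean.isThreadContentionMonitoringSupported())
            mbean.setThreadContentionMonitoringEnabled(true);  // disabled by default
        for (ThreadInfo info : mbean.dumpAllThreads(false, false)) {
            // getBlockedTime returns -1 if contention monitoring is off
            System.out.printf("%s: state=%s blocked=%d times, %d ms%n",
                    info.getThreadName(), info.getThreadState(),
                    info.getBlockedCount(), info.getBlockedTime());
        }
    }
}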
Summary
Testing concurrent programs for correctness can be extremely challenging because many of the possible failure modes of concurrent programs are low-probability events that are sensitive to timing, load, and other hard-to-reproduce conditions. Further, the testing infrastructure can introduce additional synchronization or timing constraints that can mask concurrency problems in the code being tested. Testing concurrent programs for performance can be equally challenging; Java programs are more difficult to test than programs written in statically compiled languages like C, because timing measurements can be affected by dynamic compilation, garbage collection, and adaptive optimization.
To have the best chance of finding latent bugs before they occur in production, combine traditional testing techniques (being careful to avoid the pitfalls discussed here) with code reviews and automated analysis tools. Each of these techniques finds problems that the others are likely to miss.
Part IV: Advanced Topics

Chapter 13. Explicit Locks
Chapter 14. Building Custom Synchronizers
Chapter 15. Atomic Variables and Nonblocking Synchronization
Chapter 16. The Java Memory Model
Appendix A. Annotations for Concurrency
Bibliography
Chapter 13. Explicit Locks
Before Java 5.0, the only mechanisms for coordinating access to shared data were synchronized and volatile. Java 5.0 adds another option: ReentrantLock. Contrary to what some have written, ReentrantLock is not a replacement for intrinsic locking, but rather an alternative with advanced features for when intrinsic locking proves too limited.
13.1. Lock and ReentrantLock
The Lock interface, shown in Listing 13.1, defines a number of abstract locking operations. Unlike intrinsic locking, Lock offers a choice of unconditional, polled, timed, and interruptible lock acquisition, and all lock and unlock operations are explicit. Lock implementations must provide the same memory-visibility semantics as intrinsic locks, but can differ in their locking semantics, scheduling algorithms, ordering guarantees, and performance characteristics. (Lock.newCondition is covered in Chapter 14.)
Listing 13.1. Lock Interface.

public interface Lock {
    void lock();
    void lockInterruptibly() throws InterruptedException;
    boolean tryLock();
    boolean tryLock(long timeout, TimeUnit unit)
        throws InterruptedException;
    void unlock();
    Condition newCondition();
}
ReentrantLock implements Lock, providing the same mutual exclusion and memory-visibility guarantees as synchronized. Acquiring a ReentrantLock has the same memory semantics as entering a synchronized block, and releasing a ReentrantLock has the same memory semantics as exiting a synchronized block. (Memory visibility is covered in Section 3.1 and in Chapter 16.) And, like synchronized, ReentrantLock offers reentrant locking semantics (see Section 2.3.2). ReentrantLock supports all of the lock-acquisition modes defined by Lock, providing more flexibility for dealing with lock unavailability than does synchronized.
Why create a new locking mechanism that is so similar to intrinsic locking? Intrinsic locking works fine in most situations but has some functional limitations: it is not possible to interrupt a thread waiting to acquire a lock, or to attempt to acquire a lock without being willing to wait for it forever. Intrinsic locks also must be released in the same block of code in which they are acquired; this simplifies coding and interacts nicely with exception handling, but makes non-block-structured locking disciplines impossible. None of these are reasons to abandon synchronized, but in some cases a more flexible locking mechanism offers better liveness or performance.
Listing 13.2 shows the canonical form for using a Lock. This idiom is somewhat more complicated than using intrinsic locks: the lock must be released in a finally block. Otherwise, the lock would never be released if the guarded code were to throw an exception. When using locking, you must also consider what happens if an exception is thrown out of the try block; if it is possible for the object to be left in an inconsistent state, additional try-catch or try-finally blocks may be needed. (You should always consider the effect of exceptions when using any form of locking, including intrinsic locking.)
Failing to use finally to release a Lock is a ticking time bomb. When it goes off, you will have a hard time tracking down its origin as there will be no record of where or when the Lock should have been released. This is one reason not to use ReentrantLock as a blanket substitute for synchronized: it is more "dangerous" because it doesn't automatically clean up the lock when control leaves the guarded block. While remembering to release the lock from a finally block is not all that difficult, it is also not impossible to forget.[1]
[1] FindBugs has an "unreleased lock" detector identifying when a Lock is not released in all code paths out of the block in which it was acquired.
Listing 13.2. Guarding Object State Using ReentrantLock.

Lock lock = new ReentrantLock();
...
lock.lock();
try {
    // update object state
    // catch exceptions and restore invariants if necessary
} finally {
    lock.unlock();
}
13.1.1. Polled and Timed Lock Acquisition
The timed and polled lock-acquisition modes provided by tryLock allow more sophisticated error recovery than unconditional acquisition. With intrinsic locks, a deadlock is fatal: the only way to recover is to restart the application, and the only defense is to construct your program so that inconsistent lock ordering is impossible. Timed and polled locking offer another option: probabilistic deadlock avoidance.
Using timed or polled lock acquisition (tryLock) lets you regain control if you cannot acquire all the required locks, release the ones you did acquire, and try again (or at least log the failure and do something else). Listing 13.3 shows an alternate way of addressing the dynamic ordering deadlock from Section 10.1.2: use tryLock to attempt to acquire both locks, but back off and retry if they cannot both be acquired. The sleep time has a fixed component and a random component to reduce the likelihood of livelock. If the locks cannot be acquired within the specified time, transferMoney returns a failure status so that the operation can fail gracefully. (See [CPJ 2.5.1.2] and [CPJ 2.5.1.3] for more examples of using polled locks for deadlock avoidance.)
Timed locks are also useful in implementing activities that manage a time budget (see Section 6.3.7). When an activity with a time budget calls a blocking method, it can supply a timeout corresponding to the remaining time in the budget. This lets activities terminate early if they cannot deliver a result within the desired time. With intrinsic locks, there is no way to cancel a lock acquisition once it is started, so intrinsic locks put the ability to implement time-budgeted activities at risk.
The travel portal example in Listing 6.17 on page 134 creates a separate task for each car-rental company from which it was soliciting bids. Soliciting a bid probably involves some sort of network-based request mechanism, such as a web service request. But soliciting a bid might also require exclusive access to a scarce resource, such as a direct communications line to the company.
We saw one way to ensure serialized access to a resource in Section 9.5: a single-threaded executor. Another approach is to use an exclusive lock to guard access to the resource. The code in Listing 13.4 tries to send a message on a shared communications line guarded by a Lock, but fails gracefully if it cannot do so within its time budget. The timed tryLock makes it practical to incorporate exclusive locking into such a time-limited activity.
13.1.2. Interruptible Lock Acquisition
Just as timed lock acquisition allows exclusive locking to be used within time-limited activities, interruptible lock acquisition allows locking to be used within cancellable activities. Section 7.1.6 identified several mechanisms, such as acquiring an intrinsic lock, that are not responsive to interruption. These non-interruptible blocking mechanisms complicate the implementation of cancellable tasks. The lockInterruptibly method allows you to try to acquire a lock while remaining responsive to interruption, and its inclusion in Lock avoids creating another category of non-interruptible blocking mechanisms.
Listing 13.3. Avoiding Lock-ordering Deadlock Using tryLock.

public boolean transferMoney(Account fromAcct,
                             Account toAcct,
                             DollarAmount amount,
                             long timeout,
                             TimeUnit unit)
        throws InsufficientFundsException, InterruptedException {
    long fixedDelay = getFixedDelayComponentNanos(timeout, unit);
    long randMod = getRandomDelayModulusNanos(timeout, unit);
    long stopTime = System.nanoTime() + unit.toNanos(timeout);

    while (true) {
        if (fromAcct.lock.tryLock()) {
            try {
                if (toAcct.lock.tryLock()) {
                    try {
                        if (fromAcct.getBalance().compareTo(amount)
                                < 0)
                            throw new InsufficientFundsException();
                        else {
                            fromAcct.debit(amount);
                            toAcct.credit(amount);
                            return true;
                        }
                    } finally {
                        toAcct.lock.unlock();
                    }
                }
            } finally {
                fromAcct.lock.unlock();
            }
        }
        if (System.nanoTime() > stopTime)
            return false;
        NANOSECONDS.sleep(fixedDelay + rnd.nextLong() % randMod);
    }
}
Listing 13.4. Locking with a Time Budget.

public boolean trySendOnSharedLine(String message,
                                   long timeout, TimeUnit unit)
        throws InterruptedException {
    long nanosToLock = unit.toNanos(timeout)
                     - estimatedNanosToSend(message);
    if (!lock.tryLock(nanosToLock, NANOSECONDS))
        return false;
    try {
        return sendOnSharedLine(message);
    } finally {
        lock.unlock();
    }
}
The canonical structure of interruptible lock acquisition is slightly more complicated than normal lock acquisition, as two try blocks are needed. (If the interruptible lock acquisition can throw InterruptedException, the standard try-finally locking idiom works.) Listing 13.5 uses lockInterruptibly to implement sendOnSharedLine from Listing 13.4 so that we can call it from a cancellable task. The timed tryLock is also responsive to interruption and so can be used when you need both timed and interruptible lock acquisition.
Listing 13.5. Interruptible Lock Acquisition.

public boolean sendOnSharedLine(String message)
        throws InterruptedException {
    lock.lockInterruptibly();
    try {
        return cancellableSendOnSharedLine(message);
    } finally {
        lock.unlock();
    }
}

private boolean cancellableSendOnSharedLine(String message)
        throws InterruptedException { ... }
13.1.3. Non-block-structured Locking
With intrinsic locks, acquire-release pairs are block-structured: a lock is always released in the same basic block in which it was acquired, regardless of how control exits the block. Automatic lock release simplifies analysis and prevents potential coding errors, but sometimes a more flexible locking discipline is needed.
In Chapter 11, we saw how reducing lock granularity can enhance scalability. Lock striping allows different hash chains in a hash-based collection to use different locks. We can apply a similar principle to reduce locking granularity in a linked list by using a separate lock for each link node, allowing different threads to operate independently on different portions of the list. The lock for a given node guards the link pointers and the data stored in that node, so when traversing or modifying the list we must hold the lock on one node until we acquire the lock on the next node; only then can we release the lock on the first node. An example of this technique, called hand-over-hand locking or lock coupling, appears in [CPJ 2.5.1.4].
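A minimal sketch of what lock coupling might look like for such a list; the Node class and the traversal method are our own illustration, not the [CPJ] code:

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class FineGrainedList<E> {
    private static class Node<E> {
        final Lock lock = new ReentrantLock();
        E item;
        Node<E> next;
        Node(E item) { this.item = item; }
    }

    private final Node<E> head = new Node<E>(null);  // sentinel node

    public boolean contains(E target) {
        Node<E> pred = head;
        pred.lock.lock();                  // start by locking the sentinel
        try {
            Node<E> curr = pred.next;
            while (curr != null) {
                curr.lock.lock();          // acquire the next node's lock...
                pred.lock.unlock();        // ...before releasing the previous one
                pred = curr;               // pred is now the (locked) current node
                if (curr.item.equals(target))
                    return true;
                curr = curr.next;
            }
            return false;
        } finally {
            pred.lock.unlock();            // exactly one lock is still held here
        }
    }
}

Because every thread acquires node locks in the same head-to-tail order, this discipline cannot deadlock, yet threads can operate on disjoint regions of the list concurrently.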
13.2. Performance Considerations
When ReentrantLock was added in Java 5.0, it offered far better contended performance than intrinsic locking. For synchronization primitives, contended performance is the key to scalability: if more resources are expended on lock management and scheduling, fewer are available for the application. A better lock implementation makes fewer system calls, forces fewer context switches, and initiates less memory-synchronization traffic on the shared memory bus, operations that are time-consuming and divert computing resources from the program.
Java 6 uses an improved algorithm for managing intrinsic locks, similar to that used by ReentrantLock, that closes the scalability gap considerably. Figure 13.1 shows the performance difference between intrinsic locks and ReentrantLock on Java 5.0 and on a prerelease build of Java 6 on a four-way Opteron system running Solaris. The curves represent the "speedup" of ReentrantLock over intrinsic locking on a single JVM version. On Java 5.0, ReentrantLock offers considerably better throughput, but on Java 6, the two are quite close.[2] The test program is the same one used in Section 11.5, this time comparing the throughput of a HashMap guarded by an intrinsic lock and by a ReentrantLock.
[2] Though this particular graph doesn't show it, the scalability difference between Java 5.0 and Java 6 really does come from improvement in intrinsic locking, rather than from regression in ReentrantLock.
Figure 13.1. Intrinsic Locking Versus ReentrantLock Performance on Java 5.0 and Java 6.
On Java 5.0, the performance of intrinsic locking drops dramatically in going from one thread (no contention) to more than one thread; the performance of ReentrantLock drops far less, showing its better scalability. But on Java 6, it is a different story: intrinsic locks no longer fall apart under contention, and the two scale fairly similarly.
Graphs like Figure 13.1 remind us that statements of the form "X is faster than Y" are at best short-lived. Performance and scalability are sensitive to platform factors such as CPU, processor count, cache size, and JVM characteristics, all of which can change over time.[3]
[3] When we started this book, ReentrantLock seemed the last word in lock scalability. Less than a year later, intrinsic locking gives it a good run for its money. Performance is not just a moving target; it can be a fast-moving target.
Performance is a moving target; yesterday's benchmark showing that X is faster than Y may already be out of date today.
13.3. Fairness
The ReentrantLock constructor offers a choice of two fairness options: create a nonfair lock (the default) or a fair lock. Threads acquire a fair lock in the order in which they requested it, whereas a nonfair lock permits barging: threads requesting a lock can jump ahead of the queue of waiting threads if the lock happens to be available when it is requested. (Semaphore also offers the choice of fair or nonfair acquisition ordering.) Nonfair ReentrantLocks do not go out of their way to promote barging; they simply don't prevent a thread from barging if it shows up at the right time. With a fair lock, a newly requesting thread is queued if the lock is held by another thread or if threads are queued waiting for the lock; with a nonfair lock, the thread is queued only if the lock is currently held.[4]
[4] The polled tryLock always barges, even for fair locks.
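Choosing between the two is just a constructor argument. A brief illustration (the field names are our own):

import java.util.concurrent.Semaphore;
import java.util.concurrent.locks.ReentrantLock;

public class FairnessChoices {
    ReentrantLock nonfair = new ReentrantLock();     // default: barging permitted
    ReentrantLock fair = new ReentrantLock(true);    // threads acquire in arrival order
    Semaphore fairSem = new Semaphore(10, true);     // Semaphore offers the same choice
}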
Wouldn't we want all locks to be fair? After all, fairness is good and unfairness is bad, right? (Just ask your kids.) When it comes to locking, though, fairness has a significant performance cost because of the overhead of suspending and resuming threads. In practice, a statistical fairness guarantee, promising that a blocked thread will eventually acquire the lock, is often good enough, and is far less expensive to deliver. Some algorithms rely on fair queuing to ensure their correctness, but these are unusual. In most cases, the performance benefits of nonfair locks outweigh the benefits of fair queuing.
Figure 13.2 shows another run of the Map performance test, this time comparing HashMap wrapped with fair and nonfair ReentrantLocks on a four-way Opteron system running Solaris, plotted on a log scale.[5] The fairness penalty is nearly two orders of magnitude. Don't pay for fairness if you don't need it.
[5] The graph for ConcurrentHashMap is fairly wiggly in the region between four and eight threads. These variations almost certainly come from measurement noise, which could be introduced by coincidental interactions with the hash codes of the elements, thread scheduling, map resizing, garbage collection or other memory-system effects, or by the OS deciding to run some periodic housekeeping task around the time that test case ran. The reality is that there are all sorts of variations in performance tests that usually aren't worth bothering to control. We made no attempt to clean up our graphs artificially, because real-world performance measurements are also full of noise.
Figure 13.2. Fair Versus Nonfair Lock Performance.
One reason barging locks perform so much better than fair locks under heavy contention is that there can be a significant delay between when a suspended thread is resumed and when it actually runs. Let's say thread A holds a lock and thread B asks for that lock. Since the lock is busy, B is suspended. When A releases the lock, B is resumed so it can try again. In the meantime, though, if thread C requests the lock, there is a good chance that C can acquire the lock, use it, and release it before B even finishes waking up. In this case, everyone wins: B gets the lock no later than it otherwise would have, C gets it much earlier, and throughput is improved.
Fair locks tend to work best when they are held for a relatively long time or when the mean time between lock requests is relatively long. In these cases, the condition under which barging provides a throughput advantage (when the lock is unheld but a thread is currently waking up to claim it) is less likely to hold.
Like the default ReentrantLock, intrinsic locking offers no deterministic fairness guarantees, but the statistical fairness guarantees of most locking implementations are good enough for almost all situations. The language specification does not require the JVM to implement intrinsic locks fairly, and no production JVMs do. ReentrantLock does not depress lock fairness to new lows; it only makes explicit something that was present all along.
13.4. Choosing Between Synchronized and ReentrantLock
ReentrantLock provides the same locking and memory semantics as intrinsic locking, as well as additional features such as timed lock waits, interruptible lock waits, fairness, and the ability to implement non-block-structured locking. The performance of ReentrantLock appears to dominate that of intrinsic locking, winning slightly on Java 6 and dramatically on Java 5.0. So why not deprecate synchronized and encourage all new concurrent code to use ReentrantLock? Some authors have in fact suggested this, treating synchronized as a "legacy" construct. But this is taking a good thing way too far.
Intrinsic locks still have significant advantages over explicit locks. The notation is familiar and compact, and many existing programs already use intrinsic locking; mixing the two could be confusing and error-prone. ReentrantLock is definitely a more dangerous tool than synchronization; if you forget to wrap the unlock call in a finally block, your code will probably appear to run properly, but you've created a time bomb that may well hurt innocent bystanders. Save ReentrantLock for situations in which you need something ReentrantLock provides that intrinsic locking doesn't.
ReentrantLock is an advanced tool for situations where intrinsic locking is not practical. Use it if you need its advanced features: timed, polled, or interruptible lock acquisition, fair queuing, or non-block-structured locking. Otherwise, prefer synchronized.
Under Java 5.0, intrinsic locking has another advantage over ReentrantLock: thread dumps show which call frames acquired which locks and can detect and identify deadlocked threads. The JVM knows nothing about which threads hold ReentrantLocks and therefore cannot help in debugging threading problems using ReentrantLock. This disparity is addressed in Java 6 by providing a management and monitoring interface with which locks can register, enabling locking information for ReentrantLocks to appear in thread dumps and through other management and debugging interfaces. The availability of this information for debugging is a substantial, if mostly temporary, advantage for synchronized; locking information in thread dumps has saved many programmers from utter consternation. The non-block-structured nature of ReentrantLock still means that lock acquisitions cannot be tied to specific stack frames, as they can with intrinsic locks.
Future performance improvements are likely to favor synchronized over ReentrantLock. Because synchronized is built into the JVM, it can perform optimizations such as lock elision for thread-confined lock objects and lock coarsening to eliminate synchronization with intrinsic locks (see Section 11.3.2); doing this with library-based locks seems far less likely. Unless you are deploying on Java 5.0 for the foreseeable future and you have a demonstrated need for ReentrantLock's scalability benefits on that platform, it is not a good idea to choose ReentrantLock over synchronized for performance reasons.
13.5. Read-write Locks
ReentrantLock implements a standard mutual-exclusion lock: at most one thread at a time can hold a ReentrantLock. But mutual exclusion is frequently a stronger locking discipline than needed to preserve data integrity, and thus limits concurrency more than necessary. Mutual exclusion is a conservative locking strategy that prevents writer/writer and writer/reader overlap, but also prevents reader/reader overlap. In many cases, data structures are "read-mostly": they are mutable and are sometimes modified, but most accesses involve only reading. In these cases, it would be nice to relax the locking requirements to allow multiple readers to access the data structure at once. As long as each thread is guaranteed an up-to-date view of the data and no other thread modifies the data while the readers are viewing it, there will be no problems. This is what read-write locks allow: a resource can be accessed by multiple readers or a single writer at a time, but not both.
ReadWriteLock, shown in Listing 13.6, exposes two Lock objects, one for reading and one for writing. To read data guarded by a ReadWriteLock you must first acquire the read lock, and to modify data guarded by a ReadWriteLock you must first acquire the write lock. While there may appear to be two separate locks, the read lock and write lock are simply different views of an integrated read-write lock object.
Listing 13.6. ReadWriteLock Interface.

public interface ReadWriteLock {
    Lock readLock();
    Lock writeLock();
}
The locking strategy implemented by read-write locks allows multiple simultaneous readers but only a single writer. Like Lock, ReadWriteLock admits multiple implementations that can vary in performance, scheduling guarantees, acquisition preference, fairness, or locking semantics.
Read-write locks are a performance optimization designed to allow greater concurrency in certain situations. In practice, read-write locks can improve performance for frequently accessed read-mostly data structures on multiprocessor systems; under other conditions they perform slightly worse than exclusive locks due to their greater complexity. Whether they are an improvement in any given situation is best determined via profiling; because ReadWriteLock uses Lock for the read and write portions of the lock, it is relatively easy to swap out a read-write lock for an exclusive one if profiling determines that a read-write lock is not a win.
The interaction between the read and write locks allows for a number of possible implementations. Some of the implementation options for a ReadWriteLock are:
Release preference. When a writer releases the write lock and both readers and writers are queued up, who should be given preference: readers, writers, or whoever asked first?
Reader barging. If the lock is held by readers but there are waiting writers, should newly arriving readers be granted immediate access, or should they wait behind the writers? Allowing readers to barge ahead of writers enhances concurrency but runs the risk of starving writers.
Reentrancy. Are the read and write locks reentrant?
Downgrading. If a thread holds the write lock, can it acquire the read lock without releasing the write lock? This would let a writer "downgrade" to a read lock without letting other writers modify the guarded resource in the meantime. (A sketch of this idiom follows the list.)
Upgrading. Can a read lock be upgraded to a write lock in preference to other waiting readers or writers? Most read-write lock implementations do not support upgrading, because without an explicit upgrade operation it is deadlock-prone. (If two readers simultaneously attempt to upgrade to a write lock, neither will release the read lock.)
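Of these options, downgrading is supported by the ReentrantReadWriteLock described below. A minimal sketch of the idiom (the cache-refresh scenario is our own illustration):

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CachedData {
    private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
    private Object data;

    public Object refreshAndRead() {
        rwl.writeLock().lock();
        try {
            data = loadData();          // exclusive access while updating
            rwl.readLock().lock();      // downgrade: acquire the read lock...
        } finally {
            rwl.writeLock().unlock();   // ...before releasing the write lock
        }
        try {
            return data;                // other readers may now proceed too
        } finally {
            rwl.readLock().unlock();
        }
    }

    private Object loadData() { return new Object(); }  // stand-in for real work
}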
ReentrantReadWriteLock provides reentrant locking semantics for both locks. Like ReentrantLock, a ReentrantReadWriteLock can be constructed as nonfair (the default) or fair. With a fair lock, preference is given to the thread that has been waiting the longest; if the lock is held by readers and a thread requests the write lock, no more readers are allowed to acquire the read lock until the writer has been serviced and releases the write lock. With a nonfair lock, the order in which threads are granted access is unspecified. Downgrading from writer to reader is permitted; upgrading from reader to writer is not (attempting to do so results in deadlock).
Like ReentrantLock, the write lock in ReentrantReadWriteLock has a unique owner and can be released only by the thread that acquired it. In Java 5.0, the read lock behaves more like a Semaphore than a lock, maintaining only the count of active readers, not their identities. This behavior was changed in Java 6 to keep track also of which threads have been granted the read lock.[6]
[6] One reason for this change is that under Java 5.0, the lock implementation cannot distinguish between a thread requesting the read lock for the first time and a reentrant lock request, which would make fair read-write locks deadlock-prone.
Read-write locks can improve concurrency when locks are typically held for a moderately long time and most operations do not modify the guarded resources. ReadWriteMap in Listing 13.7 uses a ReentrantReadWriteLock to wrap a Map so that it can be shared safely by multiple readers and still prevent reader-writer or writer-writer conflicts.[7] In reality, ConcurrentHashMap's performance is so good that you would probably use it rather than this approach if all you needed was a concurrent hash-based map, but this technique would be useful if you want to provide more concurrent access to an alternate Map implementation such as LinkedHashMap.
[7] ReadWriteMap does not implement Map because implementing the view methods such as entrySet and values would be difficult and the "easy" methods are usually sufficient.
Listing 13.7. Wrapping a Map with a Read-write Lock.

public class ReadWriteMap<K,V> {
    private final Map<K,V> map;
    private final ReadWriteLock lock = new ReentrantReadWriteLock();
    private final Lock r = lock.readLock();
    private final Lock w = lock.writeLock();

    public ReadWriteMap(Map<K,V> map) {
        this.map = map;
    }

    public V put(K key, V value) {
        w.lock();
        try {
            return map.put(key, value);
        } finally {
            w.unlock();
        }
    }
    // Do the same for remove(), putAll(), clear()

    public V get(Object key) {
        r.lock();
        try {
            return map.get(key);
        } finally {
            r.unlock();
        }
    }
    // Do the same for other read-only Map methods
}
Figure 13.3 shows a throughput comparison between an ArrayList wrapped with a ReentrantLock and with a ReadWriteLock on a four-way Opteron system running Solaris. The test program used here is similar to the Map performance test we've been using throughout the book: each operation randomly selects a value and searches for it in the collection, and a small percentage of operations modify the contents of the collection.
Figure 13.3. Read-write Lock Performance.
Summary
Explicit Locks offer an extended feature set compared to intrinsic locking, including greater flexibility in dealing with lock unavailability and greater control over queuing behavior. But ReentrantLock is not a blanket substitute for synchronized; use it only when you need features that synchronized lacks.
Read-write locks allow multiple readers to access a guarded object concurrently, offering the potential for improved scalability when accessing read-mostly data structures.
Chapter 14. Building Custom Synchronizers
The class libraries include a number of state-dependent classes, those having operations with state-based preconditions, such as FutureTask, Semaphore, and BlockingQueue. For example, you cannot remove an item from an empty queue or retrieve the result of a task that has not yet finished; before these operations can proceed, you must wait until the queue enters the "nonempty" state or the task enters the "completed" state.
The easiest way to construct a state-dependent class is usually to build on top of an existing state-dependent library class; we did this in ValueLatch on page 187, using a CountDownLatch to provide the required blocking behavior. But if the library classes do not provide the functionality you need, you can also build your own synchronizers using the low-level mechanisms provided by the language and libraries, including intrinsic condition queues, explicit Condition objects, and the AbstractQueuedSynchronizer framework. This chapter explores the various options for implementing state dependence and the rules for using the state dependence mechanisms provided by the platform.
14.1. Managing State Dependence
In a single-threaded program, if a state-based precondition (like "the connection pool is nonempty") does not hold when a method is called, it will never become true. Therefore, classes in sequential programs can be coded to fail when their preconditions do not hold. But in a concurrent program, state-based conditions can change through the actions of other threads: a pool that was empty a few instructions ago can become nonempty because another thread returned an element. State-dependent methods on concurrent objects can sometimes get away with failing when their preconditions are not met, but there is often a better alternative: wait for the precondition to become true.
State-dependent operations that block until the operation can proceed are more convenient and less error-prone than those that simply fail. The built-in condition queue mechanism enables threads to block until an object has entered a state that allows progress and to wake blocked threads when they may be able to make further progress. We cover the details of condition queues in Section 14.2, but to motivate the value of an efficient condition wait mechanism, we first show how state dependence might be (painfully) tackled using polling and sleeping.
A blocking state-dependent action takes the form shown in Listing 14.1. The pattern of locking is somewhat unusual in that the lock is released and reacquired in the middle of the operation. The state variables that make up the precondition must be guarded by the object's lock, so that they can remain constant while the precondition is tested. But if the precondition does not hold, the lock must be released so another thread can modify the object state; otherwise the precondition will never become true. The lock must then be reacquired before testing the precondition again.
Listing 14.1. Structure of Blocking State-dependent Actions.

void blockingAction() throws InterruptedException {
    acquire lock on object state
    while (precondition does not hold) {
        release lock
        wait until precondition might hold
        optionally fail if interrupted or timeout expires
        reacquire lock
    }
    perform action
}
Bounded buffers such as ArrayBlockingQueue are commonly used in producer-consumer designs. A bounded buffer provides put and take operations, each of which has preconditions: you cannot take an element from an empty buffer, nor put an element into a full buffer. State-dependent operations can deal with precondition failure by throwing an exception or returning an error status (making it the caller's problem), or by blocking until the object transitions to the right state.
We're going to develop several implementations of a bounded buffer that take different approaches to handling precondition failure. Each extends BaseBoundedBuffer in Listing 14.2, which implements a classic array-based circular buffer where the buffer state variables (buf, head, tail, and count) are guarded by the buffer's intrinsic lock. It provides synchronized doPut and doTake methods that are used by subclasses to implement the put and take operations; the underlying state is hidden from the subclasses.
14.1.1. Example: Propagating Precondition Failure to Callers
GrumpyBoundedBuffer in Listing 14.3 is a crude first attempt at implementing a bounded buffer. The put and take methods are synchronized to ensure exclusive access to the buffer state, since both employ check-then-act logic in accessing the buffer.
While this approach is easy enough to implement, it is annoying to use. Exceptions are supposed to be for exceptional conditions [EJ Item 39]. "Buffer is full" is not an exceptional condition for a bounded buffer any more than "red" is an exceptional condition for a traffic signal. The simplification in implementing the buffer (forcing the caller to manage the state dependence) is more than made up for by the substantial complication in using it, since now the caller must be prepared to catch exceptions and possibly retry for every buffer operation.[1] A well-structured call to take is shown in Listing 14.4; not very pretty, especially if put and take are called throughout the program.
[1] Pushing the state dependence back to the caller also makes it nearly impossible to do things like preserve FIFO ordering; by forcing the caller to retry, you lose the information of who arrived first.
Listing 14.2. Base Class for Bounded Buffer Implementations.

@ThreadSafe
public abstract class BaseBoundedBuffer<V> {
    @GuardedBy("this") private final V[] buf;
    @GuardedBy("this") private int tail;
    @GuardedBy("this") private int head;
    @GuardedBy("this") private int count;

    protected BaseBoundedBuffer(int capacity) {
        this.buf = (V[]) new Object[capacity];
    }

    protected synchronized final void doPut(V v) {
        buf[tail] = v;
        if (++tail == buf.length)
            tail = 0;
        ++count;
    }

    protected synchronized final V doTake() {
        V v = buf[head];
        buf[head] = null;
        if (++head == buf.length)
            head = 0;
        --count;
        return v;
    }

    public synchronized final boolean isFull() {
        return count == buf.length;
    }

    public synchronized final boolean isEmpty() {
        return count == 0;
    }
}
Listing 14.3. Bounded Buffer that Balks When Preconditions are Not Met.

@ThreadSafe
public class GrumpyBoundedBuffer<V> extends BaseBoundedBuffer<V> {
    public GrumpyBoundedBuffer(int size) { super(size); }

    public synchronized void put(V v) throws BufferFullException {
        if (isFull())
            throw new BufferFullException();
        doPut(v);
    }

    public synchronized V take() throws BufferEmptyException {
        if (isEmpty())
            throw new BufferEmptyException();
        return doTake();
    }
}
Listing 14.4. Client Logic for Calling GrumpyBoundedBuffer.

while (true) {
    try {
        V item = buffer.take();
        // use item
        break;
    } catch (BufferEmptyException e) {
        Thread.sleep(SLEEP_GRANULARITY);
    }
}
A variant of this approach is to return an error value when the buffer is in the wrong state. This is a minor improvement in that it doesn't abuse the exception mechanism by throwing an exception that really means "sorry, try again", but it does not address the fundamental problem: that callers must deal with precondition failures themselves.[2]
[2] Queue offers both of these options: poll returns null if the queue is empty, and remove throws an exception; but Queue is not intended for use in producer-consumer designs. BlockingQueue, whose operations block until the queue is in the right state to proceed, is a better choice when producers and consumers will execute concurrently.
The client code in Listing 14.4 is not the only way to implement the retry logic. The caller could retry the take immediately, without sleeping, an approach known as busy waiting or spin waiting. This could consume quite a lot of CPU time if the buffer state does not change for a while. On the other hand, if the caller decides to sleep so as not to consume so much CPU time, it could easily "oversleep" if the buffer state changes shortly after the call to sleep. So the client code is left with the choice between the poor CPU usage of spinning and the poor responsiveness of sleeping. (Somewhere between busy waiting and sleeping would be calling Thread.yield in each iteration, which is a hint to the scheduler that this would be a reasonable time to let another thread run. If you are waiting for another thread to do something, that something might happen faster if you yield the processor rather than consuming your full scheduling quantum.)
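Such a middle-ground retry loop might look like the following sketch, in the same fragment style as Listing 14.4 (ours; SPIN_THRESHOLD is an arbitrary illustrative constant):

int spins = 0;
while (true) {
    try {
        V item = buffer.take();
        // use item
        break;
    } catch (BufferEmptyException e) {
        if (++spins > SPIN_THRESHOLD)
            Thread.yield();     // hint: let another thread make progress
    }
}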
14.1.2. Example: Crude Blocking by Polling and Sleeping
SleepyBoundedBuffer in Listing 14.5 attempts to spare callers the inconvenience of implementing the retry logic on each call by encapsulating the same crude "poll and sleep" retry mechanism within the put and take operations. If the buffer is empty, take sleeps until another thread puts some data into the buffer; if the buffer is full, put sleeps until another thread makes room by removing some data. This approach encapsulates precondition management and simplifies using the buffer, definitely a step in the right direction.
The implementation of SleepyBoundedBuffer is more complicated than the previous attempt.[3] The buffer code must test the appropriate state condition with the buffer lock held, because the variables that represent the state condition are guarded by the buffer lock. If the test fails, the executing thread sleeps for a while, first releasing the lock so other threads can access the buffer.[4] Once the thread wakes up, it reacquires the lock and tries again, alternating between sleeping and testing the state condition until the operation can proceed.
[3] We will spare you the details of Snow White's other five bounded buffer implementations, especially SneezyBoundedBuffer.
[4] It is usually a bad idea for a thread to go to sleep or otherwise block with a lock held, but in this case is even worse because the desired condition (buffer is full/empty) can never become true if the lock is not released!
From the perspective of the caller, this works nicely: if the operation can proceed immediately, it does, and otherwise it blocks, and the caller need not deal with the mechanics of failure and retry. Choosing the sleep granularity is a tradeoff between responsiveness and CPU usage; the smaller the sleep granularity, the more responsive, but also the more CPU resources consumed. Figure 14.1 shows how sleep granularity can affect responsiveness: there may be a delay between when buffer space becomes available and when the thread wakes up and checks again.
Figure 14.1. Thread Oversleeping Because the Condition Became True Just After It Went to Sleep.
Listing 14.5. Bounded Buffer Using Crude Blocking.

@ThreadSafe
public class SleepyBoundedBuffer<V> extends BaseBoundedBuffer<V> {
    public SleepyBoundedBuffer(int size) { super(size); }

    public void put(V v) throws InterruptedException {
        while (true) {
            synchronized (this) {
                if (!isFull()) {
                    doPut(v);
                    return;
                }
            }
            Thread.sleep(SLEEP_GRANULARITY);
        }
    }

    public V take() throws InterruptedException {
        while (true) {
            synchronized (this) {
                if (!isEmpty())
                    return doTake();
            }
            Thread.sleep(SLEEP_GRANULARITY);
        }
    }
}
SleepyBoundedBuffer also creates another requirement for the caller: dealing with InterruptedException. When a method blocks waiting for a condition to become true, the polite thing to do is to provide a cancellation mechanism (see Chapter 7). Like most well-behaved blocking library methods, SleepyBoundedBuffer supports cancellation through interruption, returning early and throwing InterruptedException if interrupted.
These attempts to synthesize a blocking operation from polling and sleeping were fairly painful. It would be nice to have a way of suspending a thread but ensuring that it is awakened promptly when a certain condition (such as the buffer being no longer full) becomes true. This is exactly what condition queues do.
14.1.3. Condition Queues to the Rescue
Condition queues are like the "toast is ready" bell on your toaster. If you are listening for it, you are notified promptly when your toast is ready and can drop what you are doing (or not, maybe you want to finish the newspaper first) and get your toast. If you are not listening for it (perhaps you went outside to get the newspaper), you could miss the notification, but on return to the kitchen you can observe the state of the toaster and either retrieve the toast if it is finished or start listening for the bell again if it is not.
A condition queue gets its name because it gives a group of threads, called the wait set, a way to wait for a specific condition to become true. Unlike typical queues in which the elements are data items, the elements of a condition queue are the threads waiting for the condition.
Just as each Java object can act as a lock, each object can also act as a condition queue, and the wait, notify, and notifyAll methods in Object constitute the API for intrinsic condition queues. An object's intrinsic lock and its intrinsic condition queue are related: in order to call any of the condition queue methods on object X, you must hold the lock on X. This is because the mechanism for waiting for state-based conditions is necessarily tightly bound to the mechanism for preserving state consistency: you cannot wait for a condition unless you can examine the state, and you cannot release another thread from a condition wait unless you can modify the state.
Object.wait atomically releases the lock and asks the OS to suspend the current thread, allowing other threads to acquire the lock and therefore modify the object state. Upon waking, it reacquires the lock before returning. Intuitively, calling wait means "I want to go to sleep, but wake me when something interesting happens", and calling the notification methods means "something interesting happened".
BoundedBuffer in Listing 14.6 implements a bounded buffer using wait and notifyAll. This is simpler than the sleeping version, and is both more efficient (waking up less frequently if the buffer state does not change) and more responsive (waking up promptly when an interesting state change happens). This is a big improvement, but note that the introduction of condition queues didn't change the semantics compared to the sleeping version. It is simply an optimization in several dimensions: CPU efficiency, context-switch overhead, and responsiveness. Condition queues don't let you do anything you can't do with sleeping and polling,[5] but they make it a lot easier and more efficient to express and manage state dependence.
[5] This is not quite true; a fair condition queue can guarantee the relative order in which threads are released from the wait set. Intrinsic condition queues, like intrinsic locks, do not offer fair queuing; explicit Conditions offer a choice of fair or nonfair queuing.
Listing 14.6. Bounded Buffer Using Condition Queues.

@ThreadSafe
public class BoundedBuffer<V> extends BaseBoundedBuffer<V> {
    // CONDITION PREDICATE: not-full (!isFull())
    // CONDITION PREDICATE: not-empty (!isEmpty())

    public BoundedBuffer(int size) { super(size); }

    // BLOCKS-UNTIL: not-full
    public synchronized void put(V v) throws InterruptedException {
        while (isFull())
            wait();
        doPut(v);
        notifyAll();
    }

    // BLOCKS-UNTIL: not-empty
    public synchronized V take() throws InterruptedException {
        while (isEmpty())
            wait();
        V v = doTake();
        notifyAll();
        return v;
    }
}
BoundedBuffer is finally good enough to use: it is easy to use and manages state dependence sensibly.[6] A production version should also include timed versions of put and take, so that blocking operations can time out if they cannot complete within a time budget. The timed version of Object.wait makes this easy to implement.
[6] ConditionBoundedBuffer in Section 14.3 is even better: it is more efficient because it can use single notification instead of notifyAll.
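For instance, a timed put for BoundedBuffer might look like the following sketch (our own illustration, not code from the book's listings; it uses a deadline so that early wakeups simply re-test the predicate with the remaining time, and it requires importing java.util.concurrent.TimeUnit):

// BLOCKS-UNTIL: not-full, or the timeout expires (then returns false)
public synchronized boolean offer(V v, long timeout, TimeUnit unit)
        throws InterruptedException {
    long deadline = System.nanoTime() + unit.toNanos(timeout);
    while (isFull()) {
        long nanosLeft = deadline - System.nanoTime();
        if (nanosLeft <= 0)
            return false;                                 // timed out
        TimeUnit.NANOSECONDS.timedWait(this, nanosLeft);  // timed Object.wait
    }
    doPut(v);
    notifyAll();
    return true;
}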
14.2. Using Condition Queues
Condition queues make it easier to build efficient and responsive state-dependent classes, but they are still easy to use incorrectly; there are a lot of rules regarding their proper use that are not enforced by the compiler or platform. (This is one of the reasons to build on top of classes like LinkedBlockingQueue, CountDownLatch, Semaphore, and FutureTask when you can; if you can get away with it, it is a lot easier.)
14.2.1. The Condition Predicate
The key to using condition queues correctly is identifying the condition predicates that the object may wait for. It is the condition predicate that causes much of the confusion surrounding wait and notify, because it has no instantiation in the API and nothing in either the language specification or the JVM implementation ensures its correct use. In fact, it is not mentioned directly at all in the language specification or the Javadoc. But without it, condition waits would not work.
1he condlLlon predlcaLe ls Lhe precondlLlon LhaL makes an operaLlon sLaLedependenL ln Lhe flrsL place. ln a bounded
buffer, .$C& can proceed only lf Lhe buffer ls noL empLy, oLherwlse lL musL walL. lor .$C&, Lhe condlLlon predlcaLe ls "Lhe
buffer ls noL empLy", whlch .$C& musL LesL for before proceedlng. Slmllarly, Lhe condlLlon predlcaLe for G). ls "Lhe
buffer ls noL full". CondlLlon predlcaLes are expresslons consLrucLed from Lhe sLaLe varlables of Lhe class,
K$#&K2)";&;K)%%&9 LesLs for "buffer noL empLy" by comparlng *2)". Lo zero, and LesLs for "buffer noL full" by
comparlng *2)". Lo Lhe buffer slze.
Document the condition predicate(s) associated with a condition queue and the operations that wait on them.

There is an important three-way relationship in a condition wait involving locking, the wait method, and a condition predicate. The condition predicate involves state variables, and the state variables are guarded by a lock, so before testing the condition predicate, we must hold that lock. The lock object and the condition queue object (the object on which wait and notify are invoked) must also be the same object.

In BoundedBuffer, the buffer state is guarded by the buffer lock and the buffer object is used as the condition queue. The take method acquires the buffer lock and then tests the condition predicate (that the buffer is nonempty). If the buffer is indeed nonempty, it removes the first element, which it can do because it still holds the lock guarding the buffer state.

If the condition predicate is not true (the buffer is empty), take must wait until another thread puts an object in the buffer. It does this by calling wait on the buffer's intrinsic condition queue, which requires holding the lock on the condition queue object. As careful design would have it, take already holds that lock, which it needed to test the condition predicate (and if the condition predicate was true, to modify the buffer state in the same atomic operation). The wait method releases the lock, blocks the current thread, and waits until the specified timeout expires, the thread is interrupted, or the thread is awakened by a notification. After the thread wakes up, wait reacquires the lock before returning. A thread waking up from wait gets no special priority in reacquiring the lock; it contends for the lock just like any other thread attempting to enter a synchronized block.

Every call to wait is implicitly associated with a specific condition predicate. When calling wait regarding a particular condition predicate, the caller must already hold the lock associated with the condition queue, and that lock must also guard the state variables from which the condition predicate is composed.
14.2.2. Waking Up Too Soon

As if the three-way relationship among the lock, the condition predicate, and the condition queue were not complicated enough, the fact that wait returns does not necessarily mean that the condition predicate the thread is waiting for has become true.

A single intrinsic condition queue may be used with more than one condition predicate. When your thread is awakened because someone called notifyAll, that doesn't mean that the condition predicate you were waiting for is now true. (This is like having your toaster and coffee maker share a single bell; when it rings, you still have to look to see which device raised the signal.)[7] Additionally, wait is even allowed to return "spuriously", not in response to any thread calling notify.[8]

[7] This situation actually describes Tim's kitchen pretty well; so many devices beep that when you hear one, you have to inspect the toaster, the microwave, the coffee maker, and several others to determine the cause of the signal.

[8] To push the breakfast analogy way too far, this is like a toaster with a loose connection that makes the bell go off when the toast is ready but also sometimes when it is not ready.
When control re-enters the code calling wait, it has reacquired the lock associated with the condition queue. Is the condition predicate now true? Maybe. It might have been true at the time the notifying thread called notifyAll, but could have become false again by the time you reacquire the lock. Other threads may have acquired the lock and changed the object's state between when your thread was awakened and when wait reacquired the lock. Or maybe it hasn't been true at all since you called wait. You don't know why another thread called notify or notifyAll; maybe it was because another condition predicate associated with the same condition queue became true. Multiple condition predicates per condition queue are quite common; BoundedBuffer uses the same condition queue for both the "not full" and "not empty" predicates.[9]

[9] It is actually possible for threads to be waiting for both "not full" and "not empty" at the same time! This can happen when the number of producers/consumers exceeds the buffer capacity.

For all these reasons, when you wake up from wait you must test the condition predicate again, and go back to waiting (or fail) if it is not yet true. Since you can wake up repeatedly without your condition predicate being true, you must therefore always call wait from within a loop, testing the condition predicate in each iteration. The canonical form for a condition wait is shown in Listing 14.7.
Listing 14.7. Canonical Form for State-dependent Methods.

void stateDependentMethod() throws InterruptedException {
    // condition predicate must be guarded by lock
    synchronized(lock) {
        while (!conditionPredicate())
            lock.wait();
        // object is now in desired state
    }
}




When using condition waits (Object.wait or Condition.await):

- Always have a condition predicate: some test of object state that must hold before proceeding;
- Always test the condition predicate before calling wait, and again after returning from wait;
- Always call wait in a loop;
- Ensure that the state variables making up the condition predicate are guarded by the lock associated with the condition queue;
- Hold the lock associated with the condition queue when calling wait, notify, or notifyAll; and
- Do not release the lock after checking the condition predicate but before acting on it.
14.2.3. Missed Signals

Chapter 10 discussed liveness failures such as deadlock and livelock. Another form of liveness failure is missed signals. A missed signal occurs when a thread must wait for a specific condition that is already true, but fails to check the condition predicate before waiting. Now the thread is waiting to be notified of an event that has already occurred. This is like starting the toast, going out to get the newspaper, having the bell go off while you are outside, and then sitting down at the kitchen table waiting for the toast bell. You could wait a long time, potentially forever.[10] Unlike the marmalade for your toast, notification is not "sticky": if thread A notifies on a condition queue and thread B subsequently waits on that same condition queue, B does not immediately wake up; another notification is required to wake B. Missed signals are the result of coding errors like those warned against in the list above, such as failing to test the condition predicate before calling wait. If you structure your condition waits as in Listing 14.7, you will not have problems with missed signals.

[10] In order to emerge from this wait, someone else would have to make toast, but this will just make matters worse; when the bell rings, you will then have a disagreement about toast ownership.
14.2.4. Notification

So far, we've described half of what goes on in a condition wait: waiting. The other half is notification. In a bounded buffer, take blocks if called when the buffer is empty. In order for take to unblock when the buffer becomes nonempty, we must ensure that every code path in which the buffer could become nonempty performs a notification. In BoundedBuffer, there is only one such place: after a put. So put calls notifyAll after successfully adding an object to the buffer. Similarly, take calls notifyAll after removing an element to indicate that the buffer may no longer be full, in case any threads are waiting on the "not full" condition.

Whenever you wait on a condition, make sure that someone will perform a notification whenever the condition predicate becomes true.
There are two notification methods in the condition queue API: notify and notifyAll. To call either, you must hold the lock associated with the condition queue object. Calling notify causes the JVM to select one thread waiting on that condition queue to wake up; calling notifyAll wakes up all the threads waiting on that condition queue. Because you must hold the lock on the condition queue object when calling notify or notifyAll, and waiting threads cannot return from wait without reacquiring the lock, the notifying thread should release the lock quickly to ensure that the waiting threads are unblocked as soon as possible.

Because multiple threads could be waiting on the same condition queue for different condition predicates, using notify instead of notifyAll can be dangerous, primarily because single notification is prone to a problem akin to missed signals.

BoundedBuffer provides a good illustration of why notifyAll should be preferred to single notify in most cases. The condition queue is used for two different condition predicates: "not full" and "not empty". Suppose thread A waits on a condition queue for predicate PA, while thread B waits on the same condition queue for predicate PB. Now, suppose PB becomes true and thread C performs a single notify: the JVM will wake up one thread of its own choosing. If A is chosen, it will wake up, see that PA is not yet true, and go back to waiting. Meanwhile, B, which could now make progress, does not wake up. This is not exactly a missed signal; it is more of a "hijacked signal", but the problem is the same: a thread is waiting for a signal that has (or should have) already occurred.
Single notify can be used instead of notifyAll only when both of the following conditions hold:

- Uniform waiters. Only one condition predicate is associated with the condition queue, and each thread executes the same logic upon returning from wait; and
- One-in, one-out. A notification on the condition variable enables at most one thread to proceed.

BoundedBuffer meets the one-in, one-out requirement, but does not meet the uniform waiters requirement because waiting threads might be waiting for either the "not full" or the "not empty" condition. A "starting gate" latch like that used in TestHarness on page 96, in which a single event releases a set of threads, does not meet the one-in, one-out requirement because opening the starting gate lets multiple threads proceed.

Most classes don't meet these requirements, so the prevailing wisdom is to use notifyAll in preference to single notify. While this may be inefficient, it is much easier to ensure that your classes behave correctly when using notifyAll instead of notify.
This "prevailing wisdom" makes some people uncomfortable, and for good reason. Using notifyAll when only one thread can make progress is inefficient: sometimes a little, sometimes grossly so. If ten threads are waiting on a condition queue, calling notifyAll causes each of them to wake up and contend for the lock; then most or all of them will go right back to sleep. This means a lot of context switches and a lot of contended lock acquisitions for each event that enables (maybe) a single thread to make progress. (In the worst case, using notifyAll results in O(n^2) wakeups where n would suffice.) This is another situation where performance concerns support one approach and safety concerns support the other.

The notification done by put and take in BoundedBuffer is conservative: a notification is performed every time an object is put into or removed from the buffer. This could be optimized by observing that a thread can be released from a wait only if the buffer goes from empty to not empty or from full to not full, and notifying only if a put or take effected one of these state transitions. This is called conditional notification. While conditional notification can improve performance, it is tricky to get right (and also complicates the implementation of subclasses) and so should be used carefully. Listing 14.8 illustrates using conditional notification in BoundedBuffer.put.

Single notification and conditional notification are optimizations. As always, follow the principle "First make it right, and then make it fast, if it is not already fast enough" when using these optimizations; it is easy to introduce strange liveness failures by applying them incorrectly.
Listing 14.8. Using Conditional Notification in BoundedBuffer.put.

public synchronized void put(V v) throws InterruptedException {
    while (isFull())
        wait();
    boolean wasEmpty = isEmpty();
    doPut(v);
    if (wasEmpty)
        notifyAll();
}
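The same optimization applies on the consumer side. A sketch of a conditional-notification take, mirroring Listing 14.8 and assuming the isFull/isEmpty/doTake helpers of BaseBoundedBuffer from Listing 14.6:

public synchronized V take() throws InterruptedException {
    while (isEmpty())
        wait();
    boolean wasFull = isFull();  // record the state before the transition
    V v = doTake();
    if (wasFull)                 // only full-to-not-full can unblock a producer
        notifyAll();
    return v;
}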
14.2.5. Example: A Gate Class

The starting gate latch in TestHarness on page 96 was constructed with an initial count of one, creating a binary latch: one with two states, the initial state and the terminal state. The latch prevents threads from passing the starting gate until it is opened, at which point all the threads can pass through. While this latching mechanism is often exactly what is needed, sometimes it is a drawback that a gate constructed in this manner cannot be reclosed once opened.

It is easy to develop a recloseable ThreadGate class using condition waits, as shown in Listing 14.9. ThreadGate lets the gate be opened and closed, providing an await method that blocks until the gate is opened. The open method uses notifyAll because the semantics of this class fail the "one-in, one-out" test for single notification.

The condition predicate used by await is more complicated than simply testing isOpen. This is needed because if N threads are waiting at the gate at the time it is opened, they should all be allowed to proceed. But, if the gate is opened and closed in rapid succession, all threads might not be released if await examines only isOpen: by the time all the threads receive the notification, reacquire the lock, and emerge from wait, the gate may have closed again. So ThreadGate uses a somewhat more complicated condition predicate: every time the gate is closed, a "generation" counter is incremented, and a thread may pass await if the gate is open now or if the gate has opened since this thread arrived at the gate.

Since ThreadGate only supports waiting for the gate to open, it performs notification only in open; to support both "wait for open" and "wait for close" operations, it would have to notify in both open and close. This illustrates why state-dependent classes can be fragile to maintain: the addition of a new state-dependent operation may require modifying many code paths that modify the object state so that the appropriate notifications can be performed.

14.2.6. Subclass Safety Issues

Using conditional or single notification introduces constraints that can complicate subclassing [CPJ 3.3.3.3]. If you want to support subclassing at all, you must structure your class so subclasses can add the appropriate notification on behalf of the base class if it is subclassed in a way that violates one of the requirements for single or conditional notification.
Listing 14.9. Recloseable Gate Using wait and notifyAll.

@ThreadSafe
public class ThreadGate {
    // CONDITION-PREDICATE: opened-since(n) (isOpen || generation > n)
    @GuardedBy("this") private boolean isOpen;
    @GuardedBy("this") private int generation;

    public synchronized void close() {
        isOpen = false;
    }

    public synchronized void open() {
        ++generation;
        isOpen = true;
        notifyAll();
    }

    // BLOCKS-UNTIL: opened-since(generation on entry)
    public synchronized void await() throws InterruptedException {
        int arrivalGeneration = generation;
        while (!isOpen && arrivalGeneration == generation)
            wait();
    }
}
A state-dependent class should either fully expose (and document) its waiting and notification protocols to subclasses, or prevent subclasses from participating in them at all. (This is an extension of "design and document for inheritance, or else prohibit it" [EJ Item 15].) At the very least, designing a state-dependent class for inheritance requires exposing the condition queues and locks and documenting the condition predicates and synchronization policy; it may also require exposing the underlying state variables. (The worst thing a state-dependent class can do is expose its state to subclasses but not document its protocols for waiting and notification; this is like a class exposing its state variables but not documenting its invariants.)

One option for doing this is to effectively prohibit subclassing, either by making the class final or by hiding the condition queues, locks, and state variables from subclasses. Otherwise, if the subclass does something to undermine the way the base class uses notify, it needs to be able to repair the damage. Consider an unbounded blocking stack in which the pop operation blocks if the stack is empty but the push operation can always proceed. This meets the requirements for single notification. If this class uses single notification and a subclass adds a blocking "pop two consecutive elements" method, there are now two classes of waiters: those waiting to pop one element and those waiting to pop two. But if the base class exposes the condition queue and documents its protocols for using it, the subclass can override the push method to perform a notifyAll, restoring safety, as the sketch below illustrates.
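A sketch of that scenario follows. The class and method names are illustrative: the base class meets the single-notification requirements on its own, and the subclass, having broken the uniform-waiters assumption, overrides push to notify all waiters.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// Uniform waiters, one-in one-out: single notify is safe here
class BlockingStack<E> {
    private final Deque<E> stack = new ArrayDeque<E>();

    public synchronized void push(E e) {
        stack.push(e);
        notify();                        // wakes one "pop one" waiter
    }

    public synchronized E pop() throws InterruptedException {
        while (stack.isEmpty())
            wait();
        return stack.pop();
    }

    protected synchronized int size() { return stack.size(); }
}

// Adds a second class of waiters ("size >= 2"); overriding push to use
// notifyAll repairs the base class's now-invalid single notification
class PairPoppingStack<E> extends BlockingStack<E> {
    public synchronized void push(E e) {
        super.push(e);
        notifyAll();                     // wake both classes of waiters
    }

    public synchronized List<E> popTwo() throws InterruptedException {
        while (size() < 2)
            wait();
        List<E> pair = new ArrayList<E>(2);
        pair.add(pop());                 // cannot block: size >= 2 and lock held
        pair.add(pop());
        return pair;
    }
}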
14.2.7. Encapsulating Condition Queues

It is generally best to encapsulate the condition queue so that it is not accessible outside the class hierarchy in which it is used. Otherwise, callers might be tempted to think they understand your protocols for waiting and notification and use them in a manner inconsistent with your design. (It is impossible to enforce the uniform waiters requirement for single notification unless the condition queue object is inaccessible to code you do not control; if alien code mistakenly waits on your condition queue, this could subvert your notification protocol and cause a hijacked signal.)

Unfortunately, this advice to encapsulate objects used as condition queues is not consistent with the most common design pattern for thread-safe classes, in which an object's intrinsic lock is used to guard its state. BoundedBuffer illustrates this common idiom, where the buffer object itself is the lock and condition queue. However, BoundedBuffer could be easily restructured to use a private lock object and condition queue; the only difference would be that it would no longer support any form of client-side locking. A sketch of that restructuring appears below.
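A minimal sketch of the restructured buffer, written out in full rather than extending BaseBoundedBuffer; the class name is illustrative:

public class PrivateLockBoundedBuffer<V> {
    private final Object lock = new Object();  // private lock and condition queue
    private final V[] items;
    private int head, tail, count;             // guarded by lock

    @SuppressWarnings("unchecked")
    public PrivateLockBoundedBuffer(int size) {
        items = (V[]) new Object[size];
    }

    public void put(V v) throws InterruptedException {
        synchronized (lock) {
            while (count == items.length)
                lock.wait();               // alien code cannot wait on or notify lock
            items[tail] = v;
            if (++tail == items.length) tail = 0;
            ++count;
            lock.notifyAll();
        }
    }

    public V take() throws InterruptedException {
        synchronized (lock) {
            while (count == 0)
                lock.wait();
            V v = items[head];
            items[head] = null;
            if (++head == items.length) head = 0;
            --count;
            lock.notifyAll();
            return v;
        }
    }
}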
14.2.8. Entry and Exit Protocols

Wellings (Wellings, 2004) characterizes the proper use of wait and notify in terms of entry and exit protocols. For each state-dependent operation and for each operation that modifies state on which another operation has a state dependency, you should define and document an entry and exit protocol. The entry protocol is the operation's condition predicate; the exit protocol involves examining any state variables that have been changed by the operation to see if they might have caused some other condition predicate to become true, and if so, notifying on the associated condition queue.

AbstractQueuedSynchronizer, upon which most of the state-dependent classes in java.util.concurrent are built (see Section 14.4), exploits the concept of exit protocol. Rather than letting synchronizer classes perform their own notification, it instead requires synchronizer methods to return a value indicating whether its action might have unblocked one or more waiting threads. This explicit API requirement makes it harder to "forget" to notify on some state transitions.
14.3. Explicit Condition Objects

As we saw in Chapter 13, explicit Locks can be useful in some situations where intrinsic locks are too inflexible. Just as Lock is a generalization of intrinsic locks, Condition (see Listing 14.10) is a generalization of intrinsic condition queues.

Intrinsic condition queues have several drawbacks. Each intrinsic lock can have only one associated condition queue, which means that in classes like BoundedBuffer multiple threads might wait on the same condition queue for different condition predicates, and the most common pattern for locking involves exposing the condition queue object. Both of these factors make it impossible to enforce the uniform waiter requirement for using notifyAll. If you want to write a concurrent object with multiple condition predicates, or you want to exercise more control over the visibility of the condition queue, the explicit Lock and Condition classes offer a more flexible alternative to intrinsic locks and condition queues.

A Condition is associated with a single Lock, just as a condition queue is associated with a single intrinsic lock; to create a Condition, call Lock.newCondition on the associated lock. And just as Lock offers a richer feature set than intrinsic locking, Condition offers a richer feature set than intrinsic condition queues: multiple wait sets per lock, interruptible and uninterruptible condition waits, deadline-based waiting, and a choice of fair or nonfair queueing.
Listing 14.10. Condition Interface.

public interface Condition {
    void await() throws InterruptedException;
    boolean await(long time, TimeUnit unit)
            throws InterruptedException;
    long awaitNanos(long nanosTimeout) throws InterruptedException;
    void awaitUninterruptibly();
    boolean awaitUntil(Date deadline) throws InterruptedException;

    void signal();
    void signalAll();
}
Unlike intrinsic condition queues, you can have as many Condition objects per Lock as you want. Condition objects inherit the fairness setting of their associated Lock; for fair locks, threads are released from Condition.await in FIFO order.

Hazard warning: The equivalents of wait, notify, and notifyAll for Condition objects are await, signal, and signalAll. However, Condition extends Object, which means that it also has wait and notify methods. Be sure to use the proper versions, await and signal, instead!
Listing 14.11 shows yet another bounded buffer implementation, this time using two Conditions, notFull and notEmpty, to represent explicitly the "not full" and "not empty" condition predicates. When take blocks because the buffer is empty, it waits on notEmpty, and put unblocks any threads blocked in take by signaling on notEmpty.

The behavior of ConditionBoundedBuffer is the same as BoundedBuffer, but its use of condition queues is more readable: it is easier to analyze a class that uses multiple Conditions than one that uses a single intrinsic condition queue with multiple condition predicates. By separating the two condition predicates into separate wait sets, Condition makes it easier to meet the requirements for single notification. Using the more efficient signal instead of signalAll reduces the number of context switches and lock acquisitions triggered by each buffer operation.

Just as with built-in locks and condition queues, the three-way relationship among the lock, the condition predicate, and the condition variable must also hold when using explicit Locks and Conditions. The variables involved in the condition predicate must be guarded by the Lock, and the Lock must be held when testing the condition predicate and when calling await and signal.[11]

[11] ReentrantLock requires that the Lock be held when calling signal or signalAll, but Lock implementations are permitted to construct Conditions that do not have this requirement.

Choose between using explicit Conditions and intrinsic condition queues in the same way as you would choose between ReentrantLock and synchronized: use Condition if you need its advanced features such as fair queueing or multiple wait sets per lock, and otherwise prefer intrinsic condition queues. (If you already use ReentrantLock because you need its advanced features, the choice is already made.)
14.4. Anatomy of a Synchronizer

The interfaces of ReentrantLock and Semaphore have a lot in common. Both classes act as a "gate", allowing only a limited number of threads to pass at a time; threads arrive at the gate and are allowed through (lock or acquire returns successfully), are made to wait (lock or acquire blocks), or are turned away (tryLock or tryAcquire returns false, indicating that the lock or permit did not become available in the time allowed). Further, both allow interruptible, uninterruptible, and timed acquisition attempts, and both allow a choice of fair or nonfair queueing of waiting threads.

Given this commonality, you might think that Semaphore was implemented on top of ReentrantLock, or perhaps ReentrantLock was implemented as a Semaphore with one permit. This would be entirely practical; it is a common exercise to prove that a counting semaphore can be implemented using a lock (as in SemaphoreOnLock in Listing 14.12) and that a lock can be implemented using a counting semaphore (sketched below).
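The converse construction is simple enough to sketch here: a non-reentrant mutual-exclusion lock built from a binary semaphore. Like SemaphoreOnLock, this is illustrative only, not how ReentrantLock is actually implemented:

import java.util.concurrent.Semaphore;

public class LockOnSemaphore {
    private final Semaphore permit = new Semaphore(1);

    public void lock() throws InterruptedException {
        permit.acquire();   // blocks until the single permit is available
    }

    public void unlock() {
        permit.release();   // returns the permit, unblocking one waiter
    }
}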
In actuality, they are both implemented using a common base class, AbstractQueuedSynchronizer (AQS), as are many other synchronizers. AQS is a framework for building locks and synchronizers, and a surprisingly broad range of synchronizers can be built easily and efficiently using it. Not only are ReentrantLock and Semaphore built using AQS, but so are CountDownLatch, ReentrantReadWriteLock, SynchronousQueue,[12] and FutureTask.

[12] Java 6 replaces the AQS-based SynchronousQueue with a (more scalable) nonblocking version.
Listing 14.11. Bounded Buffer Using Explicit Condition Variables.

@ThreadSafe
public class ConditionBoundedBuffer<T> {
    protected final Lock lock = new ReentrantLock();
    // CONDITION PREDICATE: notFull (count < items.length)
    private final Condition notFull = lock.newCondition();
    // CONDITION PREDICATE: notEmpty (count > 0)
    private final Condition notEmpty = lock.newCondition();
    @GuardedBy("lock")
    private final T[] items = (T[]) new Object[BUFFER_SIZE];
    @GuardedBy("lock") private int tail, head, count;

    // BLOCKS-UNTIL: notFull
    public void put(T x) throws InterruptedException {
        lock.lock();
        try {
            while (count == items.length)
                notFull.await();
            items[tail] = x;
            if (++tail == items.length)
                tail = 0;
            ++count;
            notEmpty.signal();
        } finally {
            lock.unlock();
        }
    }

    // BLOCKS-UNTIL: notEmpty
    public T take() throws InterruptedException {
        lock.lock();
        try {
            while (count == 0)
                notEmpty.await();
            T x = items[head];
            items[head] = null;
            if (++head == items.length)
                head = 0;
            --count;
            notFull.signal();
            return x;
        } finally {
            lock.unlock();
        }
    }
}


Listing 14.12. Counting Semaphore Implemented Using Lock.

// Not really how java.util.concurrent.Semaphore is implemented
@ThreadSafe
public class SemaphoreOnLock {
    private final Lock lock = new ReentrantLock();
    // CONDITION PREDICATE: permitsAvailable (permits > 0)
    private final Condition permitsAvailable = lock.newCondition();
    @GuardedBy("lock") private int permits;

    SemaphoreOnLock(int initialPermits) {
        lock.lock();
        try {
            permits = initialPermits;
        } finally {
            lock.unlock();
        }
    }

    // BLOCKS-UNTIL: permitsAvailable
    public void acquire() throws InterruptedException {
        lock.lock();
        try {
            while (permits <= 0)
                permitsAvailable.await();
            --permits;
        } finally {
            lock.unlock();
        }
    }

    public void release() {
        lock.lock();
        try {
            ++permits;
            permitsAvailable.signal();
        } finally {
            lock.unlock();
        }
    }
}
AQS handles many of the details of implementing a synchronizer, such as FIFO queuing of waiting threads. Individual synchronizers can define flexible criteria for whether a thread should be allowed to pass or be required to wait.

Using AQS to build synchronizers offers several benefits. Not only does it substantially reduce the implementation effort, but you also needn't pay for multiple points of contention, as you would when constructing one synchronizer on top of another. In SemaphoreOnLock, acquiring a permit has two places where it might block: once at the lock guarding the semaphore state, and then again if a permit is not available. Synchronizers built with AQS have only one point where they might block, reducing context-switch overhead and improving throughput. AQS was designed for scalability, and all the synchronizers in java.util.concurrent that are built with AQS benefit from this.
14.5. AbstractQueuedSynchronizer

Most developers will probably never use AQS directly; the standard set of synchronizers covers a fairly wide range of situations. But seeing how the standard synchronizers are implemented can help clarify how they work.

The basic operations that an AQS-based synchronizer performs are some variants of acquire and release. Acquisition is the state-dependent operation and can always block. With a lock or semaphore, the meaning of acquire is straightforward: acquire the lock or a permit, and the caller may have to wait until the synchronizer is in a state where that can happen. With CountDownLatch, acquire means "wait until the latch has reached its terminal state", and with FutureTask, it means "wait until the task has completed". Release is not a blocking operation; a release may allow threads blocked in acquire to proceed.

For a class to be state-dependent, it must have some state. AQS takes on the task of managing some of the state for the synchronizer class: it manages a single integer of state information that can be manipulated through the protected getState, setState, and compareAndSetState methods. This can be used to represent arbitrary state; for example, ReentrantLock uses it to represent the count of times the owning thread has acquired the lock, Semaphore uses it to represent the number of permits remaining, and FutureTask uses it to represent the state of the task (not yet started, running, completed, cancelled). Synchronizers can also manage additional state variables themselves; for example, ReentrantLock keeps track of the current lock owner so it can distinguish between reentrant and contended lock-acquisition requests.

Acquisition and release in AQS take the forms shown in Listing 14.13. Depending on the synchronizer, acquisition might be exclusive, as with ReentrantLock, or nonexclusive, as with Semaphore and CountDownLatch. An acquire operation has two parts. First, the synchronizer decides whether the current state permits acquisition; if so, the thread is allowed to proceed, and if not, the acquire blocks or fails. This decision is determined by the synchronizer semantics; for example, acquiring a lock can succeed if the lock is unheld, and acquiring a latch can succeed if the latch is in its terminal state.

The second part involves possibly updating the synchronizer state; one thread acquiring the synchronizer can affect whether other threads can acquire it. For example, acquiring a lock changes the lock state from "unheld" to "held", and acquiring a permit from a Semaphore reduces the number of permits left. On the other hand, the acquisition of a latch by one thread does not affect whether other threads can acquire it, so acquiring a latch does not change its state.
Listing 14.13. Canonical Forms for Acquisition and Release in AQS.

boolean acquire() throws InterruptedException {
    while (state does not permit acquire) {
        if (blocking acquisition requested) {
            enqueue current thread if not already queued
            block current thread
        }
        else
            return failure
    }
    possibly update synchronization state
    dequeue thread if it was queued
    return success
}

void release() {
    update synchronization state
    if (new state may permit a blocked thread to acquire)
        unblock one or more queued threads
}
A synchronizer supporting exclusive acquisition should implement the protected methods tryAcquire, tryRelease, and isHeldExclusively, and those supporting shared acquisition should implement tryAcquireShared and tryReleaseShared. The acquire, acquireShared, release, and releaseShared methods in AQS call the try forms of these methods in the synchronizer subclass to determine if the operation can proceed. The synchronizer subclass can use getState, setState, and compareAndSetState to examine and update the state according to its acquire and release semantics, and informs the base class through the return status whether the attempt to acquire or release the synchronizer was successful. For example, returning a negative value from tryAcquireShared indicates acquisition failure; returning zero indicates the synchronizer was acquired exclusively; and returning a positive value indicates the synchronizer was acquired nonexclusively. The tryRelease and tryReleaseShared methods should return true if the release may have unblocked threads attempting to acquire the synchronizer.

To simplify implementation of locks that support condition queues (like ReentrantLock), AQS also provides machinery for constructing condition variables associated with synchronizers.
14.5.1. A Simple Latch

OneShotLatch in Listing 14.14 is a binary latch implemented using AQS. It has two public methods, await and signal, that correspond to acquisition and release. Initially, the latch is closed; any thread calling await blocks until the latch is opened. Once the latch is opened by a call to signal, waiting threads are released and threads that subsequently arrive at the latch will be allowed to proceed.

Listing 14.14. Binary Latch Using AbstractQueuedSynchronizer.

@ThreadSafe
public class OneShotLatch {
    private final Sync sync = new Sync();

    public void signal() { sync.releaseShared(0); }

    public void await() throws InterruptedException {
        sync.acquireSharedInterruptibly(0);
    }

    private class Sync extends AbstractQueuedSynchronizer {
        protected int tryAcquireShared(int ignored) {
            // Succeed if latch is open (state == 1), else fail
            return (getState() == 1) ? 1 : -1;
        }

        protected boolean tryReleaseShared(int ignored) {
            setState(1);  // Latch is now open
            return true;  // Other threads may now be able to acquire
        }
    }
}
In OneShotLatch, the AQS state holds the latch state: closed (zero) or open (one). The await method calls acquireSharedInterruptibly in AQS, which in turn consults the tryAcquireShared method in OneShotLatch. The tryAcquireShared implementation must return a value indicating whether or not acquisition can proceed. If the latch has been previously opened, tryAcquireShared returns success, allowing the thread to pass; otherwise it returns a value indicating that the acquisition attempt failed. The acquireSharedInterruptibly method interprets failure to mean that the thread should be placed on the queue of waiting threads. Similarly, signal calls releaseShared, which causes tryReleaseShared to be consulted. The tryReleaseShared implementation unconditionally sets the latch state to open and indicates (through its return value) that the synchronizer is in a fully released state. This causes AQS to let all waiting threads attempt to reacquire the synchronizer, and acquisition will now succeed because tryAcquireShared returns success.
OneShotLatch is a fully functional, usable, performant synchronizer, implemented in only twenty or so lines of code. Of course, it is missing some useful features, such as timed acquisition or the ability to inspect the latch state, but these are easy to implement as well, since AQS provides timed versions of the acquisition methods and utility methods for common inspection operations.
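For instance, a timed wait could be layered directly on the timed acquisition support in AQS. A sketch of a method that might be added to OneShotLatch from Listing 14.14 (the name tryAwait is illustrative; TimeUnit is java.util.concurrent.TimeUnit):

// Returns true if the latch opened within the timeout, false otherwise
public boolean tryAwait(long timeout, TimeUnit unit)
        throws InterruptedException {
    return sync.tryAcquireSharedNanos(0, unit.toNanos(timeout));
}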
OneShotLatch could have been implemented by extending AQS rather than delegating to it, but this is undesirable for several reasons [EJ Item 14]. Doing so would undermine the simple (two-method) interface of OneShotLatch, and while the public methods of AQS won't allow callers to corrupt the latch state, callers could easily use them incorrectly. None of the synchronizers in java.util.concurrent extends AQS directly; they all delegate to private inner subclasses of AQS instead.
14.6. AQS in java.util.concurrent Synchronizer Classes

Many of the blocking classes in java.util.concurrent, such as ReentrantLock, Semaphore, ReentrantReadWriteLock, CountDownLatch, SynchronousQueue, and FutureTask, are built using AQS. Without getting too deeply into the details (the source code is part of the JDK download[13]), let's take a quick look at how each of these classes uses AQS.

[13] Or with fewer licensing restrictions at http://gee.cs.oswego.edu/dl/concurrency-interest.
14.6.1. ReentrantLock

ReentrantLock supports only exclusive acquisition, so it implements tryAcquire, tryRelease, and isHeldExclusively; tryAcquire for the nonfair version is shown in Listing 14.15. ReentrantLock uses the synchronization state to hold the lock acquisition count, and maintains an owner variable holding the identity of the owning thread that is modified only when the current thread has just acquired the lock or is just about to release it.[14] In tryRelease, it checks the owner field to ensure that the current thread owns the lock before allowing an unlock to proceed; in tryAcquire, it uses this field to differentiate between a reentrant acquisition and a contended acquisition attempt.

[14] Because the protected state-manipulation methods have the memory semantics of a volatile read or write and ReentrantLock is careful to read the owner field only after calling getState and write it only before calling setState, ReentrantLock can piggyback on the memory semantics of the synchronization state, and thus avoid further synchronization; see Section 16.1.4.
When a thread attempts to acquire a lock, tryAcquire first consults the lock state. If it is unheld, it tries to update the lock state to indicate that it is held. Because the state could have changed since it was first inspected a few instructions ago, tryAcquire uses compareAndSetState to attempt to atomically update the state to indicate that the lock is now held and confirm that the state has not changed since last observed. (See the description of compareAndSet in Section 15.3.) If the lock state indicates that it is already held and the current thread is the owner of the lock, the acquisition count is incremented; if the current thread is not the owner of the lock, the acquisition attempt fails.
Listing 14.15. tryAcquire Implementation from Nonfair ReentrantLock.

protected boolean tryAcquire(int ignored) {
    final Thread current = Thread.currentThread();
    int c = getState();
    if (c == 0) {
        if (compareAndSetState(0, 1)) {
            owner = current;
            return true;
        }
    } else if (current == owner) {
        setState(c + 1);
        return true;
    }
    return false;
}
ReentrantLock also takes advantage of AQS's built-in support for multiple condition variables and wait sets. Lock.newCondition returns a new instance of ConditionObject, an inner class of AQS.
14.6.2. Semaphore and CountDownLatch

Semaphore uses the AQS synchronization state to hold the count of permits currently available. The tryAcquireShared method (see Listing 14.16) first computes the number of permits remaining, and if there are not enough, returns a value indicating that the acquire failed. If sufficient permits appear to be left, it attempts to atomically reduce the permit count using compareAndSetState. If that succeeds (meaning that the permit count had not changed since it last looked), it returns a value indicating that the acquire succeeded. The return value also encodes whether other shared acquisition attempts might succeed, in which case other waiting threads will also be unblocked.

The while loop terminates either when there are not enough permits or when tryAcquireShared can atomically update the permit count to reflect acquisition. While any given call to compareAndSetState may fail due to contention with another thread (see Section 15.3), causing it to retry, one of these two termination criteria will become true within a reasonable number of retries. Similarly, tryReleaseShared increases the permit count, potentially unblocking waiting threads, and retries until the update succeeds. The return value of tryReleaseShared indicates whether other threads might have been unblocked by the release.
CountDownLatch uses AQS in a similar manner to Semaphore: the synchronization state holds the current count. The countDown method calls release, which causes the counter to be decremented and unblocks waiting threads if the counter has reached zero; await calls acquire, which returns immediately if the counter has reached zero and otherwise blocks.
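A sketch of the AQS subclass behind such a latch, consistent with the description above (simplified relative to the library source; assumes java.util.concurrent.locks.AbstractQueuedSynchronizer is imported by the enclosing class):

private static class Sync extends AbstractQueuedSynchronizer {
    Sync(int count) { setState(count); }    // state holds the current count

    protected int tryAcquireShared(int ignored) {
        // await proceeds only once the count has reached zero
        return (getState() == 0) ? 1 : -1;
    }

    protected boolean tryReleaseShared(int ignored) {
        // countDown: decrement atomically; report release when count hits zero
        for (;;) {
            int c = getState();
            if (c == 0)
                return false;               // already open; nothing to release
            int next = c - 1;
            if (compareAndSetState(c, next))
                return next == 0;
        }
    }
}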
Listing 14.16. tryAcquireShared and tryReleaseShared from Semaphore.

protected int tryAcquireShared(int acquires) {
    while (true) {
        int available = getState();
        int remaining = available - acquires;
        if (remaining < 0
                || compareAndSetState(available, remaining))
            return remaining;
    }
}

protected boolean tryReleaseShared(int releases) {
    while (true) {
        int p = getState();
        if (compareAndSetState(p, p + releases))
            return true;
    }
}


14.6.3. FutureTask

At first glance, FutureTask doesn't even look like a synchronizer. But Future.get has semantics that are very similar to that of a latch: if some event (the completion or cancellation of the task represented by the FutureTask) has occurred, then threads can proceed; otherwise they are queued until that event occurs.

FutureTask uses the AQS synchronization state to hold the task status: running, completed, or cancelled. It also maintains additional state variables to hold the result of the computation or the exception it threw. It further maintains a reference to the thread that is running the computation (if it is currently in the running state), so that it can be interrupted if the task is cancelled.
14.6.4. ReentrantReadWriteLock

The interface for ReadWriteLock suggests there are two locks, a reader lock and a writer lock, but in the AQS-based implementation of ReentrantReadWriteLock, a single AQS subclass manages both read and write locking. ReentrantReadWriteLock uses 16 bits of the state for the write-lock count, and the other 16 bits for the read-lock count. Operations on the read lock use the shared acquire and release methods; operations on the write lock use the exclusive acquire and release methods.
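A sketch of that bit-packing scheme; the helper names here are illustrative, though the 16/16 split matches the description above:

static final int SHARED_SHIFT   = 16;
static final int EXCLUSIVE_MASK = (1 << SHARED_SHIFT) - 1;  // low 16 bits

// Read-lock holds live in the high 16 bits of the AQS state word
static int readerCount(int state) { return state >>> SHARED_SHIFT; }

// Write-lock reentrancy count lives in the low 16 bits
static int writerCount(int state) { return state & EXCLUSIVE_MASK; }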
Internally, AQS maintains a queue of waiting threads, keeping track of whether a thread has requested exclusive or shared access. In ReentrantReadWriteLock, when the lock becomes available, if the thread at the head of the queue was looking for write access it will get it, and if the thread at the head of the queue was looking for read access, all queued threads up to the first writer will get it.[15]

[15] This mechanism does not permit the choice of a reader-preference or writer-preference policy, as some read-write lock implementations do. For that, either the AQS wait queue would need to be something other than a FIFO queue, or two queues would be needed. However, such a strict ordering policy is rarely needed in practice; if the nonfair version of ReentrantReadWriteLock does not offer acceptable liveness, the fair version usually provides satisfactory ordering and guarantees nonstarvation of readers and writers.
Summary

If you need to implement a state-dependent class, one whose methods must block if a state-based precondition does not hold, the best strategy is usually to build upon an existing library class such as Semaphore, BlockingQueue, or CountDownLatch, as in ValueLatch on page 187. However, sometimes existing library classes do not provide a sufficient foundation; in these cases, you can build your own synchronizers using intrinsic condition queues, explicit Condition objects, or AbstractQueuedSynchronizer. Intrinsic condition queues are tightly bound to intrinsic locking, since the mechanism for managing state dependence is necessarily tied to the mechanism for ensuring state consistency. Similarly, explicit Conditions are tightly bound to explicit Locks, and offer an extended feature set compared to intrinsic condition queues, including multiple wait sets per lock, interruptible or uninterruptible condition waits, fair or nonfair queuing, and deadline-based waiting.


Chapter 15. Atomic Variables and Nonblocking Synchronization

Many of the classes in java.util.concurrent, such as Semaphore and ConcurrentLinkedQueue, provide better performance and scalability than alternatives using synchronized. In this chapter, we take a look at the primary source of this performance boost: atomic variables and nonblocking synchronization.

Much of the recent research on concurrent algorithms has focused on nonblocking algorithms, which use low-level atomic machine instructions such as compare-and-swap instead of locks to ensure data integrity under concurrent access. Nonblocking algorithms are used extensively in operating systems and JVMs for thread and process scheduling, garbage collection, and to implement locks and other concurrent data structures.
Nonblocking algorithms are considerably more complicated to design and implement than lock-based alternatives, but they can offer significant scalability and liveness advantages. They coordinate at a finer level of granularity and can greatly reduce scheduling overhead because they don't block when multiple threads contend for the same data. Further, they are immune to deadlock and other liveness problems. In lock-based algorithms, other threads cannot make progress if a thread goes to sleep or spins while holding a lock, whereas nonblocking algorithms are impervious to individual thread failures. As of Java 5.0, it is possible to build efficient nonblocking algorithms in Java using the atomic variable classes such as AtomicInteger and AtomicReference.
Atomic variables can also be used as "better volatile variables" even if you are not developing nonblocking algorithms. Atomic variables offer the same memory semantics as volatile variables, but with additional support for atomic updates, making them ideal for counters, sequence generators, and statistics gathering while offering better scalability than lock-based alternatives.
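For example, a statistics counter based on AtomicLong gets volatile visibility plus lock-free atomic increments. A minimal sketch (the class name is illustrative):

import java.util.concurrent.atomic.AtomicLong;

public class HitCounter {
    private final AtomicLong hits = new AtomicLong();

    public void hit() {
        hits.incrementAndGet();   // atomic read-modify-write, no lock
    }

    public long getHits() {
        return hits.get();        // volatile-read visibility semantics
    }
}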
15.1. Disadvantages of Locking

Coordinating access to shared state using a consistent locking protocol ensures that whichever thread holds the lock guarding a set of variables has exclusive access to those variables, and that any changes made to those variables are visible to other threads that subsequently acquire the lock.

Modern JVMs can optimize uncontended lock acquisition and release fairly effectively, but if multiple threads request the lock at the same time the JVM enlists the help of the operating system. If it gets to this point, some unfortunate thread will be suspended and have to be resumed later.[1] When that thread is resumed, it may have to wait for other threads to finish their scheduling quanta before it is actually scheduled. Suspending and resuming a thread has a lot of overhead and generally entails a lengthy interruption. For lock-based classes with fine-grained operations (such as the synchronized collections classes, where most methods contain only a few operations), the ratio of scheduling overhead to useful work can be quite high when the lock is frequently contended.

[1] A smart JVM need not necessarily suspend a thread if it contends for a lock; it could use profiling data to decide adaptively between suspension and spin-locking based on how long the lock has been held during previous acquisitions.
Volatile variables are a lighter-weight synchronization mechanism than locking because they do not involve context switches or thread scheduling. However, volatile variables have some limitations compared to locking: while they provide similar visibility guarantees, they cannot be used to construct atomic compound actions. This means that volatile variables cannot be used when one variable depends on another, or when the new value of a variable depends on its old value. This limits when volatile variables are appropriate, since they cannot be used to reliably implement common tools such as counters or mutexes.[2]

[2] It is theoretically possible, though wholly impractical, to use the semantics of volatile to construct mutexes and other synchronizers; see (Raynal, 1986).
For example, while the increment operation (++i) may look like an atomic operation, it is actually three distinct operations: fetch the current value of the variable, add one to it, and then write the updated value back. In order to not lose an update, the entire read-modify-write operation must be atomic. So far, the only way we've seen to do this is with locking, as in Counter on page 56.
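To see the hazard concretely, here is a sketch of a counter that is broken despite volatile; the class name is illustrative. Two threads can both read the same value, both add one, and both write back the same result, losing an update:

public class UnsafeVolatileCounter {
    private volatile int value;   // visibility, but no atomicity

    public void increment() {
        value++;                  // read, add, write: three separate steps
    }

    public int get() { return value; }
}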
Counter is thread-safe, and in the presence of little or no contention performs just fine. But under contention, performance suffers because of context-switch overhead and scheduling delays. When locks are held so briefly, being put to sleep is a harsh penalty for asking for the lock at the wrong time.

Locking has a few other disadvantages. When a thread is waiting for a lock, it cannot do anything else. If a thread holding a lock is delayed (due to a page fault, scheduling delay, or the like), then no thread that needs that lock can make progress. This can be a serious problem if the blocked thread is a high-priority thread but the thread holding the lock is a lower-priority thread, a performance hazard known as priority inversion. Even though the higher-priority thread should have precedence, it must wait until the lock is released, and this effectively downgrades its priority to that of the lower-priority thread. If a thread holding a lock is permanently blocked (due to an infinite loop, deadlock, livelock, or other liveness failure), any threads waiting for that lock can never make progress.

Even ignoring these hazards, locking is simply a heavyweight mechanism for fine-grained operations such as incrementing a counter. It would be nice to have a finer-grained technique for managing contention between threads, something like volatile variables, but offering the possibility of atomic updates as well. Happily, modern processors offer us precisely such a mechanism.
15.2. Hardware Support for Concurrency

Exclusive locking is a pessimistic technique: it assumes the worst (if you don't lock your door, gremlins will come in and rearrange your stuff) and doesn't proceed until you can guarantee, by acquiring the appropriate locks, that other threads will not interfere.

For fine-grained operations, there is an alternate approach that is often more efficient: the optimistic approach, whereby you proceed with an update, hopeful that you can complete it without interference. This approach relies on collision detection to determine if there has been interference from other parties during the update, in which case the operation fails and can be retried (or not). The optimistic approach is like the old saying, "It is easier to obtain forgiveness than permission", where "easier" here means "more efficient".
Processors designed for multiprocessor operation provide special instructions for managing concurrent access to shared variables. Early processors had atomic test-and-set, fetch-and-increment, or swap instructions sufficient for implementing mutexes that could in turn be used to implement more sophisticated concurrent objects. Today, nearly every modern processor has some form of atomic read-modify-write instruction, such as compare-and-swap or load-linked/store-conditional. Operating systems and JVMs use these instructions to implement locks and concurrent data structures, but until Java 5.0 they had not been available directly to Java classes.
15.2.1. Compare and Swap

The approach taken by most processor architectures, including IA32 and Sparc, is to implement a compare-and-swap (CAS) instruction. (Other processors, such as PowerPC, implement the same functionality with a pair of instructions: load-linked and store-conditional.) CAS has three operands: a memory location V on which to operate, the expected old value A, and the new value B. CAS atomically updates V to the new value B, but only if the value in V matches the expected old value A; otherwise it does nothing. In either case, it returns the value currently in V. (The variant called compare-and-set instead returns whether the operation succeeded.) CAS means "I think V should have the value A; if it does, put B there, otherwise don't change it but tell me I was wrong." CAS is an optimistic technique: it proceeds with the update in the hope of success, and can detect failure if another thread has updated the variable since it was last examined. SimulatedCAS in Listing 15.1 illustrates the semantics (but not the implementation or performance) of CAS.
When multiple threads attempt to update the same variable simultaneously using CAS, one wins and updates the variable's value, and the rest lose. But the losers are not punished by suspension, as they could be if they failed to acquire a lock; instead, they are told that they didn't win the race this time but can try again. Because a thread that loses a CAS is not blocked, it can decide whether it wants to try again, take some other recovery action, or do nothing.[3] This flexibility eliminates many of the liveness hazards associated with locking (though in unusual cases can introduce the risk of livelock; see Section 10.3.3).

[3] Doing nothing may be a perfectly sensible response to a failed CAS; in some nonblocking algorithms, such as the linked queue algorithm in Section 15.4.2, a failed CAS means that someone else already did the work you were planning to do.

Listing 15.1. Simulated CAS Operation.

@ThreadSafe
public class SimulatedCAS {
    @GuardedBy("this") private int value;

    public synchronized int get() { return value; }

    public synchronized int compareAndSwap(int expectedValue,
                                           int newValue) {
        int oldValue = value;
        if (oldValue == expectedValue)
            value = newValue;
        return oldValue;
    }

    public synchronized boolean compareAndSet(int expectedValue,
                                              int newValue) {
        return (expectedValue
                == compareAndSwap(expectedValue, newValue));
    }
}

The typical pattern for using CAS is first to read the value A from V, derive the new value B from A, and then use CAS to atomically change V from A to B so long as no other thread has changed V to another value in the meantime. CAS addresses the problem of implementing atomic read-modify-write sequences without locking, because it can detect interference from other threads.
15.2.2. A Nonblocking Counter

CasCounter in Listing 15.2 implements a thread-safe counter using CAS. The increment operation follows the canonical form: fetch the old value, transform it to the new value (adding one), and use CAS to set the new value. If the CAS fails, the operation is immediately retried. Retrying repeatedly is usually a reasonable strategy, although in cases of extreme contention it might be desirable to wait or back off before retrying to avoid livelock.

CasCounter does not block, though it may have to retry several[4] times if other threads are updating the counter at the same time. (In practice, if all you need is a counter or sequence generator, just use AtomicInteger or AtomicLong, which provide atomic increment and other arithmetic methods.)

[4] Theoretically, it could have to retry arbitrarily many times if other threads keep winning the CAS race; in practice, this sort of starvation rarely happens.
2/3./&4 @VADA P%&Y?%'</&4 $%(&.*) 93/&4 $FCA
b=89&$;'$%&
G)M04* *0$## D$#D2)".&9 W
G94/$.& '43)0$.&;D1' /$0)&[

G)M04* 4". 6&.B$0)&UV W
9&.)9" /$0)&+6&.UV[
\

G)M04* 4". 4"*9&3&".UV W
4". /[
;2 W
/ ] /$0)&+6&.UV[
\
?840& U/ Y] /$0)&+*23G$9&1";'?$GU/e / _ ^VV[
9&.)9" / _ ^[
\
\
At first glance, the CAS-based counter looks as if it should perform worse than a lock-based counter; it has more operations and a more complicated control flow, and depends on the seemingly complicated CAS operation. But in reality, CAS-based counters significantly outperform lock-based counters if there is even a small amount of contention, and often even if there is no contention. The fast path for uncontended lock acquisition typically requires at least one CAS plus other lock-related housekeeping, so more work is going on in the best case for a lock-based counter than in the normal case for the CAS-based counter. Since the CAS succeeds most of the time (assuming low to moderate contention), the hardware will correctly predict the branch implicit in the while loop, minimizing the overhead of the more complicated control logic.
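In practice, a counter like this would simply use AtomicInteger, which wraps the same fetch/CAS/retry loop in library code. A minimal sketch (the class name here is ours, not from the book):

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicCounter {
    private final AtomicInteger value = new AtomicInteger(0);

    public int getValue() {
        return value.get();
    }

    public int increment() {
        // incrementAndGet performs the same read-derive-CAS retry loop
        // as CasCounter, backed by hardware CAS where available
        return value.incrementAndGet();
    }
}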
The language syntax for locking may be compact, but the work done by the JVM and OS to manage locks is not. Locking entails traversing a relatively complicated code path in the JVM and may entail OS-level locking, thread suspension, and context switches. In the best case, locking requires at least one CAS, so using locks moves the CAS out of sight but doesn't save any actual execution cost. On the other hand, executing a CAS from within the program involves no JVM code, system calls, or scheduling activity. What looks like a longer code path at the application level is in fact a much shorter code path when JVM and OS activity are taken into account. The primary disadvantage of CAS is that it forces the caller to deal with contention (by retrying, backing off, or giving up), whereas locks deal with contention automatically by blocking until the lock is available.[5]

[5] Actually, the biggest disadvantage of CAS is the difficulty of constructing the surrounding algorithms correctly.
CAS performance varies widely across processors. On a single-CPU system, a CAS typically takes on the order of a handful of clock cycles, since no synchronization across processors is necessary. As of this writing, the cost of an uncontended CAS on multi-CPU systems ranges from about ten to about 150 cycles; CAS performance is a rapidly moving target and varies not only across architectures but even across versions of the same processor. Competitive forces will likely result in continued CAS performance improvement over the next several years. A good rule of thumb is that the cost of the "fast path" for uncontended lock acquisition and release on most processors is approximately twice the cost of a CAS.
15.2.3. CAS Support in the JVM
So, how does Java code convince the processor to execute a CAS on its behalf? Prior to Java 5.0, there was no way to do this short of writing native code. In Java 5.0, low-level support was added to expose CAS operations on int, long, and object references, and the JVM compiles these into the most efficient means provided by the underlying hardware. On platforms supporting CAS, the runtime inlines them into the appropriate machine instruction(s); in the worst case, if a CAS-like instruction is not available the JVM uses a spin lock. This low-level JVM support is used by the atomic variable classes (AtomicXxx in java.util.concurrent.atomic) to provide an efficient CAS operation on numeric and reference types; these atomic variable classes are used, directly or indirectly, to implement most of the classes in java.util.concurrent.
15.3. Atomic Variable Classes
Atomic variables are finer-grained and lighter-weight than locks, and are critical for implementing high-performance concurrent code on multiprocessor systems. Atomic variables limit the scope of contention to a single variable; this is as fine-grained as you can get (assuming your algorithm can even be implemented using such fine granularity). The fast (uncontended) path for updating an atomic variable is no slower than the fast path for acquiring a lock, and usually faster; the slow path is definitely faster than the slow path for locks because it does not involve suspending and rescheduling threads. With algorithms based on atomic variables instead of locks, threads are more likely to be able to proceed without delay and have an easier time recovering if they do experience contention.
The atomic variable classes provide a generalization of volatile variables to support atomic conditional read-modify-write operations. AtomicInteger represents an int value, and provides get and set methods with the same memory semantics as reads and writes to a volatile int. It also provides an atomic compareAndSet method (which if successful has the memory effects of both reading and writing a volatile variable) and, for convenience, atomic add, increment, and decrement methods. AtomicInteger bears a superficial resemblance to an extended Counter class, but offers far greater scalability under contention because it can directly exploit underlying hardware support for concurrency.

There are twelve atomic variable classes, divided into four groups: scalars, field updaters, arrays, and compound variables. The most commonly used atomic variables are the scalars: AtomicInteger, AtomicLong, AtomicBoolean, and AtomicReference. All support CAS; the Integer and Long versions support arithmetic as well. (To simulate atomic variables of other primitive types, you can cast short or byte values to and from int, and use floatToIntBits or doubleToLongBits for floating-point numbers.)
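As a sketch of that floating-point trick, an atomic double can be layered over an AtomicLong; AtomicDouble here is our illustrative name, not a class in java.util.concurrent.atomic:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicDouble {
    // Store the IEEE 754 bit pattern of the double in an AtomicLong
    private final AtomicLong bits;

    public AtomicDouble(double initialValue) {
        bits = new AtomicLong(Double.doubleToLongBits(initialValue));
    }

    public double get() {
        return Double.longBitsToDouble(bits.get());
    }

    public void set(double newValue) {
        bits.set(Double.doubleToLongBits(newValue));
    }

    public boolean compareAndSet(double expect, double update) {
        // Note: comparison is on bit patterns, so it treats NaN as equal
        // to NaN and distinguishes -0.0 from +0.0, unlike ==
        return bits.compareAndSet(Double.doubleToLongBits(expect),
                                  Double.doubleToLongBits(update));
    }
}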
The atomic array classes (available in Integer, Long, and Reference versions) are arrays whose elements can be updated atomically. The atomic array classes provide volatile access semantics to the elements of the array, a feature not available for ordinary arrays: a volatile array has volatile semantics only for the array reference, not for its elements. (The other types of atomic variables are discussed in Sections 15.4.3 and 15.4.4.)
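A small sketch of that distinction (the class and field names are ours):

import java.util.concurrent.atomic.AtomicIntegerArray;

public class HitCounters {
    private volatile int[] plain = new int[8];  // volatile applies to the reference only
    private final AtomicIntegerArray atomic = new AtomicIntegerArray(8);

    void recordPlain(int i) {
        plain[i]++;                  // not atomic, and the element write carries
                                     // no volatile visibility guarantee
    }

    void recordAtomic(int i) {
        atomic.incrementAndGet(i);   // atomic, with volatile read/write
                                     // semantics for the element itself
    }
}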
While the atomic scalar classes extend Number, they do not extend the primitive wrapper classes such as Integer or Long. In fact, they cannot: the primitive wrapper classes are immutable whereas the atomic variable classes are mutable. The atomic variable classes also do not redefine hashCode or equals; each instance is distinct. Like most mutable objects, they are not good candidates for keys in hash-based collections.

15.3.1. Atomics as "Better Volatiles"
In Section 3.4.2, we used a volatile reference to an immutable object to update multiple state variables atomically. That example relied on check-then-act, but in that particular case the race was harmless because we did not care if we occasionally lost an update. In most other situations, such a check-then-act would not be harmless and could compromise data integrity. For example, NumberRange on page 67 could not be implemented safely either with a volatile reference to an immutable holder object for the upper and lower bounds, or with atomic integers storing the bounds. Because an invariant constrains the two numbers and they cannot be updated simultaneously while preserving the invariant, a number range class using volatile references or multiple atomic integers will have unsafe check-then-act sequences.
We can combine the technique from OneValueCache with atomic references to close the race condition by atomically updating the reference to an immutable object holding the lower and upper bounds. CasNumberRange in Listing 15.3 uses an AtomicReference to an IntPair to hold the state; by using compareAndSet it can update the upper or lower bound without the race conditions of NumberRange.
Listing 15.3. Preserving Multivariable Invariants Using CAS.

public class CasNumberRange {
    @Immutable
    private static class IntPair {
        final int lower;  // Invariant: lower <= upper
        final int upper;
        ...
    }

    private final AtomicReference<IntPair> values =
        new AtomicReference<IntPair>(new IntPair(0, 0));

    public int getLower() { return values.get().lower; }
    public int getUpper() { return values.get().upper; }

    public void setLower(int i) {
        while (true) {
            IntPair oldv = values.get();
            if (i > oldv.upper)
                throw new IllegalArgumentException(
                    "Can't set lower to " + i + " > upper");
            IntPair newv = new IntPair(i, oldv.upper);
            if (values.compareAndSet(oldv, newv))
                return;
        }
    }
    // similarly for setUpper (sketched below)
}
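For completeness, the elided setUpper mirrors setLower; a sketch of what it would look like inside CasNumberRange, assuming the obvious two-argument IntPair constructor:

    public void setUpper(int i) {
        while (true) {
            IntPair oldv = values.get();
            if (i < oldv.lower)
                throw new IllegalArgumentException(
                    "Can't set upper to " + i + " < lower");
            // New immutable pair keeps the old lower bound
            IntPair newv = new IntPair(oldv.lower, i);
            if (values.compareAndSet(oldv, newv))
                return;
        }
    }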
15.3.2. Performance Comparison: Locks Versus Atomic Variables
To demonstrate the differences in scalability between locks and atomic variables, we constructed a benchmark comparing several implementations of a pseudorandom number generator (PRNG). In a PRNG, the next "random" number is a deterministic function of the previous number, so a PRNG must remember the previous number as part of its state.

Listings 15.4 and 15.5 show two implementations of a thread-safe PRNG, one using ReentrantLock and the other using AtomicInteger. The test driver invokes each repeatedly; each iteration generates a random number (which fetches and modifies the shared seed state) and also performs a number of "busy-work" iterations that operate strictly on thread-local data. This simulates typical operations that include some portion of operating on shared state and some portion of operating on thread-local state.
Figures 15.1 and 15.2 show throughput with low and moderate levels of simulated work in each iteration. With a low level of thread-local computation, the lock or atomic variable experiences heavy contention; with more thread-local computation, the lock or atomic variable experiences less contention since it is accessed less often by each thread.
Figure 15.1. Lock and AtomicInteger Performance Under High Contention.

Figure 15.2. Lock and AtomicInteger Performance Under Moderate Contention.
Listing 15.4. Random Number Generator Using ReentrantLock.

@ThreadSafe
public class ReentrantLockPseudoRandom extends PseudoRandom {
    private final Lock lock = new ReentrantLock(false);
    private int seed;

    ReentrantLockPseudoRandom(int seed) {
        this.seed = seed;
    }

    public int nextInt(int n) {
        lock.lock();
        try {
            int s = seed;
            seed = calculateNext(s);
            int remainder = s % n;
            return remainder > 0 ? remainder : remainder + n;
        } finally {
            lock.unlock();
        }
    }
}


Listing 15.5. Random Number Generator Using AtomicInteger.

@ThreadSafe
public class AtomicPseudoRandom extends PseudoRandom {
    private AtomicInteger seed;

    AtomicPseudoRandom(int seed) {
        this.seed = new AtomicInteger(seed);
    }

    public int nextInt(int n) {
        while (true) {
            int s = seed.get();
            int nextSeed = calculateNext(s);
            if (seed.compareAndSet(s, nextSeed)) {
                int remainder = s % n;
                return remainder > 0 ? remainder : remainder + n;
            }
        }
    }
}
As these graphs show, at high contention levels locking tends to outperform atomic variables, but at more realistic contention levels atomic variables outperform locks.[6] This is because a lock reacts to contention by suspending threads, reducing CPU usage and synchronization traffic on the shared memory bus. (This is similar to how blocking producers in a producer-consumer design reduces the load on consumers and thereby lets them catch up.) On the other hand, with atomic variables, contention management is pushed back to the calling class. Like most CAS-based algorithms, AtomicPseudoRandom reacts to contention by trying again immediately, which is usually the right approach but in a high-contention environment just creates more contention.

[6] The same holds true in other domains: traffic lights provide better throughput for high traffic but rotaries provide better throughput for low traffic; the contention scheme used by Ethernet networks performs better at low traffic levels, but the token-passing scheme used by token ring networks does better with heavy traffic.
Before we condemn AtomicPseudoRandom as poorly written, or atomic variables as a poor choice compared to locks, we should realize that the level of contention in Figure 15.1 is unrealistically high: no real program does nothing but contend for a lock or atomic variable. In practice, atomics tend to scale better than locks because atomics deal more effectively with typical contention levels.
The performance reversal between locks and atomics at differing levels of contention illustrates the strengths and weaknesses of each. With low to moderate contention, atomics offer better scalability; with high contention, locks offer better contention avoidance. (CAS-based algorithms also outperform lock-based ones on single-CPU systems, since a CAS always succeeds on a single-CPU system except in the unlikely case that a thread is preempted in the middle of the read-modify-write operation.)
Figures 15.1 and 15.2 include a third curve, an implementation of PseudoRandom that uses a ThreadLocal for the PRNG state. This implementation approach changes the behavior of the class (each thread sees its own private sequence of pseudorandom numbers, instead of all threads sharing one sequence) but illustrates that it is often cheaper to not share state at all if it can be avoided. We can improve scalability by dealing more effectively with contention, but true scalability is achieved only by eliminating contention entirely.
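A minimal sketch of such a thread-confined variant, assuming the same hypothetical PseudoRandom base class and calculateNext method used by Listings 15.4 and 15.5:

public class ThreadLocalPseudoRandom extends PseudoRandom {
    // Each thread gets its own one-element array holding its private seed
    private final ThreadLocal<int[]> seed;

    ThreadLocalPseudoRandom(final int initialSeed) {
        seed = new ThreadLocal<int[]>() {
            protected int[] initialValue() {
                return new int[] { initialSeed };
            }
        };
    }

    public int nextInt(int n) {
        int[] cell = seed.get();     // thread-confined state: no lock or CAS needed
        int s = cell[0];
        cell[0] = calculateNext(s);
        int remainder = s % n;
        return remainder > 0 ? remainder : remainder + n;
    }
}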
15.4. Nonblocking Algorithms
Lock-based algorithms are at risk for a number of liveness failures. If a thread holding a lock is delayed due to blocking I/O, a page fault, or other delay, it is possible that no thread will make progress. An algorithm is called nonblocking if failure or suspension of any thread cannot cause failure or suspension of another thread; an algorithm is called lock-free if, at each step, some thread can make progress. Algorithms that use CAS exclusively for coordination between threads can, if constructed correctly, be both nonblocking and lock-free. An uncontended CAS always succeeds, and if multiple threads contend for a CAS, one always wins and therefore makes progress. Nonblocking algorithms are also immune to deadlock or priority inversion (though they can exhibit starvation or livelock because they can involve repeated retries). We've seen one nonblocking algorithm so far: CasCounter. Good nonblocking algorithms are known for many common data structures, including stacks, queues, priority queues, and hash tables, though designing new ones is a task best left to experts.
15.4.1. A Nonblocking Stack
Nonblocking algorithms are considerably more complicated than their lock-based equivalents. The key to creating nonblocking algorithms is figuring out how to limit the scope of atomic changes to a single variable while maintaining data consistency. In linked collection classes such as queues, you can sometimes get away with expressing state transformations as changes to individual links and using an AtomicReference to represent each link that must be updated atomically.
Stacks are the simplest linked data structure: each element refers to only one other element and each element is referred to by only one object reference. ConcurrentStack in Listing 15.6 shows how to construct a stack using atomic references. The stack is a linked list of Node elements, rooted at top, each of which contains a value and a link to the next element. The push method prepares a new link node whose next field refers to the current top of the stack, and then uses CAS to try to install it on the top of the stack. If the same node is still on the top of the stack as when we started, the CAS succeeds; if the top node has changed (because another thread has added or removed elements since we started), the CAS fails and push updates the new node based on the current stack state and tries again. In either case, the stack is still in a consistent state after the CAS.
CasCounter and ConcurrentStack illustrate characteristics of all nonblocking algorithms: some work is done speculatively and may have to be redone. In ConcurrentStack, when we construct the Node representing the new element, we are hoping that the value of the next reference will still be correct by the time it is installed on the stack, but are prepared to retry in the event of contention.
Nonblocking algorithms like ConcurrentStack derive their thread safety from the fact that, like locking, compareAndSet provides both atomicity and visibility guarantees. When a thread changes the state of the stack, it does so with a compareAndSet, which has the memory effects of a volatile write. When a thread examines the stack, it does so by calling get on the same AtomicReference, which has the memory effects of a volatile read. So any changes made by one thread are safely published to any other thread that examines the state of the list. And the list is modified with a compareAndSet that atomically either updates the top reference or fails if it detects interference from another thread.
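A quick usage sketch (the demo class is ours, not from the book); note that callers never block, even when several threads push and pop at once:

public class StackDemo {
    public static void main(String[] args) {
        ConcurrentStack<String> stack = new ConcurrentStack<String>();
        stack.push("a");
        stack.push("b");
        System.out.println(stack.pop());  // "b": last in, first out
        System.out.println(stack.pop());  // "a"
        System.out.println(stack.pop());  // null: the stack is empty
    }
}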
15.4.2. A Nonblocking Linked List
The two nonblocking algorithms we've seen so far, the counter and the stack, illustrate the basic pattern of using CAS to update a value speculatively, retrying if the update fails. The trick to building nonblocking algorithms is to limit the scope of atomic changes to a single variable. With counters this is trivial, and with a stack it is straightforward enough, but for more complicated data structures such as queues, hash tables, or trees, it can get a lot trickier.
A linked queue is more complicated than a stack because it must support fast access to both the head and the tail. To do this, it maintains separate head and tail pointers. Two pointers refer to the node at the tail: the next pointer of the current last element, and the tail pointer. To insert a new element successfully, both of these pointers must be updated atomically. At first glance, this cannot be done with atomic variables; separate CAS operations are required to update the two pointers, and if the first succeeds but the second one fails the queue is left in an inconsistent state. And, even if both operations succeed, another thread could try to access the queue between the first and the second. Building a nonblocking algorithm for a linked queue requires a plan for both these situations.

Listing 15.6. Nonblocking Stack Using Treiber's Algorithm (Treiber, 1986).

@ThreadSafe
public class ConcurrentStack <E> {
    AtomicReference<Node<E>> top = new AtomicReference<Node<E>>();

    public void push(E item) {
        Node<E> newHead = new Node<E>(item);
        Node<E> oldHead;
        do {
            oldHead = top.get();
            newHead.next = oldHead;
        } while (!top.compareAndSet(oldHead, newHead));
    }

    public E pop() {
        Node<E> oldHead;
        Node<E> newHead;
        do {
            oldHead = top.get();
            if (oldHead == null)
                return null;
            newHead = oldHead.next;
        } while (!top.compareAndSet(oldHead, newHead));
        return oldHead.item;
    }

    private static class Node <E> {
        public final E item;
        public Node<E> next;

        public Node(E item) {
            this.item = item;
        }
    }
}
We need several tricks to develop this plan. The first is to ensure that the data structure is always in a consistent state, even in the middle of a multistep update. That way, if thread A is in the middle of an update when thread B arrives on the scene, B can tell that an operation has been partially completed and knows not to try immediately to apply its own update. Then B can wait (by repeatedly examining the queue state) until A finishes, so that the two don't get in each other's way.
While this trick by itself would suffice to let threads "take turns" accessing the data structure without corrupting it, if one thread failed in the middle of an update, no thread would be able to access the queue at all. To make the algorithm nonblocking, we must ensure that the failure of a thread does not prevent other threads from making progress. Thus, the second trick is to make sure that if B arrives to find the data structure in the middle of an update by A, enough information is already embodied in the data structure for B to finish the update for A. If B "helps" A by finishing A's operation, B can proceed with its own operation without waiting for A. When A gets around to finishing its operation, it will find that B already did the job for it.
LinkedQueue in Listing 15.7 shows the insertion portion of the Michael-Scott nonblocking linked-queue algorithm (Michael and Scott, 1996), which is used by ConcurrentLinkedQueue. As in many queue algorithms, an empty queue consists of a "sentinel" or "dummy" node, and the head and tail pointers are initialized to refer to the sentinel. The tail pointer always refers to the sentinel (if the queue is empty), the last element in the queue, or (in the case that an operation is in mid-update) the second-to-last element. Figure 15.3 illustrates a queue with two elements in the normal, or quiescent, state.
Figure 15.3. Queue with Two Elements in Quiescent State.
Inserting a new element involves updating two pointers. The first links the new node to the end of the list by updating the next pointer of the current last element; the second swings the tail pointer around to point to the new last element. Between these two operations, the queue is in the intermediate state, shown in Figure 15.4. After the second update, the queue is again in the quiescent state, shown in Figure 15.5.
Figure 15.4. Queue in Intermediate State During Insertion.

Figure 15.5. Queue Again in Quiescent State After Insertion Is Complete.
The key observation that enables both of the required tricks is that if the queue is in the quiescent state, the next field of the link node pointed to by tail is null, and if it is in the intermediate state, tail.next is non-null. So any thread can immediately tell the state of the queue by examining tail.next. Further, if the queue is in the intermediate state, it can be restored to the quiescent state by advancing the tail pointer forward one node, finishing the operation for whichever thread is in the middle of inserting an element.[7]

[7] For a full account of the correctness of this algorithm, see (Michael and Scott, 1996) or (Herlihy and Shavit, 2006).
LinkedQueue.put first checks to see if the queue is in the intermediate state before attempting to insert a new element (step A). If it is, then some other thread is already in the process of inserting an element (between its steps C and D). Rather than wait for that thread to finish, the current thread helps it by finishing the operation for it, advancing the tail pointer (step B). It then repeats this check in case another thread has started inserting a new element, advancing the tail pointer until it finds the queue in the quiescent state so it can begin its own insertion.
The CAS at step C, which links the new node at the tail of the queue, could fail if two threads try to insert an element at the same time. In that case, no harm is done: no changes have been made, and the current thread can just reload the tail pointer and try again. Once C succeeds, the insertion is considered to have taken effect; the second CAS (step D) is considered "cleanup", since it can be performed either by the inserting thread or by any other thread. If D fails, the inserting thread returns anyway rather than retrying the CAS, because no retry is needed: another thread has already finished the job in its step B! This works because before any thread tries to link a new node into the queue, it first checks to see if the queue needs cleaning up by checking if tail.next is non-null. If it is, it advances the tail pointer first (perhaps multiple times) until the queue is in the quiescent state.

Listing 15.7. Insertion in the Michael-Scott Nonblocking Queue Algorithm (Michael and Scott, 1996).

@ThreadSafe
public class LinkedQueue <E> {
    private static class Node <E> {
        final E item;
        final AtomicReference<Node<E>> next;

        public Node(E item, Node<E> next) {
            this.item = item;
            this.next = new AtomicReference<Node<E>>(next);
        }
    }

    private final Node<E> dummy = new Node<E>(null, null);
    private final AtomicReference<Node<E>> head
        = new AtomicReference<Node<E>>(dummy);
    private final AtomicReference<Node<E>> tail
        = new AtomicReference<Node<E>>(dummy);

    public boolean put(E item) {
        Node<E> newNode = new Node<E>(item, null);
        while (true) {
            Node<E> curTail = tail.get();
            Node<E> tailNext = curTail.next.get();
            if (curTail == tail.get()) {
                if (tailNext != null) {
                    // Queue in intermediate state, advance tail
                    tail.compareAndSet(curTail, tailNext);
                } else {
                    // In quiescent state, try inserting new node
                    if (curTail.next.compareAndSet(null, newNode)) {
                        // Insertion succeeded, try advancing tail
                        tail.compareAndSet(curTail, newNode);
                        return true;
                    }
                }
            }
        }
    }
}
15.4.3. Atomic Field Updaters
Listing 15.7 illustrates the algorithm used by ConcurrentLinkedQueue, but the actual implementation is a bit different. Instead of representing each Node with an atomic reference, ConcurrentLinkedQueue uses an ordinary volatile reference and updates it through the reflection-based AtomicReferenceFieldUpdater, as shown in Listing 15.8.
Listing 15.8. Using Atomic Field Updaters in ConcurrentLinkedQueue.

private class Node<E> {
    private final E item;
    private volatile Node<E> next;

    public Node(E item) {
        this.item = item;
    }
}

private static AtomicReferenceFieldUpdater<Node, Node> nextUpdater
    = AtomicReferenceFieldUpdater.newUpdater(
        Node.class, Node.class, "next");
The atomic field updater classes (available in Integer, Long, and Reference versions) represent a reflection-based "view" of an existing volatile field so that CAS can be used on existing volatile fields. The updater classes have no constructors; to create one, you call the newUpdater factory method, specifying the class and field name. The field updater classes are not tied to a specific instance; one can be used to update the target field for any instance of the target class. The atomicity guarantees for the updater classes are weaker than for the regular atomic classes because you cannot guarantee that the underlying fields will not be modified directly; the compareAndSet and arithmetic methods guarantee atomicity only with respect to other threads using the atomic field updater methods.
In ConcurrentLinkedQueue, updates to the next field of a Node are applied using the compareAndSet method of nextUpdater. This somewhat circuitous approach is used entirely for performance reasons. For frequently allocated, short-lived objects like queue link nodes, eliminating the creation of an AtomicReference for each Node is significant enough to reduce the cost of insertion operations. However, in nearly all situations, ordinary atomic variables perform just fine; in only a few cases will the atomic field updaters be needed. (The atomic field updaters are also useful when you want to perform atomic updates while preserving the serialized form of an existing class.)
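For illustration only, a hypothetical helper showing how such an update is applied through the updater; it is equivalent to curTail.next.compareAndSet(null, newNode) in Listing 15.7 (the raw Node type arguments follow the declaration in Listing 15.8, so this compiles with an unchecked warning):

// Assumed to live in the queue class that declares Node and nextUpdater
private boolean linkAfter(Node<E> curTail, Node<E> newNode) {
    // CAS the volatile next field of curTail from null to newNode
    return nextUpdater.compareAndSet(curTail, null, newNode);
}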
15.4.4. The ABA Problem
The ABA problem is an anomaly that can arise from the naive use of compare-and-swap in algorithms where nodes can be recycled (primarily in environments without garbage collection). A CAS effectively asks "Is the value of V still A?", and proceeds with the update if so. In most situations, including the examples presented in this chapter, this is entirely sufficient. However, sometimes we really want to ask "Has the value of V changed since I last observed it to be A?" For some algorithms, changing V from A to B and then back to A still counts as a change that requires us to retry some algorithmic step.
This ABA problem can arise in algorithms that do their own memory management for link node objects. In this case, that the head of a list still refers to a previously observed node is not enough to imply that the contents of the list have not changed. If you cannot avoid the ABA problem by letting the garbage collector manage link nodes for you, there is still a relatively simple solution: instead of updating the value of a reference, update a pair of values, a reference and a version number. Even if the value changes from A to B and back to A, the version numbers will be different.
AtomicStampedReference (and its cousin AtomicMarkableReference) provide atomic conditional update on a pair of variables. AtomicStampedReference updates an object reference-integer pair, allowing "versioned" references that are immune[8] to the ABA problem. Similarly, AtomicMarkableReference updates an object reference-boolean pair that is used by some algorithms to let a node remain in a list while being marked as deleted.[9]

[8] In practice, anyway; theoretically the counter could wrap.

[9] Many processors provide a double-wide CAS (CAS2 or CASX) operation that can operate on a pointer-integer pair, which would make this operation reasonably efficient. As of Java 6, AtomicStampedReference does not use double-wide CAS even on platforms that support it. (Double-wide CAS differs from DCAS, which operates on two unrelated memory locations; as of this writing, no current processor implements DCAS.)
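A brief sketch of the versioned-reference idea (the wrapper class is ours, not from the book):

import java.util.concurrent.atomic.AtomicStampedReference;

public class StampedHolder<V> {
    private final AtomicStampedReference<V> ref =
        new AtomicStampedReference<V>(null, 0);

    public boolean replace(V expected, V update) {
        int[] stampHolder = new int[1];
        V current = ref.get(stampHolder);  // read reference and stamp together
        return current == expected
            && ref.compareAndSet(expected, update,
                                 stampHolder[0], stampHolder[0] + 1);
        // Even if the reference goes A -> B -> A, the stamp has advanced,
        // so a stale CAS fails instead of silently succeeding
    }
}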
Summary
Nonblocking algorithms maintain thread safety by using low-level concurrency primitives such as compare-and-swap instead of locks. These low-level primitives are exposed through the atomic variable classes, which can also be used as "better volatile variables" providing atomic update operations for integers and object references.

Nonblocking algorithms are difficult to design and implement, but can offer better scalability under typical conditions and greater resistance to liveness failures. Many of the advances in concurrent performance from one JVM version to the next come from the use of nonblocking algorithms, both within the JVM and in the platform libraries.


Chapter 16. The Java Memory Model
Throughout this book, we've mostly avoided the low-level details of the Java Memory Model (JMM) and instead focused on higher-level design issues such as safe publication and specification of, and adherence to, synchronization policies. These derive their safety from the JMM, and you may find it easier to use these mechanisms effectively when you understand why they work. This chapter pulls back the curtain to reveal the low-level requirements and guarantees of the Java Memory Model and the reasoning behind some of the higher-level design rules offered in this book.
16.1. What is a Memory Model, and Why Would I Want One?
Suppose one thread assigns a value to aVariable:

aVariable = 3;

A memory model addresses the question "Under what conditions does a thread that reads aVariable see the value 3?" This may sound like a dumb question, but in the absence of synchronization, there are a number of reasons a thread might not immediately, or ever, see the results of an operation in another thread. Compilers may generate instructions in a different order than the "obvious" one suggested by the source code, or store variables in registers instead of in memory; processors may execute instructions in parallel or out of order; caches may vary the order in which writes to variables are committed to main memory; and values stored in processor-local caches may not be visible to other processors. These factors can prevent a thread from seeing the most up-to-date value for a variable and can cause memory actions in other threads to appear to happen out of order if you don't use adequate synchronization.
In a single-threaded environment, all these tricks played on our program by the environment are hidden from us and have no effect other than to speed up execution. The Java Language Specification requires the JVM to maintain within-thread as-if-serial semantics: as long as the program has the same result as if it were executed in program order in a strictly sequential environment, all these games are permissible. And that's a good thing, too, because these rearrangements are responsible for much of the improvement in computing performance in recent years. Certainly higher clock rates have contributed to improved performance, but so has increased parallelism: pipelined superscalar execution units, dynamic instruction scheduling, speculative execution, and sophisticated multilevel memory caches. As processors have become more sophisticated, so too have compilers, rearranging instructions to facilitate optimal execution and using sophisticated global register-allocation algorithms. And as processor manufacturers transition to multicore processors, largely because clock rates are getting harder to increase economically, hardware parallelism will only increase.
In a multithreaded environment, the illusion of sequentiality cannot be maintained without significant performance cost. Since most of the time threads within a concurrent application are each "doing their own thing", excessive inter-thread coordination would only slow down the application to no real benefit. It is only when multiple threads share data that it is necessary to coordinate their activities, and the JVM relies on the program to identify when this is happening by using synchronization.
The JMM specifies the minimal guarantees the JVM must make about when writes to variables become visible to other threads. It was designed to balance the need for predictability and ease of program development with the realities of implementing high-performance JVMs on a wide range of popular processor architectures. Some aspects of the JMM may be disturbing at first if you are not familiar with the tricks used by modern processors and compilers to squeeze extra performance out of your program.
16.1.1. Platform Memory Models
In a shared-memory multiprocessor architecture, each processor has its own cache that is periodically reconciled with main memory. Processor architectures provide varying degrees of cache coherence; some provide minimal guarantees that allow different processors to see different values for the same memory location at virtually any time. The operating system, compiler, and runtime (and sometimes, the program, too) must make up the difference between what the hardware provides and what thread safety requires.
Ensuring that every processor knows what every other processor is doing at all times is expensive. Most of the time this information is not needed, so processors relax their memory-coherency guarantees to improve performance. An architecture's memory model tells programs what guarantees they can expect from the memory system, and specifies the special instructions required (called memory barriers or fences) to get the additional memory coordination guarantees required when sharing data. In order to shield the Java developer from the differences between memory models across architectures, Java provides its own memory model, and the JVM deals with the differences between the JMM and the underlying platform's memory model by inserting memory barriers at the appropriate places.
One convenient mental model for program execution is to imagine that there is a single order in which the operations happen in a program, regardless of what processor they execute on, and that each read of a variable will see the last write in the execution order to that variable by any processor. This happy, if unrealistic, model is called sequential consistency. Software developers often mistakenly assume sequential consistency, but no modern multiprocessor offers sequential consistency and the JMM does not either. The classic sequential computing model, the von Neumann model, is only a vague approximation of how modern multiprocessors behave.
The bottom line is that modern shared-memory multiprocessors (and compilers) can do some surprising things when data is shared across threads, unless you've told them not to through the use of memory barriers. Fortunately, Java programs need not specify the placement of memory barriers; they need only identify when shared state is being accessed, through the proper use of synchronization.
16.1.2. Reordering
In describing race conditions and atomicity failures in Chapter 2, we used interaction diagrams depicting "unlucky timing" where the scheduler interleaved operations so as to cause incorrect results in insufficiently synchronized programs. To make matters worse, the JMM can permit actions to appear to execute in different orders from the perspective of different threads, making reasoning about ordering in the absence of synchronization even more complicated. The various reasons why operations might be delayed or appear to execute out of order can all be grouped into the general category of reordering.
PossibleReordering in Listing 16.1 illustrates how difficult it is to reason about the behavior of even the simplest concurrent programs unless they are correctly synchronized. It is fairly easy to imagine how PossibleReordering could print (1, 0), or (0, 1), or (1, 1): thread A could run to completion before B starts, B could run to completion before A starts, or their actions could be interleaved. But, strangely, PossibleReordering can also print (0, 0)! The actions in each thread have no dataflow dependence on each other, and accordingly can be executed out of order. (Even if they are executed in order, the timing by which caches are flushed to main memory can make it appear, from the perspective of B, that the assignments in A occurred in the opposite order.) Figure 16.1 shows a possible interleaving with reordering that results in printing (0, 0).
Figure 16.1. Interleaving Showing Reordering in PossibleReordering.
PossibleReordering is a trivial program, and it is still surprisingly tricky to enumerate its possible results. Reordering at the memory level can make programs behave unexpectedly. It is prohibitively difficult to reason about ordering in the absence of synchronization; it is much easier to ensure that your program uses synchronization appropriately. Synchronization inhibits the compiler, runtime, and hardware from reordering memory operations in ways that would violate the visibility guarantees provided by the JMM.[1]

[1] On most popular processor architectures, the memory model is strong enough that the performance cost of a volatile read is in line with that of a nonvolatile read.
16.1.3. The Java Memory Model in 500 Words or Less
The Java Memory Model is specified in terms of actions, which include reads and writes to variables, locks and unlocks of monitors, and starting and joining with threads. The JMM defines a partial ordering[2] called happens-before on all actions within the program. To guarantee that the thread executing action B can see the results of action A (whether or not A and B occur in different threads), there must be a happens-before relationship between A and B. In the absence of a happens-before ordering between two operations, the JVM is free to reorder them as it pleases.

[2] A partial ordering is a relation on a set that is antisymmetric, reflexive, and transitive, but for any two elements x and y, it need not be the case that x ≺ y or y ≺ x. We use partial orderings every day to express preferences; we may prefer sushi to cheeseburgers and Mozart to Mahler, but we don't necessarily have a clear preference between cheeseburgers and Mozart.

Listing 16.1. Insufficiently Synchronized Program that can have Surprising Results. Don't do this.

public class PossibleReordering {
    static int x = 0, y = 0;
    static int a = 0, b = 0;

    public static void main(String[] args)
            throws InterruptedException {
        Thread one = new Thread(new Runnable() {
            public void run() {
                a = 1;
                x = b;
            }
        });
        Thread other = new Thread(new Runnable() {
            public void run() {
                b = 1;
                y = a;
            }
        });
        one.start(); other.start();
        one.join(); other.join();
        System.out.println("( " + x + "," + y + ")");
    }
}
A data race occurs when a variable is read by more than one thread, and written by at least one thread, but the reads and writes are not ordered by happens-before. A correctly synchronized program is one with no data races; correctly synchronized programs exhibit sequential consistency, meaning that all actions within the program appear to happen in a fixed, global order.
The rules for happens-before are:

Program order rule. Each action in a thread happens-before every action in that thread that comes later in the program order.

Monitor lock rule. An unlock on a monitor lock happens-before every subsequent lock on that same monitor lock.[3]

Volatile variable rule. A write to a volatile field happens-before every subsequent read of that same field.[4]

Thread start rule. A call to Thread.start on a thread happens-before every action in the started thread.

Thread termination rule. Any action in a thread happens-before any other thread detects that that thread has terminated, either by successfully returning from Thread.join or by Thread.isAlive returning false.

Interruption rule. A thread calling interrupt on another thread happens-before the interrupted thread detects the interrupt (either by having InterruptedException thrown, or invoking isInterrupted or interrupted).

Finalizer rule. The end of a constructor for an object happens-before the start of the finalizer for that object.

Transitivity. If A happens-before B, and B happens-before C, then A happens-before C.

[3] Locks and unlocks on explicit Lock objects have the same memory semantics as intrinsic locks.

[4] Reads and writes of atomic variables have the same memory semantics as volatile variables.
Even though actions are only partially ordered, synchronization actions (lock acquisition and release, and reads and writes of volatile variables) are totally ordered. This makes it sensible to describe happens-before in terms of "subsequent" lock acquisitions and reads of volatile variables.
Figure 16.2 illustrates the happens-before relation when two threads synchronize using a common lock. All the actions within thread A are ordered by the program order rule, as are the actions within thread B. Because A releases lock M and B subsequently acquires M, all the actions in A before releasing the lock are therefore ordered before the actions in B after acquiring the lock. When two threads synchronize on different locks, we can't say anything about the ordering of actions between them; there is no happens-before relation between the actions in the two threads.

Figure 16.2. Illustration of Happens-before in the Java Memory Model.
16.1.4. Piggybacking on Synchronization
Because of the strength of the happens-before ordering, you can sometimes piggyback on the visibility properties of an existing synchronization. This entails combining the program order rule for happens-before with one of the other ordering rules (usually the monitor lock or volatile variable rule) to order accesses to a variable not otherwise guarded by a lock. This technique is very sensitive to the order in which statements occur and is therefore quite fragile; it is an advanced technique that should be reserved for squeezing the last drop of performance out of the most performance-critical classes like ReentrantLock.
The implementation of the protected AbstractQueuedSynchronizer methods in FutureTask illustrates piggybacking. AQS maintains an integer of synchronizer state that FutureTask uses to store the task state: running, completed, or cancelled. But FutureTask also maintains additional variables, such as the result of the computation. When one thread calls set to save the result and another thread calls get to retrieve it, the two had better be ordered by happens-before. This could be done by making the reference to the result volatile, but it is possible to exploit existing synchronization to achieve the same result at lower cost.
FutureTask is carefully crafted to ensure that a successful call to tryReleaseShared always happens-before a subsequent call to tryAcquireShared; tryReleaseShared always writes to a volatile variable that is read by tryAcquireShared. Listing 16.2 shows the innerSet and innerGet methods that are called when the result is saved or retrieved; since innerSet writes result before calling releaseShared (which calls tryReleaseShared) and innerGet reads result after calling acquireShared (which calls tryAcquireShared), the program order rule combines with the volatile variable rule to ensure that the write of result in innerSet happens-before the read of result in innerGet.

Listing 16.2. Inner Class of FutureTask Illustrating Synchronization Piggybacking.

// Inner class of FutureTask
private final class Sync extends AbstractQueuedSynchronizer {
    private static final int RUNNING = 1, RAN = 2, CANCELLED = 4;
    private V result;
    private Exception exception;

    void innerSet(V v) {
        while (true) {
            int s = getState();
            if (ranOrCancelled(s))
                return;
            if (compareAndSetState(s, RAN))
                break;
        }
        result = v;
        releaseShared(0);
        done();
    }

    V innerGet() throws InterruptedException, ExecutionException {
        acquireSharedInterruptibly(0);
        if (getState() == CANCELLED)
            throw new CancellationException();
        if (exception != null)
            throw new ExecutionException(exception);
        return result;
    }
}
We call this technique "piggybacking" because it uses an existing happens-before ordering that was created for some other reason to ensure the visibility of object X, rather than creating a happens-before ordering specifically for publishing X.
Piggybacking of the sort employed by FutureTask is quite fragile and should not be undertaken casually. However, in some cases piggybacking is perfectly reasonable, such as when a class commits to a happens-before ordering between methods as part of its specification. For example, safe publication using a BlockingQueue is a form of piggybacking. One thread putting an object on a queue and another thread subsequently retrieving it constitutes safe publication because there is guaranteed to be sufficient internal synchronization in a BlockingQueue implementation to ensure that the enqueue happens-before the dequeue.
Other happens-before orderings guaranteed by the class library include:

Placing an item in a thread-safe collection happens-before another thread retrieves that item from the collection;

Counting down on a CountDownLatch happens-before a thread returns from await on that latch (illustrated in the sketch below);

Releasing a permit to a Semaphore happens-before acquiring a permit from that same Semaphore;

Actions taken by the task represented by a Future happens-before another thread successfully returns from Future.get;

Submitting a Runnable or Callable to an Executor happens-before the task begins execution; and

A thread arriving at a CyclicBarrier or Exchanger happens-before the other threads are released from that same barrier or exchange point. If CyclicBarrier uses a barrier action, arriving at the barrier happens-before the barrier action, which in turn happens-before threads are released from the barrier.
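For instance, the CountDownLatch guarantee means that a write to a plain (non-volatile) field made before countDown is visible to a thread after await returns. A minimal sketch (the demo class is ours):

import java.util.concurrent.CountDownLatch;

public class LatchVisibility {
    static int data = 0;  // deliberately not volatile
    static final CountDownLatch ready = new CountDownLatch(1);

    public static void main(String[] args) throws InterruptedException {
        new Thread(new Runnable() {
            public void run() {
                data = 42;           // happens-before countDown by program order...
                ready.countDown();
            }
        }).start();
        ready.await();               // ...and countDown happens-before await returning
        System.out.println(data);    // guaranteed to print 42, by transitivity
    }
}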
16.2. Publication
Chapter 3 explored how an object could be safely or improperly published. The safe publication techniques described there derive their safety from guarantees provided by the JMM; the risks of improper publication are consequences of the absence of a happens-before ordering between publishing a shared object and accessing it from another thread.
16.2.1. Unsafe Publication
The possibility of reordering in the absence of a happens-before relationship explains why publishing an object without adequate synchronization can allow another thread to see a partially constructed object (see Section 3.5). Initializing a new object involves writing to variables: the new object's fields. Similarly, publishing a reference involves writing to another variable: the reference to the new object. If you do not ensure that publishing the shared reference happens-before another thread loads that shared reference, then the write of the reference to the new object can be reordered (from the perspective of the thread consuming the object) with the writes to its fields. In that case, another thread could see an up-to-date value for the object reference but out-of-date values for some or all of that object's state: a partially constructed object.
Unsafe publication can happen as a result of an incorrect lazy initialization, as shown in Listing 16.3. At first glance, the only problem here seems to be the race condition described in Section 2.2.2. Under certain circumstances, such as when all instances of the Resource are identical, you might be willing to overlook these (along with the inefficiency of possibly creating the Resource more than once). Unfortunately, even if these defects are overlooked, UnsafeLazyInitialization is still not safe, because another thread could observe a reference to a partially constructed Resource.
Listing 16.3. Unsafe Lazy Initialization. Don't do this.

@NotThreadSafe
public class UnsafeLazyInitialization {
    private static Resource resource;

    public static Resource getInstance() {
        if (resource == null)
            resource = new Resource();  // unsafe publication
        return resource;
    }
}
Suppose thread A is the first to invoke getInstance. It sees that resource is null, instantiates a new Resource, and sets resource to reference it. When thread B later calls getInstance, it might see that resource already has a non-null value and just use the already constructed Resource. This might look harmless at first, but there is no happens-before ordering between the writing of resource in A and the reading of resource in B. A data race has been used to publish the object, and therefore B is not guaranteed to see the correct state of the Resource.
The Resource constructor changes the fields of the freshly allocated Resource from their default values (written by the Object constructor) to their initial values. Since neither thread used synchronization, B could possibly see A's actions in a different order than A performed them. So even though A initialized the Resource before setting resource to reference it, B could see the write to resource as occurring before the writes to the fields of the Resource. B could thus see a partially constructed Resource that may well be in an invalid state, and whose state may unexpectedly change later.
With the exception of immutable objects, it is not safe to use an object that has been initialized by another thread unless the publication happens-before the consuming thread uses it.
16.2.2. Safe Publication
The safe-publication idioms described in Chapter 3 ensure that the published object is visible to other threads because they ensure the publication happens-before the consuming thread loads a reference to the published object. If thread A places X on a BlockingQueue (and no thread subsequently modifies it) and thread B retrieves it from the queue, B is guaranteed to see X as A left it. This is because the BlockingQueue implementations have sufficient internal synchronization to ensure that the put happens-before the take. Similarly, using a shared variable guarded by a lock or a shared volatile variable ensures that reads and writes of that variable are ordered by happens-before.
This happens-before guarantee is actually a stronger promise of visibility and ordering than that made by safe publication. When X is safely published from A to B, the safe publication guarantees visibility of the state of X, but not of the state of other variables A may have touched. But if A putting X on a queue happens-before B fetches X from that queue, not only does B see X in the state that A left it (assuming that X has not been subsequently modified by A or anyone else), but B sees everything A did before the handoff (again, subject to the same caveat).[5]

[5] The JMM guarantees that B sees a value at least as up-to-date as the value that A wrote; subsequent writes may or may not be visible.
Why did we focus so heavily on @GuardedBy and safe publication, when the JMM already provides us with the more powerful happens-before? Thinking in terms of handing off object ownership and publication fits better into most program designs than thinking in terms of visibility of individual memory writes. The happens-before ordering operates at the level of individual memory accesses; it is a sort of "concurrency assembly language". Safe publication operates at a level closer to that of your program's design.
16.2.3. Safe Initialization Idioms
It sometimes makes sense to defer initialization of objects that are expensive to initialize until they are actually needed, but we have seen how the misuse of lazy initialization can lead to trouble. UnsafeLazyInitialization can be fixed by making the getInstance method synchronized, as shown in Listing 16.4. Because the code path through getInstance is fairly short (a test and a predicted branch), if getInstance is not called frequently by many threads, there is little enough contention for the SafeLazyInitialization lock that this approach offers adequate performance.
The treatment of static fields with initializers (or fields whose value is initialized in a static initialization block [JPL 2.2.1 and 2.5.3]) is somewhat special and offers additional thread-safety guarantees. Static initializers are run by the JVM at class initialization time, after class loading but before the class is used by any thread. Because the JVM acquires a lock during initialization [JLS 12.4.2] and this lock is acquired by each thread at least once to ensure that the class has been loaded, memory writes made during static initialization are automatically visible to all threads. Thus statically initialized objects require no explicit synchronization either during construction or when being referenced. However, this applies only to the as-constructed state; if the object is mutable, synchronization is still required by both readers and writers to make subsequent modifications visible and to avoid data corruption.
Listing 16.4. Thread-safe Lazy Initialization.

@ThreadSafe
public class SafeLazyInitialization {
    private static Resource resource;

    public synchronized static Resource getInstance() {
        if (resource == null)
            resource = new Resource();
        return resource;
    }
}
Listing 16.5. Eager Initialization.

@ThreadSafe
public class EagerInitialization {
    private static Resource resource = new Resource();

    public static Resource getResource() { return resource; }
}
Using eager initialization, shown in Listing 16.5, eliminates the synchronization cost incurred on each call to getInstance in SafeLazyInitialization. This technique can be combined with the JVM's lazy class loading to create a lazy initialization technique that does not require synchronization on the common code path. The lazy initialization holder class idiom [EJ Item 48] in Listing 16.6 uses a class whose only purpose is to initialize the Resource. The JVM defers initializing the ResourceHolder class until it is actually used [JLS 12.4.1], and because the Resource is initialized with a static initializer, no additional synchronization is needed. The first call to getResource by any thread causes ResourceHolder to be loaded and initialized, at which time the initialization of the Resource happens through the static initializer.
Listing 16.6. Lazy Initialization Holder Class Idiom.

@ThreadSafe
public class ResourceFactory {
    private static class ResourceHolder {
        public static Resource resource = new Resource();
    }

    public static Resource getResource() {
        return ResourceHolder.resource;
    }
}
16.2.4. Double-checked Locking
No book on concurrency would be complete without a discussion of the infamous double-checked locking (DCL) antipattern, shown in Listing 16.7. In very early JVMs, synchronization, even uncontended synchronization, had a significant performance cost. As a result, many clever (or at least clever-looking) tricks were invented to reduce the impact of synchronization: some good, some bad, and some ugly. DCL falls into the "ugly" category.
Again, because the performance of early JVMs left something to be desired, lazy initialization was often used to avoid potentially unnecessary expensive operations or reduce application startup time. A properly written lazy initialization method requires synchronization. But at the time, synchronization was slow and, more importantly, not completely understood: the exclusion aspects were well enough understood, but the visibility aspects were not.
DCL purported to offer the best of both worlds: lazy initialization without paying the synchronization penalty on the common code path. The way it worked was first to check whether initialization was needed without synchronizing, and if the resource reference was not null, use it. Otherwise, synchronize and check again if the Resource is initialized, ensuring that only one thread actually initializes the shared Resource. The common code path, fetching a reference to an already constructed Resource, doesn't use synchronization. And that's where the problem is: as described in Section 16.2.1, it is possible for a thread to see a partially constructed Resource.
The real problem with DCL is the assumption that the worst thing that can happen when reading a shared object reference without synchronization is to erroneously see a stale value (in this case, null); in that case the DCL idiom compensates for this risk by trying again with the lock held. But the worst case is actually considerably worse: it is possible to see a current value of the reference but stale values for the object's state, meaning that the object could be seen to be in an invalid or incorrect state.
Subsequent changes in the JMM (Java 5.0 and later) have enabled DCL to work if resource is made volatile, and the performance impact of this is small since volatile reads are usually only slightly more expensive than nonvolatile reads. However, this is an idiom whose utility has largely passed: the forces that motivated it (slow uncontended synchronization, slow JVM startup) are no longer in play, making it less effective as an optimization. The lazy initialization holder idiom offers the same benefits and is easier to understand.
Listing 16.7. Double-checked-locking Antipattern. Don't Do this.

@NotThreadSafe
public class DoubleCheckedLocking {
    private static Resource resource;

    public static Resource getInstance() {
        if (resource == null) {
            synchronized (DoubleCheckedLocking.class) {
                if (resource == null)
                    resource = new Resource();
            }
        }
        return resource;
    }
}
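Under the current JMM, the fix described above is mechanical: declare the field volatile. The sketch below is our illustration of that repaired variant, assuming Java 5.0 or later; the class name SafeDoubleCheckedLocking is ours, and Resource is the same placeholder used in the listings.

@ThreadSafe
public class SafeDoubleCheckedLocking {
    // volatile: a thread that reads a non-null reference is also
    // guaranteed to see the writes made by the Resource constructor
    private static volatile Resource resource;

    public static Resource getInstance() {
        if (resource == null) {
            synchronized (SafeDoubleCheckedLocking.class) {
                if (resource == null)
                    resource = new Resource();
            }
        }
        return resource;
    }
}

Each call still pays for a volatile read on the common path, which is cheap but not free; the holder idiom in Listing 16.6 avoids even that.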
16.3. Initialization Safety
The guarantee of initialization safety allows properly constructed immutable objects to be safely shared across threads without synchronization, regardless of how they are published, even if published using a data race. (This means that UnsafeLazyInitialization is actually safe if Resource is immutable.)
Without initialization safety, supposedly immutable objects like String can appear to change their value if synchronization is not used by both the publishing and consuming threads. The security architecture relies on the immutability of String; the lack of initialization safety could create security vulnerabilities that allow malicious code to bypass security checks.
Initialization safety guarantees that for properly constructed objects, all threads will see the correct values of final fields that were set by the constructor, regardless of how the object is published. Further, any variables that can be reached through a final field of a properly constructed object (such as the elements of a final array or the contents of a HashMap referenced by a final field) are also guaranteed to be visible to other threads.[6]

[6] This applies only to objects that are reachable only through final fields of the object under construction.
For objects with final fields, initialization safety prohibits reordering any part of construction with the initial load of a reference to that object. All writes to final fields made by the constructor, as well as to any variables reachable through those fields, become "frozen" when the constructor completes, and any thread that obtains a reference to that object is guaranteed to see a value that is at least as up to date as the frozen value. Writes that initialize variables reachable through final fields are not reordered with operations following the post-construction freeze.

Initialization safety means that SafeStates in Listing 16.8 could be safely published even through unsafe lazy initialization or by stashing a reference to a SafeStates in a public static field with no synchronization, even though it uses no synchronization and relies on the non-thread-safe HashMap.
Listing 16.8. Initialization Safety for Immutable Objects.
@ThreadSafe
public class SafeStates {
    private final Map<String, String> states;

    public SafeStates() {
        states = new HashMap<String, String>();
        states.put("alaska", "AK");
        states.put("alabama", "AL");
        ...
        states.put("wyoming", "WY");
    }

    public String getAbbreviation(String s) {
        return states.get(s);
    }
}
However, a number of small changes to SafeStates would take away its thread safety. If states were not final, or if any method other than the constructor modified its contents, initialization safety would not be strong enough to safely access SafeStates without synchronization. If SafeStates had other nonfinal fields, other threads might still see incorrect values of those fields. And allowing the object to escape during construction invalidates the initialization-safety guarantee.

Initialization safety makes visibility guarantees only for the values that are reachable through final fields as of the time the constructor finishes. For values reachable through nonfinal fields, or values that may change after construction, you must use synchronization to ensure visibility.
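To make the failure mode concrete, consider a hypothetical variant that drops the final modifier (the class name MutableStates is ours, for illustration only):

import java.util.HashMap;
import java.util.Map;

@NotThreadSafe
public class MutableStates {
    // not final: the freeze-at-construction guarantee no longer applies,
    // so a racing reader may see null or a partially populated map
    private Map<String, String> states;

    public MutableStates() {
        states = new HashMap<String, String>();
        states.put("alaska", "AK");
        // ... remaining entries ...
        states.put("wyoming", "WY");
    }

    public String getAbbreviation(String s) {
        return states.get(s);  // needs synchronization to be safe
    }
}

Published with a data race, MutableStates offers none of the guarantees that SafeStates gets from its single final field.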
C(55")+
The Java Memory Model specifies when the actions of one thread on memory are guaranteed to be visible to another. The specifics involve ensuring that operations are ordered by a partial ordering called happens-before, which is specified at the level of individual memory and synchronization operations. In the absence of sufficient synchronization, some very strange things can happen when threads access shared data. However, the higher-level rules offered in Chapters 2 and 3, such as @GuardedBy and safe publication, can be used to ensure thread safety without resorting to the low-level details of happens-before.


Appendix A. Annotations for Concurrency
We've used annotations such as @GuardedBy and @ThreadSafe to show how thread-safety promises and synchronization policies can be documented. This appendix documents these annotations; their source code can be downloaded from this book's website. (There are, of course, additional thread-safety promises and implementation details that should be documented but that are not captured by this minimal set of annotations.)
A.1. Class Annotations
We use three class-level annotations to describe a class's intended thread-safety promises: @Immutable, @ThreadSafe, and @NotThreadSafe. @Immutable means, of course, that the class is immutable, and implies @ThreadSafe. @NotThreadSafe is optional: if a class is not annotated as thread-safe, it should be presumed not to be thread-safe, but if you want to make it extra clear, use @NotThreadSafe.

These annotations are relatively unintrusive and are beneficial to both users and maintainers. Users can see immediately whether a class is thread-safe, and maintainers can see immediately whether thread-safety guarantees must be preserved. Annotations are also useful to a third constituency: tools. Static code-analysis tools may be able to verify that the code complies with the contract indicated by the annotation, such as verifying that a class annotated with @Immutable actually is immutable.
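For concreteness, a class-level annotation of this kind could be declared roughly as shown below. This is a sketch, not the exact source from the book's website, and the retention policy is an assumption (keeping the annotation in the class file is what would let offline analysis tools read it):

import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Sketch of a class-level thread-safety annotation; @Immutable and
// @NotThreadSafe would follow the same pattern in their own files.
@Documented
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.CLASS)  // assumption: retained for bytecode-analysis tools
public @interface ThreadSafe { }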
A.2. Field and Method Annotations
The class-level annotations above are part of the public documentation for the class. Other aspects of a class's thread-safety strategy are entirely for maintainers and are not part of its public documentation.

Classes that use locking should document which state variables are guarded with which locks, and which locks are used to guard those variables. A common source of inadvertent non-thread-safety is when a thread-safe class consistently uses locking to guard its state, but is later modified to add either new state variables that are not adequately guarded by locking, or new methods that do not use locking properly to guard the existing state variables. Documenting which variables are guarded by which locks can help prevent both types of omissions.

@GuardedBy(lock) documents that a field or method should be accessed only with a specific lock held. The lock argument identifies the lock that should be held when accessing the annotated field or method. The possible values for lock are:

@GuardedBy("this"), meaning the intrinsic lock on the containing object (the object of which the method or field is a member);

@GuardedBy("fieldName"), meaning the lock associated with the object referenced by the named field, either an intrinsic lock (for fields that do not refer to a Lock) or an explicit Lock (for fields that refer to a Lock);

@GuardedBy("ClassName.fieldName"), like @GuardedBy("fieldName"), but referencing a lock object held in a static field of another class;

@GuardedBy("methodName()"), meaning the lock object that is returned by calling the named method;

@GuardedBy("ClassName.class"), meaning the class literal object for the named class.

Using @GuardedBy to identify each state variable that needs locking and which lock guards it can assist in maintenance and code reviews, and can help automated analysis tools spot potential thread-safety errors.
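As a brief illustration, a hypothetical counter class might document its policy with two of these forms (the class and its fields are ours, not from the book):

@ThreadSafe
public class EventCounter {
    @GuardedBy("this")     // guarded by the intrinsic lock of this object
    private long count;

    private final Object maxLock = new Object();

    @GuardedBy("maxLock")  // guarded by the lock held in the named field
    private long max;

    public synchronized void increment() {
        ++count;
    }

    public void recordMax(long value) {
        synchronized (maxLock) {
            if (value > max)
                max = value;
        }
    }
}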


Bibliography
Ken Arnold, James Gosling, and David Holmes. The Java Programming Language, Fourth Edition. Addison-Wesley, 2005.

David F. Bacon, Ravi B. Konuru, Chet Murthy, and Mauricio J. Serrano. Thin Locks: Featherweight Synchronization for Java. In SIGPLAN Conference on Programming Language Design and Implementation, pages 258-268, 1998. URL http://citeseer.ist.psu.edu/bacon98thin.html.

Joshua Bloch. Effective Java Programming Language Guide. Addison-Wesley, 2001.

Joshua Bloch and Neal Gafter. Java Puzzlers. Addison-Wesley, 2005.

Hans Boehm. Destructors, Finalizers, and Synchronization. In POPL '03: Proceedings of the 30th ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages, pages 262-272. ACM Press, 2003. URL http://doi.acm.org/10.1145/604131.604153.

Hans Boehm. Finalization, Threads, and the Java Memory Model. JavaOne presentation, 2005. URL http://developers.sun.com/learning/javaoneonline/2005/coreplatform/TS-3281.pdf.

Joseph Bowbeer. The Last Word in Swing Threads, 2005. URL http://java.sun.com/products/jfc/tsc/articles/threads/threads3.html.

Cliff Click. Performance Myths Exposed. JavaOne presentation, 2003.

Cliff Click. Performance Myths Revisited. JavaOne presentation, 2005. URL http://developers.sun.com/learning/javaoneonline/2005/coreplatform/TS-3268.pdf.

Martin Fowler. Presentation Model, 2005. URL http://www.martinfowler.com/eaaDev/PresentationModel.html.

Erich Gamma, Richard Helm, Ralph Johnson, and John Vlissides. Design Patterns. Addison-Wesley, 1995.

Martin Gardner. The fantastic combinations of John Conway's new solitaire game 'Life'. Scientific American, October 1970.

James Gosling, Bill Joy, Guy Steele, and Gilad Bracha. The Java Language Specification, Third Edition. Addison-Wesley, 2005.

Tim Harris and Keir Fraser. Language Support for Lightweight Transactions. In OOPSLA '03: Proceedings of the 18th Annual ACM SIGPLAN Conference on Object-Oriented Programming, Systems, Languages, and Applications, pages 388-402. ACM Press, 2003. URL http://doi.acm.org/10.1145/949305.949340.

Tim Harris, Simon Marlow, Simon Peyton-Jones, and Maurice Herlihy. Composable Memory Transactions. In PPoPP '05: Proceedings of the Tenth ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, pages 48-60. ACM Press, 2005. URL http://doi.acm.org/10.1145/1065944.1065952.

Maurice Herlihy. Wait-Free Synchronization. ACM Transactions on Programming Languages and Systems, 13(1):124-149, 1991. URL http://doi.acm.org/10.1145/114005.102808.

Maurice Herlihy and Nir Shavit. Multiprocessor Synchronization and Concurrent Data Structures. Morgan-Kaufman, 2006.

C. A. R. Hoare. Monitors: An Operating System Structuring Concept. Communications of the ACM, 17(10):549-557, 1974. URL http://doi.acm.org/10.1145/355620.361161.

David Hovemeyer and William Pugh. Finding Bugs is Easy. SIGPLAN Notices, 39(12):92-106, 2004. URL http://doi.acm.org/10.1145/1052883.1052895.

Ramnivas Laddad. AspectJ in Action. Manning, 2003.

Doug Lea. Concurrent Programming in Java, Second Edition. Addison-Wesley, 2000.

Doug Lea. The JSR-133 Cookbook for Compiler Writers. URL http://gee.cs.oswego.edu/dl/jmm/cookbook.html.

J. D. C. Little. A proof of the Queueing Formula L = λW. Operations Research, 9:383-387, 1961.

Jeremy Manson, William Pugh, and Sarita V. Adve. The Java Memory Model. In POPL '05: Proceedings of the 32nd ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages, pages 378-391. ACM Press, 2005. URL http://doi.acm.org/10.1145/1040305.1040336.

George Marsaglia. Xorshift RNGs. Journal of Statistical Software, 8(14), 2003. URL http://www.jstatsoft.org/v08/i14.

Maged M. Michael and Michael L. Scott. Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue Algorithms. In Symposium on Principles of Distributed Computing, pages 267-275, 1996. URL http://citeseer.ist.psu.edu/michael96simple.html.

Mark Moir and Nir Shavit. Concurrent Data Structures. In Handbook of Data Structures and Applications, chapter 47. CRC Press, 2004.

William Pugh and Jeremy Manson. Java Memory Model and Thread Specification, 2004. URL http://www.cs.umd.edu/~pugh/java/memoryModel/jsr133.pdf.

M. Raynal. Algorithms for Mutual Exclusion. MIT Press, 1986.

William N. Scherer, Doug Lea, and Michael L. Scott. Scalable Synchronous Queues. In 11th ACM SIGPLAN Symposium on Principles and Practices of Parallel Programming (PPoPP), 2006.

R. K. Treiber. Systems Programming: Coping with Parallelism. Technical Report RJ 5118, IBM Almaden Research Center, April 1986.

Andrew Wellings. Concurrent and Real-Time Programming in Java. John Wiley & Sons, 2004.
