From 5efea7207f445681b35b960e53eb6006ab83895b Mon Sep 17 00:00:00 2001 From: Serguei Bezverkhi Date: Wed, 14 Nov 2018 18:23:16 -0500 Subject: [PATCH 1/3] Vendor changes Signed-off-by: Serguei Bezverkhi --- .../spec/.gitignore | 1 + .../spec/.travis.yml | 2 +- .../container-storage-interface/spec/CCLA.pdf | Bin 0 -> 67545 bytes .../spec/CONTRIBUTING.md | 3 + .../container-storage-interface/spec/OWNERS | 8 +- .../spec/README.md | 2 +- .../container-storage-interface/spec/VERSION | 2 +- .../spec/csi.proto | 473 ++-- .../spec/lib/go/Makefile | 31 +- .../spec/lib/go/csi/{v0 => }/csi.pb.go | 1945 ++++++++++------- .../container-storage-interface/spec/spec.md | 677 +++--- .../kubernetes-csi/csi-test/.gitignore | 5 +- .../kubernetes-csi/csi-test/.travis.yml | 15 +- .../kubernetes-csi/csi-test/Dockerfile.mock | 6 - .../kubernetes-csi/csi-test/Gopkg.lock | 195 -- .../kubernetes-csi/csi-test/Gopkg.toml | 62 - .../kubernetes-csi/csi-test/Makefile | 52 - .../github.com/kubernetes-csi/csi-test/OWNERS | 4 - .../kubernetes-csi/csi-test/README.md | 20 +- .../kubernetes-csi/csi-test/SECURITY_CONTACTS | 14 - .../csi-test/cmd/csi-sanity/Makefile | 14 +- .../csi-test/cmd/csi-sanity/README.md | 30 - .../csi-test/cmd/csi-sanity/sanity_test.go | 17 +- .../kubernetes-csi/csi-test/driver/driver.go | 231 +- .../csi-test/driver/driver.mock.go | 195 +- .../kubernetes-csi/csi-test/driver/mock.go | 83 - .../kubernetes-csi/csi-test/glide.lock | 135 ++ .../kubernetes-csi/csi-test/glide.yaml | 16 + .../kubernetes-csi/csi-test/hack/e2e.sh | 56 +- .../kubernetes-csi/csi-test/mock/AUTHORS | 2 - .../kubernetes-csi/csi-test/mock/README.md | 2 - .../csi-test/mock/cache/SnapshotCache.go | 89 - .../kubernetes-csi/csi-test/mock/main.go | 88 - .../csi-test/mock/mocksecret.yaml | 16 - .../csi-test/mock/service/controller.go | 559 ----- .../csi-test/mock/service/identity.go | 48 - .../csi-test/mock/service/node.go | 236 -- .../csi-test/mock/service/service.go | 137 -- .../csi-test/pkg/sanity/README.md | 54 +- .../csi-test/pkg/sanity/cleanup.go | 134 -- .../csi-test/pkg/sanity/controller.go | 1909 +++++----------- .../csi-test/pkg/sanity/identity.go | 115 +- .../csi-test/pkg/sanity/node.go | 693 +++--- .../csi-test/pkg/sanity/sanity.go | 142 +- .../csi-test/pkg/sanity/tests.go | 56 - .../kubernetes-csi/csi-test/test/co_test.go | 107 +- .../csi-test/test/driver_test.go | 20 +- 47 files changed, 3137 insertions(+), 5564 deletions(-) create mode 100644 vendor/github.com/container-storage-interface/spec/CCLA.pdf rename vendor/github.com/container-storage-interface/spec/lib/go/csi/{v0 => }/csi.pb.go (69%) delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/Dockerfile.mock delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/Makefile delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/OWNERS delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/driver/mock.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/glide.lock create mode 100644 vendor/github.com/kubernetes-csi/csi-test/glide.yaml delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/AUTHORS delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/README.md delete mode 100644 
vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/main.go delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go diff --git a/vendor/github.com/container-storage-interface/spec/.gitignore b/vendor/github.com/container-storage-interface/spec/.gitignore index 4f7ede45..443a2c83 100644 --- a/vendor/github.com/container-storage-interface/spec/.gitignore +++ b/vendor/github.com/container-storage-interface/spec/.gitignore @@ -1,3 +1,4 @@ *.tmp .DS_Store .build +*.swp diff --git a/vendor/github.com/container-storage-interface/spec/.travis.yml b/vendor/github.com/container-storage-interface/spec/.travis.yml index 15b11d3a..65d1a6ab 100644 --- a/vendor/github.com/container-storage-interface/spec/.travis.yml +++ b/vendor/github.com/container-storage-interface/spec/.travis.yml @@ -29,7 +29,7 @@ jobs: # Lang stage: Go - stage: lang language: go - go: 1.9.5 + go: 1.10.4 go_import_path: github.com/container-storage-interface/spec install: - make -C lib/go protoc diff --git a/vendor/github.com/container-storage-interface/spec/CCLA.pdf b/vendor/github.com/container-storage-interface/spec/CCLA.pdf new file mode 100644 index 0000000000000000000000000000000000000000..08a9f2a50d5e812a6c8355960fdf5a77912761b4 GIT binary patch literal 67545 [base85-encoded binary PDF data omitted]
zuM#qaHIaND3(0jKS6TyV#o*QRZ#e}n}cd&AI zAXTA%GQwCZ@<@+%{7joV_|mfDY3v>tzJe^QgN-vwa3K%cpdZE|ECzx>hEg^8GUvrB zc?Tb|R^cNZH8L}1=o~A@R~$=jPnnejMVJ$j3he$)1o_(jQ~3ES<$zRXjH$$;K7>l3 zWQ8?TbV6T&Z)Od}9lM_0ol8JGTlAV~IAWPt2{|VWA+L?Tm?Rh8ECM1VH~6d`9}0@2 z#B9>3Ds&2b?+HOu9r%W2Ox|LPRshE}<<@!OX%*Y84rv8@4?M2rt;ykw~&2Xddzf==2#J1=l z&V`A+m?Z@8r*~jPIgZbBq99|Vt~Z4jp01AgU@jhlyWS@!6)5;?vq zl$UFinB%CUHE7Hn7~W1R1z(uZQq1G~+bs!DKHp)U(-Yf`Zss5iz$ z5LDh|0-fwD%Ex&y@S&Qas@p_Ax>UXd#6DFR^aPsraF2m*=ss-3nVguAr6r2^pb`Z* z!4b33D4DAE#S5~>5|!-7=e^Rd}>VmF7%u z2vAKq4pJ8A%rWf=XDDc>J7tHuv@}zm$YFr+5NCXo(?LF?6Wshc6htoYqNAhks@{-f zQBNT9B&?&pP$nxPp-~;4sk#sb^~+wBczcnz*f6vm?m)&|PQtg;Qkk`8qPMR_>qmn- zskci%d^z zBAfm=swIb!_!1ku%wst6ZD5U{W`Qqf*JlD9RgkW-9zV=6Dj{+3EN(em6hInIFk>9O9~Sz;EY` z=idfLbGX|PEKJM3*-9WlXgz_Bh!m0bVGLy5mS~6n=6F=UR*I}g9?vnC8veCqdcOgf zbLfpm$phV1ia-F&f-$wTaawOiJiJ?@8yy$GYY_JSD3ZwEfhWoY>LR#tl^XO1gOOLe zpr9i1bHx=IVf{*JThhU6qfCE}IrAH7;`?vT$NX+Hnnu#hv#K;bL>PHGJW#F?@GWiF z!F%Cj;Iw|%uZ}w{#k1t{$BoL5lj!MsX)926!s8`}uP=CnH8qFa5CIX=or=X^H8!5h z4|o9v2JsTI?^un#=p-RUS<}EZOyV}@8HT3Z`73bW-{gK zo_8u6K}RQ%CMk6Jg-wqw`@V%Xcyev%y~Z97eBUx%MaHw^(VCMg8gmZm2CWaP;&+*5 z@6(Iz^Fz;*!3N`Tac?x5yx|+ACYjXx4sxM`35eEOhI`b0hMc3dSNKk%O4U$6{4` z_>Y~+8o3mYpJK*( zLXf;Fz1>lSt^N6ga*{N2+f!3fVq>iMV&44JS2}WZ^+(vcyjAXn_NYVSo2A$bo6hYd zGpkrXu>Vk%L|Svd6RASvTJe|3V4^FI!k;|arOs(uC@cLDk{S#4DZTkDtJuE~1E>w* z4hO{0vqTPbuOv^JJen;F{S!Toa!XrJl_ph_l5bnVog=$2&wT8<*oVmVnELi*oA)OU!4y&qN`jX?8F`j z@bbm{poF$-;52%Hu)Q)~gAln<6T%Gh2tb9M%k8~F$FqAha*MnKxZ7~-&-%wxn;DCW zt?gDce~Mw;02gm*l;b?cgAEamvdo}YtL1@{y(xElZEyot_TQ_nTszxc(EaK_g3W1h z&+3TvB>iUt{Oq7rF|1Wsr3*&fpw}F9sL>dY@mFh2Vptm-ACXt#9JC-dkaUsY49p4p z0Bc@fKo+m;Fr7WWz}a;cJ;6ivpqtp+aZMmMAt$4)M~F@&_y_SK*>&V6P;YZ;IZmX9 zbRZ+=hz$4;4y#O)t_caq=3<>L9v+{kdE z&bk}nB}Q#6@p%rInEx8NB!t>3r6rr{Yqf(QGjg+a|AOSU&pZ_519J{~7!vZBQhvx;} z25GtIt}ZnN*<;;jml0tSqBO)joquAt5!xtIwNmqFAz~k!mdi!$)-jN= zfSW9Les0L!Uo9EzZf9rDr0_k2y4_Sn(Vg-=V8opx>R+za4(phJwcO7rBmgEV>Zpo- z1&>$bndr`t{d|HKvv{IHDDsiEz{#H2`jw#ex4=lXRRAqOb? ziGffo7IiZTOX9CeB2#bJc;Kb2_L4BJK=V!VQ69F0dM(1xN~qh7oiz+eTAtHB&vq#I zgS?>e*NpFzh>t$>MEtTB{PHU)z_dN-y7jSC8FLZYV!KM@63C(leFCxsfn3VW&)s}X z8ULwRO)0`}7%BHimzEh6zhO@%X2l~QifB}j0!paxFi}tT(VUBkQS@s)>7 z9fzui^N&b~c3pG84sT@PR_=` zML=nHFm6W*?mN34j&04L7$jdlkZOl-UZJP~_>|!fI3DGokC%A5RxiF5uG0Wx->Hyk zaFBh!lg@LWUFwj+sGq(sGgCe&GL|ASCfm23Ap)?4W>#i`h^V$XUwV>icBr$Ur0YtWUeuR~n-EuLMr`N7By6>2i^t@^?#x_R( zGP*uepSIekf%X}*u(GgzmVTz0|ILn5a1Zs&S$%S>_s^T zTSG--CxGV1V8WsR1|?%RCxAA9LCDt1)YkVP!<_sW@h`HH z5)**)lMKV4q{Iy1`lrT!j>YuJ!#{)l%Q$6X`l}v;%*T`%1U@ZPn-B98z#wewVs2=x zASU?V8?Kp2u1bna7$Fnw9PN`R!~n7z91m!~^jC3oF!@jkKRJ-#Kynvha8Wd5DitH8 zE?}_`VnY!$*zW-b3($X%7nV#fycb%4;}QmJ{#vRaxB? z?bEalrzDf_=HjT2W4EzpMYPJTr;%;tMS$gMrI2l!=6OLHw<}4c0qxyW&dfYyNC7dT zrRt6ykmp*3GK-*LpNIYd0fI{C<7#t22!G@<%K~yMG~EpY3zaY^L|#ZS-#Z*KdAh1o zd^BL9N$8c{`r#%uaZ?W$#fa%wWt$`dOr~y~Y8HD|fp>PMKEH4F*txVp;G_7;Gvx8C zd_{gbktGn3rM_WxTF%gp0dz;Ik&a*2k*Ew*Zw00(izNkpE`Q}tn#%Glh2<-0-eQJWUsKO@SE`17#u?K zgwg9g()&&qm-+AEUfPW8F3hW-LuL8H1n9s}v(_MPf3+BTBbY$nxh-ETb_z&rpG-?M zGiO~O?;tL@CJD$PTOJ@G5>QebJ`WJ@6HO#0n4yj~vx?X%$7p7cw}U4_SgkOdEhzzF z#r-?DXd?%*pS0L{8-Z5cASf}APB}2F{`8C>w0iJ(ek^vN>j0oYKXD|`Bv5*B5OaR8 zS^?fgAQ%CrMPQfj7Ir|h{vMNHG6C#%5L#fWUGR3WaDHO^Fu~p57=hRQ&7lG=kBc=+@BjIX5xeE!%aiW3t3x&oY#qmE*k{4r9e=p4ylH<7{al&bV(*FLG133v` zg?I(_4n$4?nbC(|1p4K-RQ(kXN~~veQ`ikX0}``qWlORNgBPK(2XhPe5{NcPwmLB_exkWMvW90naDDhI3J`$BrOIZAGbMDJl1{}6e1*7KLrb80DC7yJz~p1 zMStEvl5sA@FhxZQbD!M=iN*ghpuk|RrcjNdoX-WC9XUOcu1`fTT2He&xdwDm|MV*! 
z!eAHnHlCeN6P6aU4O=BxBjQ3ttKUg4{1*9@TO0K{+C>0jx8tRO7f}a{FP<;KMl|6- zCq)dj2AE|4B2f~6)QVII2?y!`>NOC#hgIIcSk{R+7|Fd)%YdpTYEhs~0*_oK9!(;c z6jOneDz)BUNy36yo1BNdohW|zi;G^MNrp?Z znctn?U8q0-zSw3~tu&=2Q$x00*jv(@K`fpn`8s)LxFWtR)-v%V*^^8*VIkQyS(iSW zLYK%_@~!Zt2}Hh|zgy8iuUaKnbgRCHCrCmh?PsFF4}&y=P=oK=Z2j_NZE^l_jVQ7u z{72Q(WCcVxf-?o~(@pi#^P8eG zdKmQ{SQM}eBH|-jBciWa_nRknCRW(1%yr(|s1hHkZK%Bxi4#o{ffJ)krIc;w(B?8r ztxIKR87;^y^evVwp6BSxA}5L_T(c%Jby?qcUqX(nTS7gWo-A&Ak9m&canf))aIkTd zagtdF*s#-V(uULa(hgV$>&-M^8^AR7G!7bc7!y;^$COOS>cY8lD=N>5=%#q{*UES* z42$d3O!H*w#!@!F^2%pdWaoE^b!z&KbWH-5^}+?x zyNh$fay#|2^z*h1NlIhOrl|8Y^1cpm8ve2myW-u^B(Dz5l!%jv9F|sGoLyJaYniB@ zfV12;BWEpRYaRSqy{fKWE;^sX)yqBPmFJ%RNCV3gt`?q&v4){bdq%5BcS1|3PNfd6 zW?g?={nWeMmu9JL=sg&0v13>_Ubwd3P*Fm+;nI2L{WVRydJUshrgh>D;g0y3{Fw=o zC&VqJ9pQe_$%V2t(0dN~DxlERasW=5iDBU62xY8-w%4;Bvd5_6Ug%VElX zs|n8`mfnayLf6eMcUE~NaYy6iX-s+{^1!9fx^HcfXX&-+nFjG2VysyBU_w#+0nP-h z&X43O)mYW4#<+Wf3yX^)-Y>kNypg=99WEVZ-V6`=uOP23Pixl;mj_P^&$3{gVCB$< zF!^AjU@DNju*}dX;Lc#Z-N4V!`(?eq6kr>}FTt$b)`bFl%q(wYMl108HtI?|0?TSWdL;wnT zFbHwcJxN_0O75rEzJ=_ZZo}-H)3azL)txRoH0+g(PE^L|cB|V{6W|i!Zy>#gqV|;R zNmbtHHoNYDEyTBmJjWdj&>TcwX+Idh#DXOU)&@=qC--TG8cI}3piAt4$O<%UI@jv{ z@!u^;h)Kw!Vkwy_`E4Fp|4=_8NTwfFO{2}iL($IA%bLe1)G}f|cdm^3lYc5!iqcWt zq%|e#P3l%Wf;64%yE<&0MeVw&m~r-q{2227&2HMR%_#ox_3*O~n+cn%vuTIRTkuOa z+qoq|MbLbu)=IOCt9EzG)@{d0v^Bt5eCcvjbTrZ!cf%%ZeO5k4zPdxttnOleK(JX* zeyg2sTob{n-wNI8Y|W&u!eS+vSZC_iV01fdTb67!?qoyVTlH~m>@M)Ez2L6k*>%$e z%5`<^sO4HM)j(ZmX|i%tyJBzb&(A-Nt*WOTD*BrCdWh!HM$z@m#kK8{RTg4%MaTKa z{y5QT`|Zb`jc1w5gDVT28`i!vz>%Q+kh}zFcx}Ez_i~mM0@nTdt6Sy06DL29TRKgB zb$x};#IIr}#Dc^M2798N=f2J^a1?VSu=hm94b<$|58A$N-U@l5mL18l>FS&<#mmX2PX5amq)CkkaonM^~{guyd?kVNDapX6Xi9e1{$Cv)@brW%y znpQVr`fVb196ckQE8ErQkKjPiRd^_7$QM;7-j@2^>CDRN%JotI#tLgC5SqFV;-EZX0L{)J3lF4WT~@C`6^!L9u`bW+HX#C z7W5|SewaoFHwiJkCq7om_JU88Owfqdiyn(rh)qO1Myz?%-1J|LMkLSd1a(rq{kl(_ zD$Dk9eQq~381B2YoH|M6lUy(E;ChN^@6 z0shiq6#&}*)M5J6#Q*O)?Z1@o|IXubrX>-L*NEP+Pz7)(1zz6M@En%T;v6RZWf(oc z2r>#8t&w&$&qiH24S{@Mim~V5$7u zOgr;N>i5T+Cu@D34mS7qi?eUo$)?6ew{ z(SSf)el4SclqAFlDz~i30{MzeD+*jlLIN!hBofjNjGQsfAO^(fx7h410YmHCZKV%< zpxr~N7tHS>%nL$7YkZkQf}y$(>3H7=TUxWurjB2LeK5_V*AH3V#BpDgX#RDEV2r0v zK#C|}p}iP)YBfn$)o6*tFZJYLG8FgvR(X3i&(iliTcxyQlaA|(Y#&&Fc@17-dJ2|y zS)MYu37Q9}Vu%O7wdjtny647^xNfi%3|ctatA~Nly(UZph(o@C6WAuad>!Y265>~8 z#tJ)Q0BLs19e&pEh#__7b?~Z7XZhGG@`x$_#RUMmmM6}_KO~uMzV|?0D@4%{4tRy6?HZtDuykc)6kJV{j290&0Ygc|v1WoFW%c|Zh#>`pX0u(WoW(m|u?$xA z30OfpMV{^{u4U!s2}C*D?tSdF0w}EWXr@*PW%@I@iHRa+=>NTJb1tKE`B_}Njwr$vWx9JWF#M`}NUap5w$H#5tvb5S{ z%Me?cq9I0ac0O)PAyvbhMW#h_rzXp)?=N|1R!MVjr@AA|M&H9Sb>~WL%>TiKkCF9r zg5)b`Iet-$3sZy$J@fBCIF)mrO~u)18YjUj$;tV1V5i8Npx4Xwz8&>sJDB|Bvkh&2`KkM7q$1&tr{yx>HDv!U;8jU z6|2LDSFBxrGTB$Fosb=IM&>OC6uRog7le{Z&t}XO4v%P)bPWEqUkZ)Mmr(oauRIWd z5~f!%cJ&2=f@Y?Hg5oZ4j!xKHgSf;dtlVmT#eE!s<%}$@FCSdxXo$BDW-k{Rev8kx|`>q*PQ75={$fcY%pOoZqcKZ7>NF0V%XMs^2+< zU7tevY4n%5idtsbtUsjQ0n+i9!Aat-_Xyts?f?;ROx*j1LM5L1?_EWsX<~oSX8f_t zc)IT)B<@gkKp?4-^7!2+NTo541xHDZ(W%C*QOUk3aLbb19BSEwa&pIzcPAVn3j`b_ z3~)XKa!DW5y+2Y?B_-?`+u&Nq&l0)IJgKBEPWO$(=*b-iDIvL6c}hGzFHX3VbVFR8B5F!S#Su9n zVf|Fj_;q2iRn@CAGvVB^MWH4{fwQ$UGW-Zu$x|`q!ykR)vFZbGHA0mEPBluy{8St_ z9e#wL6x*n6+HwP&;`y-%3{dXNWm&X_T9z1(5M=lI$X&^V2sEX=IWIZJ6BwbQ;eBOB zygpXd0>s$r!WXp26%SBsrVO-V8$o}1$+*OEpZ*wTr7&03l~TjZprY=1ujhrSZyGcY z7lbW|XN2u+EBI*HSg0b4^q_<=eFMDhL}RsE*6E zRt{My3iXd_gcY@+e61wVpuhF?%_hD92WZv+zxx=Eh1VPc|AD5rL;AX|mIvFYY5o>; z6T*-$;!>wPcTmEgU}c(f^V7xREYO5Zv<^rUdx`eSIZ<%ZOz^Dh+nST* zAl{?%Y)UQ?Bcbq)Vj>(TZ$l=@b-4|6UF0LTphCL?^*#P_3}wW|ER<(KUepwnCj(H) zLze{lG)Nh%md`=K?8WNiZN3w*tJ(Jv&H4H0?)^5@+rXZT(72^R)Nm~`+$7S-+OKfA 
zi@bS?Re;<$Yh_(3{o{nVuVX0<55L-6T)H@NfJ;!_#P_`9DjKEdPxC|8)%?Z4=b&xe zJ^bRG5b@?n86^)ahP}b#L#2PlPG?$obZ3~?xbZia2;yz>j~@XJ9>8y^OI4Z!&O#R2 z=SFS#An_ODP88eq;*r38H$b=DtWoqfgz>To#*ql2xU0QrSUF#-H7k2yV|p?VJ^gUb zTtV!Y+*K|DD+AT8YjA_nTE~^dOUPnF&3wbY|5)3uXZ5T@O%vk}23b7~857o^XD$WCdS7=yR7slK*80so@!`GnclD$CPW2s?fxUN{rR%0&iqUyu|-#?7c<$hS1+4ZzCDbQEjntiIPV*%6v0Gq(CRgFlKLY7tg zv*P5pr%2hI_^5xyRW*Lk9IZl^!foUfy5k|lVP8}Vcthspt{$YEM;9J}YFs|VH=rmk zb%yrc3aQPCCs51DrBV8skU!ALRAo5Z#8fq~SFzKjrLf-W8bEM*S9)*{4Z2fkiyQOs zPK>A#UmX>sPF z6JS%F&La96&09*WiJGvry$9CJ0-G4DxUp!*G2N<57|T>u+N#7oAS2+B)le@5SfqXVFaAKO@b z1znv}AnYskt;x2*9}FyqC^^#xjBp|i9mnu6DXIe<&Bc5>!>u8y(n6^a96K@y!k*LL zocTe#Gm>-gT+*|L)GOE~@Ea3sv&~-Hmvjj?L{xOkg6pE@ZL)E&vRgw5t|_lc!xSWR z@HRfdv=lwB%a|X4d(aGCeW_76`Qu<%bsM?^P1nTJ$3aKJSu+fz*{1ufLsD=RTeno9 z1~Tf4N*r@2HO!cy_RMbkmsm&gZYyVg`Uj`QAqdjw#73#U;?^9vmyjh;sP*vjS-~rZ z@`wrK4PaZ9ED37tuQ`g=X7gj@gqkQ1gbYsA*h}Gf`tbe0&X9I`xQVl{v~)JF%4kIv zq!?es2+_Rj-~q{acTZ`ah} zHP}uVifzel{zyh@yWt4_qY=m!gJ~=V*IhHGhnLhN@_NpL>K{&1vf+~HxH0F80gPS!x(YN zmE&x@Y)|N_BBn{^Bc@p)68;^G@Fhp!SXt;kOK16%vGd|cV`SXT z{ap& zZR4WxN*?=ItxYRvs}MbI(<_o!JBRuGhH&$PwquV|qu8BS9XyMQw+Jb$mFlbpUW27s zMDU7R&JbQA9uIH#m>1l^N_Pz08gf&Kxe9^Q9G#$QwvJp|I&9=BQQu6bQ))5p_DA+e z$27RF3hp+JS$xO2B?LB-CuO-eovKX=Lfo15@6vd;up>NKqwn6Aeq$bJ6}q0gmj0mp zo!(VV9GR{QXDUzMXtDpafc|5E4yvWO@Eye9i#Yky86!f=o;Gi#Fs+~N02GV;!6*Uc z7>C2puL@!EB~CSoy;#U4PI%SQ$OT&In^y0Jt-^I$*h7B`ms5yQSaB_%89P-rHu3ml zT#j>Nns{N`BQgkQ%5mr+>BD>!@TFodUSa=f~ z#^tkJmhfMq8U5~_LP@p&!m24M8%W<|T&A&&v~LzA;xHf%-)s6du?!1gbF9Ztn<9A6 z=dtx8rZX-a8=*vXs^Es|ec>DK0v;pzM+tIXvLl`9eS6 z*s%xo`~|Yz!y;;PhnOtyYc*eyDDJwO<2ZYU2cWEnd@GL25>T5gt#zDA+ZZ5#XE(+6 z80q^WPD5wh;&2%+F24_C~h;LO2}j zAmq4Q41f%uLj^Z*#)8u6LtCCZOU6=`cu8z_zMJlD9_|>Y75Bn`5 zG-(WedxkI;#Y>j#yIBsuCy9fdhA7*szjRRlOJh>JUc>8&f9$TL0kbQDyfbYw2|_&n zD9bu{SH~j*lULpG|K^zIA9yS)D;wi~9O3*=eXUIYLe&0;v-Ph-pnvnW!u&7Z)=!k} zzr3w~wfyI_=>L?v^}`GMZzKG3X!UOZV5Yy^vj4p!wL(=}c9tK_J6qR&d6s<(j*;%UR#A}A;mt4)TxI4aKc;k}X z{eTWR5lAstcdTI1Ox+oMiFp_1HwB#;0d-L{ zd3On5y5bj!ixHa@V69SX$mF0P1V_ZArn6UL*i#zt=E9wga21B&w=GOpSdYD|)rFJb z-|EHZDp?z|iRD*Q*chGMFz3L&vNi<)X{9)_M9xcRsd7L}Fe;dGr-v-8UkYW_*l#C- z40!0kk+|;(fiigcADnWOz+>Nn>S{n#>T+zAItYuFO-c|VC5Ggs$89y0%82HAuwMOD z4D>}N^I9m>YT`^)FS~w(FMb{OZnU(cicZRMFc|5_n}axw8j=VX9yFYvoWyxhkjly3 z=+}{xZs1H!n$*bmc>uJQ>allxKa8txxg+1un|LCqT2%%Qpc$+5Z!E=Syj=j@I+5<- z4uF|AT|~?V4vn&aChg{qj|9*!{RAnc`O<5%t(P)&T_=Ffev)dUTB;p8?0*!-pCKT+G=~kwa+N^GsJ^%}RJ$CD(>5WpUdjB>#KzCa*n) zP7JsIXjSQg_SUA?TgSa^?;avdl`(=9hT`&3XQyF!vgmSWjsP?!;j-q?HSJFi=lma z9iz#BN$~*cjnqJ#)c_tVYWTH-E{P3S`hldPl&a$#K5EMQ#E|vPw6+~^(BX^g50Gv#OIo6ha_H}l+KXLqnBLFdxHCojYv|LQeg%4UtERLipOq%bp0+dt-zKf=_ak{a;)R(u*pPR&4HWV^XeLZ zWlszp7+7tRI<|Hc(Uul|;htv^^~ReY=JVBvo%r8ugP$A1=QhN}$@U*x)4wA@{=Xnj znEpn1e4s=A1vz2*8x-;%n3Ml{7yNAb|LZRJ?}zgLITy$O3J>!MadG|&Rm1Sj+{p0* z0Q31uPyl?uW&Vo(0@uK3|Jydp{O?efzef<#cha}AHT@f?Q+SpOX*j2&STHi)aQCjJ5*p0BU zqoISjos+Et4D-KWG$J-1_d3jNKDdz9H~g#me^Nxv9UPs6%=8@qEX*I5L-hZZVgB%a zFsPavIhp-6@sIZ+BY>6hlU+stC(GxPWa4B5uzaLG!zY!Q?eF|&%m`p({4WE{KV=_n zIR5VOv&H9!gP9q?&d%{y_$&Rv1sfatU-ZxX-(&q#!pX@6^O^n(pS3=qOdoAOQ|uq@ zKBi zOs$MRMrIIjH2g%kadLf#<>#QE89HVr_K!uXZzpbSZffQP!_NNEucDK&waUkz{qv%e z{$E1R`axCuyUNG3IR3684q*9cE8t@KmyCai_D8?c`fh*cKKX$8i^C6RoiQ`uFG*7P z_l5rN%Hh8-Ys{a^K;fVJRZIX@4wjGg^50&tnr(CUQkq-i^BHe;{Wa!dNEn2Cda&+!{^&Js`Ze2g;jUa$5-vX(N?6BhsfYgO z%Zn|N;61HQCwmOdH&3Xkwz_i5N8{#NLlUCnKYL~Dw4;ea8iMB!Ghqd=$-5)Fc+)CDL4x4|1lIecm60Ux)&$qnA zSx4+F;uE-0Qk|HddaI8_a*Ut8aW?9qlS>ihC3D^d{z()ePAal*uq%IM_){>ou?((i z+;KHaN3Zf(>ZH$b^FEKyQ+M+y9FlkAac(&`o%Cma{|}j?30%7@a?VxZeTm(&hId|~ zz4JcJ@%KV}^12{dh(H5hZy|rXFeezo*tMSLU9bRg(pmALIT9x#zJMGYF7h^x$yp^L 
zL}KYADs$hT#oLUwhpz6_KGbc4ck!H8dE3IixtivWhVOl$d8x-W&13xb=KZ0;+*{GR z7f@0Ud3FJ$4yN#=Bh=%pSn>ivgh(s}eX@5@z5sPFQKPL)fAMmId-*N&`gaYtP+EBz z&yo$Zk}hyxDsJiWJ_bA#gs5M5c#P`Q;=MRqCS$`)eS^p{#2Ere!xme}wb=TQ9{?_5 zCXzdnd?XFG$jhcn&VgpkgLLE~Bo&fVek7)#=v*JN=pgT&0@6T7TNE1#aPdaiMyjLe z*cQ1rydvlDX(v$rv7;7XF9AePdP^j}e2>v8-{bztY_4 zNpk2PbLj80e|(XVz1g!a>X;Ualoo&R;!c^8JX!UX=LfMhZQuY!0FsbEpg{J$;rBBa zOpytNIHLeDC&(AVi@o1^&{GD9BP`PG%zb+hjWl`XNK|L}NN9VNMtf?3iy93EImomL z#QKauzs#6f?0Y9udnY3t!nL^^`HiN3o3b?iE}DXK?GXb+Z)l8Y>WhH)>Z@J7-D zzROG(EB*G{s;*jnwH#$8a@Iqs7*L_+?_+>2DS`h5?GbZ!iYc>&Ub{*r#tB$kgW^QP zxpc0_L_Y-Z5R@RtP!9zQHn%xc<0EYi5y~y1oxW7EkvDN1eH4A7tbWUMYkw!dC;!^5rp;pxKZKDj6)SEwhe)LJDeWWVa6UPP#V~Yve06G^| zpCaG~myX|)-=%jTxZePX)XKfQED~x0Y2eOKws5K7aDt$UR@LCepSU+mn1)d4cXa=H@Uk zgMdBRd+jhA@{vA6O7GAgqmi z4Ae1Yuqt9PdNSyeG=Oq!kV!UWFsrL3`wUak$k5$C8x131MU76QM8E)I(=Ya1al19q zEgfZn#$K3Rn!|^8?P1kvCvqmz-bgKgbu(}!k(2TSH$s*TOA{wT1YT6@w@g6a5KMyF zZ^Ne=6y2OYG6$c@2jj_yiJU7!fgMl*L)S@8-l2(Wy)zv34wMsjsSBm;BT2mYTb~_< zmjQR59jwlpZgE%n2!AOhgKU&mcPT#akySJD7SqF`eB_0^S7(to0nvoOsG9}02goBk z-!(K+p;zbDn3vYhbN2VTn{TDntWWN~y9u4Sepv-G!quEl6H_;YPB`v(do1F~B;F~r zQlCPk72WuO;wNS3@Rh>ql=N=4Hxo!8_ zuVh7Xo|?YH?4*r%$>oBzd3+;~Zsxw)HJ#mp$?;G--k9-}J0z)09)l%Kf2b3Rc4cgi z8MaK>{@}t*OT2D5klOQ(n3|{5Q#W4Uha`sF#8oqMB0|%AcZn~KODk5#Zi@{}P+B4E zfKZj^L+9Ch|9Ou6wzU4s>m4%_G^=N*WgE4(9u;18dA?drR*n)w>8C=GvmYvru3u3N`@7Ujt6 zo26vdUN0bdEA-%JbGuQ1ujty&+whb?m`O7NNBs3m!xn)reCZ&^C-!8FG_Q* zohzX?gHar60XX4mx^ z6&f@asYzznkYX|;67-dOW#}c+@U_yR#k*uNqG900;*Jp(*z+>;Zs=P%Tj5k zvSU#a?1&bk^dnKy?4}JB8ktrha)N(551o`Y9QpojwnWK=oxJmrt7Dk> z;L_y6Dr+vudu%1EVlx%l*3;2tq`a%&Iz*jAb8Xfw#rZ0~{fx0a%$Y;G@8n21@=Vv2 zglImM)EKO$e)G@`-7e4 z2sFP#%)f5oBg9i$tDXxbKJU`U4&4G{MBM3X+&qr6zWLJ8W z+Zzcvv|YGl4T&-O4fh#y1U9_~7Iy`bV4b_<@$3Ea-EBL$^_%W_L#nP7O6NlFi2J*J zmC5=GW2Y+1=Xas^Pwz;38bg|Ry5K;T1Q0+LGKClMBzFeiDqD@xSy~QbuqkH; z@|=Vicx9Smr7rm61OVB;Dt$V58e%p$QytnE}BUH$}Juhcp(wG@`GE-lJ%&CW-@kv9xaPqi!@ zFCMr2cK&+ovcRKWq|sQ3KR2J3yC7&KVCHP*XeMUndN>@D#-X+9B3`+I|KusI25I%v z^2&0%v8N(-dm`L&L7&lb9%YYnndDNJr8NN;hZYR|5qovkLqLv@YXHzzW^zF~>M?G1nt#z+b`qek zuq?LAY47BXU(gHQlpaRz>lAf^+^T|l7nOrX7YuT+k4SxAUWj3n2wNhiEZ>_-+to-D z4*qkb(@LHy45=duE}5cf>>*F^iriv?->n1$5TBGN(e0J(<*_?IjOOq|EA#om`27Ky zd$MvhFrqEQ4rZ%&>xkq)iAKWhVLCQG06sGt3qDYIIMawDG=`-fvrv6FDbTgkN-e1$sN z#Q1~MFk?tUA^Ikm1g=&0bNn{f2A8{|E z?IE6G??A7SxKqpkh#m1B=32j1DS|$$HVGL4w8_ z5mfTk2x&0gO%fJPfj7!kz$wy1i47hv*UQzV zDv5VB4;#qEGGltQKE|62EK@MC2bH$adqnJokt{U107{Qw;v`@UV>zgz=d7nH@Eygo zZ?NB0X+{ZFIoc8fL6`UCN&I@~HU$<&`!}Q%HBq7w+@&dYwK>_qLQr0a73o3@?;oMf z(F!BP-8BJ+gf?NHZ*Tm(z+h1D24l&a=gGh)HaUEK_K0h;*+mS~$%mddjy}~$QD9{z zw^k_WV@Y+mcB|u2bsS3}0b*)!udSGa^eF-B5Q!N5$jabRC8cMjVzUgEC}|~WBFn1d znFQ1X7KBebjMy&lzP1nJTL&TU%*?JvpaXpm?j{J4|}pM1Nraz&vI$ z&&yKfQR}sR5B7&A^V8tT1d%ty$&_H|V}cyy{z5n?*z%*Yt0whs+VaTx#>0c9U5M*^ zpzf?7dRmAxt5D4DB`GV1aI$k+7CR;dx@(^@ybi6bJ6^^>Q4O{gYS7^qju_^N{t0ld z*dox^v2`>nL9|(y+)27sk{4k&0XLxs1tWvd6qJ*`=52?oFIQ|=&R29-G>N)%`5L*) z1s(!tDypRi#SF?DAOAb#HTNk)w8A{7sYU)e*hCl?y#I+T>lvW;^qZl;C$dt0FI=@2 zRSgp%_gh2Icy~m}l9hhqrO;NV^5`Kp5&uT6cMn<5NNh;p@TqQ(#g}CK0;2A!(jck> za%Mq7(~FwAQw9Uz%-_5}p|3EWST*US462#@3g1L&k*XBEp)9*D#OYRqqiZnrn_Fyl zi}F<(!n5Sg465S&+~Ssopv#ZxPU$mlbWc%!W!8r#mR1dsrQC}ts~U7rR9Doixe2X` ztYh|^W=~e0gUz`m%3zY5K9>jcB7}Vv_TFVZBiFt`Af7QjBs>HdZF~2SI6<%gX{ejz z8|qoPD??3l1aeQ04~iR>`~9kAv5LAZe`=BgZ#dw5v~@s7H^XUieB+R!AB1Vj>=&3x zWc9dA7AUD3tEC?LqjIM&v5 zE4-tEZ~XVB0Y1zi?UDJ#1hdmoHP#TFltx68J>dTr05L$$zqs|kK>vRYs^3iiMy{X% ztgK@sA@Y`+#3oRtPSD@&IF^tOlAuLt3bv5HT>k3t0Xx`965gpK*T|EY!Dv zvOOr4VAKIn=5CyE|L{~IXeDW32W@TeK!i7$~$M(!e$ z0K1Dut{d4-7K}VNaxpaAStI*Ef7g%nlWMX>)z0Rsl42!j>`V0L;77lw+d;Rdl6OFl 
zqSQhDisL63{{+=Dq)+@EXlMP%O(VaBl{Ep&>ph?|(?Pv1BY(rZO%;xj%5kJi%D@Im<@;*!UYE+;-y~L;53*6l{6^K@E` zS|tilrAwN^lRE?1SZ7v@g{Mv}<>l}~R4n{fMQ1jEs>$E&lMV3m7NY4pJ!SM<_8&dv zTu=Gyo>U6dk-E~7KvOu7eYG(h7^L$rY(xI$#&CNeds@lQRPsBOyb<|e5GMqh94i_F zS=t$B%1&OtqOYm5v9yF9(C1AGPg5k2nEWu0N1ipvPmriDwgtrxa6HM)br_R^9powPW-C`-81oK(7yNy=@q>PcCRa+|;< zSv(Ed9ym~Ptna2li7e_&8kU5YEWEfaD=fq{nzEK8?%SBPUvm&^Nm+9L(0c!|e+jJqPkh z-$fe+oN~bV-Imgl1Ck{-tp`lz(Tu_P?aWe4d9F>#DcyOl`J%7qmGUu$r$K4R1{Mb} zlD06OX)KSHjwOAI$71^NYo|CXy9DsKB&(a$*(cSaUcw{NtSTyn1AU(nz(0KYlke6n z9Ia7DrOya&R%&Qqa#re`nv2`>g2B<{^?y5QaO4a>OG&;tZS&VO8`qt&Hz1te<;U+qvk=&quj7)`0aUpw*%tk0f{pCn! zA-zQ;WIDnbgc5{V2mype2yF;65H_+Jl2!eZNUFIcgfJcXu=rWY(EJ?437N%BJZSPN8d3n-YY3$<^w}u%FT(e#AU6=O@wUK2zl=RU%$!F^Zw7jsY>%WJnhr;r*woh zuxr`tY*;83ekN9_ylR8`4$b$p4()Ba2Xyc2iww^guQZjLUFJ`vzgrE~2lCtU|6FLX zm)W0n9Cqp4H+sswRsKL=R^XAqxu7d-kJLn0#9mK4Uo4cgl>Q|(F}=9r?#zuZJqHZY zu%rD%B@VLZo)So&hGz~^;?$~DN72rRK#PcuuB6{5j->Q?-B8^H(%HJ1Lv^GcdFdRY z@^sJ=v_uh6k4Vl1gk$Gq75NJ5@fgm(0XoC8u;LhjQTIPR==_!Q3xi-79QoHkI9jQs zrRhpKIP&2@aaAQ59Ql>(MaJns;{(Sxxuu&+6b9__JBrknUMD93uI8J zddg_b6HUm-g)W!fl6RT-Gy7%4LM=DAJ+}uhy$ZJU&pU=bKW(wrq;e6dAF9K@Je{Q7 z9XWdx?V&9t zi-&(t6IYeUwd2a;KOBA=6HTU${7StT&n;l3g|HqPXp&aC+7{)_Cul|l8VXLS!kDC zwUdjB86;$Z>f>W4{JVN=YZ;Zwb1r=`;rvs1&vma^m~>F4EX$D|E%m#U#MnvO-Ia%^m@Bw81hV!@cb z&{60t6x4>OH<~H0_L7Fk1TU>gA~&uGIh&<`(^0>dy7b6-bjVeuD!jBlhGXkRA_XA%4iXByQ1>@mQk_zd|mPy(m^{YtOY?1BXu|w<1nhWhtvi6_CkAM zrh+RO@ffWr{BFBO>2SVT)pah?F1&kg&Xzm&t#7%$rFHw{OiQ69Qs7UAD#`=Gqf>64 z`PkZN+ge(-O-YAK%R=e0NID%Yh&$J)^h|m za0DOJ|4RRs{zJWJ;JE#?S}@t|E*(c~InQ9wT_*GzXaAV5z0*M73#6u{jUSMDEWq+K z?ZMR8zKy86s_m7frQ;OjOkAA=KBl9$aD2qp)$nw-5!O~4sg=WbS{|{m8x7kmEbnfe zh1?B0BKU)DpDEO;rt04O+27|k>^MCH?7&JtU4Ob9dNn4$AQq3YDv69KP^%ec-N)F* zyO!>}pH_T+^{xwouIZbGS4U?oyM^|>L96M=<;9Jk4Bz?Un@{#VaxcbEhB3@l#!w?i zio{~=RF#06TQCS4%munU3?S#kA*g!`+LWjM?jUG~tqPE~7Dz->Rb6cbxPkC@FWqtf z@M~XOy?bWR*}PG;q_}z6t;1LRcKFxB^zvxa$8_b3zs>eN%tvtf@Zb$FF?Gxw z>cBEOeac#cm1C~~Fgj?r4MXAx7!GMTcAlpu>dTjR>ogjD)S6#j+gv?y`HtboOF}zZ zZAM+bt~OIXdCh|52RMvo(O%ZZ?4TL-a)7CNy-TV$t3U|2$B#gmL|dUT?4*0>ajJ${ zv+^Kh6TZ5o&xblVpi-w1^Pwbd!Gho{rW*Q+*E!yV1vYbM%N%U%T}F>#N-&0W1X=*v6C?|X5rPQ1u*n(hIx4p zh@~@ng!=HHqjL*1@96$~CLaVJ1}M=C8VN^25m<3y9Aaus)Z_NLeQrT*ioE})D+b=R!yyB zyFR2X7d7mDarntkh7aBG{_|f9e|X@Ig+2Re&4>4`Ne9Qz8onQpI0FW(#*7t`yX1V? 
z(dpRZIO!0HLw2zBFa#aDG*eH{oswbYQ$ zPM@QnC6me;@9wZgGnV|qLVH25Y67d`6TlJr44odd)m=QyI>#2~X`-&^MDdpoezm1% ztdB*b%v*jHd-wL@fX~mdtpv~bIG)i*SIF0D9Qqo&!!y3pAtQ3~$m}aDEYj3zrfDA4 zsO7--#QECq+2=b}YS&uUTJP6CV7kY$U%%hm7P;rE`jNn|((><8U%&cR3ACwNY>~l^P}`aB`by9Kaj# zK_Md>B$aE2lkRpt>156$LIx9e^8jTA-ypqNHWKxpT5Jn!t8JTYqHU0BWSfkKa*==> z=nV*+fjt2ha6Uu7#5y%nSeVmlNosRr4mhsA~ADD4j^}e)O7$*L5HM#Tz@9FaOcO;WJ~Ua=v#t3#)uC z;8{Uu942`qC;JUGI_`>BXQ*k=HRVm#H;2U6bhM~wY>`~qS@~M!$;vPCG^CO?=r)J1 zDtjz)IC7-yrLq&@6Vcz7{WbJa)G$q3G)Qk6NF*dO$W9F$Pt)|EPLNrkj=>%Y|acPI3J!TIHx$h={Vid4fu3)pMr!7(vv*h zQ-8YSG?y9`8dP1D^2PGZqB;}|2P45~P*kg;rdTWw#3dz`E}}j&azTB35zW(;snd(7 z-{|GkMyea#BrU!MKR~Rzqla{Zb#pK&NXq3EjXF5WGVEAM60S->vyecX&&oMH2Ctqe|^jH%@1=L9>8in4Cv-TM*p3hC^S+tX)?-Y zK{nH311->i;8H=SQd7~OHxkic6x9YJR+UG#YP9(pjaDm&8nppCct&b`hTadYLQi+g zMio`-v}(0hB^nIkGjJ^th?Xvs^*Wtdpu2@91ttvAFJuR;SC*HVc7k4=Gz(_6tf3mG z>04{4yH0^n9TpI>A4pvPsIN)oq*!UFr_NGi;hdO@uOj4Eo;91zplCf1Te^E_LD&+u z1gmHU2^9_>+B@_DTX*^1;RrqZ!{K}BvR>iZb2qUEhhX5RWD)Rv19qf?)F)5+u}H1$ zzDs8iwrITgLrqsR-~jZi=DZ7M-#D0wty{dlWjX~du$q; zv*;TUA9LMlMeK_)s*_h1?od2p0y>#;q^iMB#NN<2}W)$8$jnA#FG#-h3~ zD1_u*M1m&dB6+bz)ME`SA|V4J#lwONDoGFxg=EkIlb|_I*SU;hskBC_GZebbd2+#t zzi{XEk36(8vh$YhuPopA%JzlN-Ac{>S~>KJb;{(-w0YaMZi>xQt%w?1e)P+2i%(`B zyXmou2dMWDojTk$)VO6<=X(=VKfe3%e+RG%W{jM|UjGcpjL#p2>-xz7n|lHrvEg8c zj8jWhLa}Zlk&T_kJ;qF1-!t6QsDtG-@!y;>YaTyV%9a7f6*TyKP2u8IE_``vIIHY$g= zcc*7(ys8yhL21i#XqVj(inB|T2e#DR;U`a#NdjHN+j78 zSzN6u<+8ZV=xxw}uIqsLJfxAAK#L_(C08-IsDN$8w-P_6?(P_Ia)d?8Drs4a*FRbhzZ8t)=nl3%=t zlv%+MLn2tDkBj5U!qP<~WkK|fJf7tIkrNyId$4l9R-iP%a7E)+Of*rhWpP~Rm~T__ zWRzRdZ4Y+5^5~C$wd(O~&5U;rJilu023mflynflT-m3EIS*GQ@ejoF}-s1BRub+a-^i$Vi-^=D-OTY5| z+V^|&Thhno52X)%Uz-0Ved+zuSE4gFyO|#KkOA$fkg7YBilP38qH^EBh3DaJpbVaJV>g=Gp)kV-TMO zyB@0BV#@L4EghyZM{)~f{iK7yKqnphpYaxH)6eTPb@(exTHzNwqCMcwL2mX^)Hvv| zV`zrvbUMXEp=ZLQL;t)uG;Y!GoH@?S1p0?CeXFKp*3d^6)+8?f;1l|bH(TQVlqMQA zJJPp_7k_p4)(ch9s8|*(SwM{}GW0H|l_6|1d;kvNgY|zb+a#ywlli`FWUFsm<{sAr z@%^s-@sC`8jsGQO7)!2-Z^+zRaZhG%9oy_}DjToU(V}&FXsNJr7Uty#$QIIaH zXo^g)oJZT7^W%3ii$tW_Idp!cv$m_Yw{}mh)|GZuv=TwmxFWuUGbO5-;IsQ$GFv0} zM&8b70=05&YwcpTSm;!Bsyj8E>GkS0t~Ks1-`dEU_*IGP)i=6t^zF#>*8V#6PU_>x zza!3et=aF^1w)eGT^I~!B1C|TM^(}v5kf^{OEN-PC{b0VD=bRb?S-r?!7*hgWD>qs zYO9oFB2Rh;>KiI~abVJ9C6)70KVyN1=K0bd=9wcV{bNhY`SeIlRaP0ikr6~EVG!n( zc}7bm5otiAV7RZ#(Gs=I#^#h5U?4zjG$IZGm(3EJV-E15c~?#Cv-EWmBnznnG%9() z*(59jr-3`rOp_hm+|RUJDE$b&?MP0yLy3Vc@N7p9?~+XRDB>g+DIsX5PJ?K0!l8)Z z_8M+U04r!nRfZD|AJw?rPB&AlW05F}W?~6PEJIV8@(d08Vi}>5mS=>xyC_4`sFFUo-Cr{fxZrfq9E(tggH?*5m)k9%IR&9$ zacUba?z;Gi*HzZcuurB+#{^|L9it1h&+A8vIor?P5requh>`k z)oS-cSGL?bTf|-qg@x=rl^L`;LYX{K=S$HPO9?6eo#uOd51Ajb9x^{|)oXniJw(~7 z3$84@S?DW#K)BPjUwB5)83dEayi?&as7h%iOT-O4SHZtR&kdjmRI<_B%RaznKf6_wzmU;`bxMU!pt}fu9W{2Ow9140k zrbU-5R;+Kyj-Hv^TJd>LHyC|4$kPzC;hMr`JO4Y?ds@9FNKwy-kkFdbVt493{iVmQE=x;4U8VGg-5;_H6@ zgbyCv^3JC9r|-W0*BkuHhR-}R{N&-jL$v;jlr-z==3K{CL?nu@Jg1}Q`cgp#41?> z*0fc&s;q+}e>}w3lyzE|uf+-VRnXJ|VcHeu+Mo~)+LhI4S5~84nQl7-QG!s}EQ)9o?DlE{uQ(D>{r0am+dat%P|YoS7Rb^zapsfDh^T=l4H9yzcj#=YB{l zhX3o#{58=r!8O9F&4H3=-|%z48Giq{Hx_y5WO#5oX(RS-DBlMbpTdl1Xti7~S1tEk z;khsUsAGTnne@pj?ObPS4bDt0+kVq4s*+A9_4qt!_Ww2ijlH z?LR~#X<9S_uJwW3s$6S*+5VEGzchtYSGo2FAkmy~3+`F1Rb3E(g(@N`ppq1*ND8Q= zP&A@|Dxy?I6i`KWRh~RdwqT>h-;@$#KUlLOJ)t47?yHZ}>14p}ip)-n1?G4`rXsOa#fCl%m#rO6 zEcS#G!wvJ}_CRXFrs4fjyCg3by07siqQk#i*;)YYn}Rr{ zb)qgWn-cC$9!dTp`L^(z_D61 zRpQCSeo{n9QMyPj>M9b8cB<8QrME>xkHT=oMYA$uQXpwkfM!x4X$twg3M9QsrB{KZ zcURf}Sah&4_0%a_rQB))=>4q&P|N`ju*08&5Y$b9-q86J#wJiWa8GLCAOjG~SU7CS zZB1riP5+uN9=(IYb(el7m!m3<;_{|0FaK@n6b$(Cv7`RO(?T}~ZiIdBiGww<- 
z$ssvepjxi(tiDp)<>+!=S-jJ+)0uT-o%+($di@r?;HWNhwN`gk-y}XEo~#xP!j1Z4 z)xuOQ;Ntja$jYG>u2lHXfWm+90vQpSU!xMe*V|Hm)Iv zn1hyaG~kyknq*!nF@_OI0chvCxVn_;qoz_WTR`sMk~wn4&`i0j0K|+A%0&;($Njo4x14b+110_ZNnb-4Tn@t;_0zu~=pF zvf+gjYR~=ji-8~i?2>toqJl8rW9HoM0j$&}dPPnne0qw^Cyhi0M?OEuArh?|962Xj zdA3r){YnKyl{R#cZM@n>LkjFd%6bXGDFZtt08vQk7;-g8&?LPGB?u{mGB_vwiRcjO z5$d30>c^8vq-;DZ^W-s7pHei-SD{dT@`)1B6lODd>{UQ6nfycY7+f&ia(CC1JyVWP zIXOkNP1)sGwHjzruO#*f$Q^H^DFsmJ5iT5TaFhv=T>PNuil@T=LJkznQ5Vp?Y(3~eGM#3P3h@RZC`qS>o=wk+Ifwd`rt+a@ zMo9!(SQ91@g52-u^(%6O!7eo{Id{i5RDwP*y!e}vu<~2oIL*jFSh^CJ<0sF{tAh#` z4l1YM*Iwlt#Zp zPdRNpce^rYe1@Rs;vIt-WD?8DgIAlLGqF{KZYJy4)u#374OLfFAI&>r)LurYRctE5 zbXBuCY$@wy+vJ_>9(lm{l<7$3$fVyIe^+6&>ZxGDHnyr_JK0jPi|nU+Os`jH^^npD zQyKg^pRpM4tEF`Hx)$9{Pn27m9;Oemz49S;C@(wlRlH_! zoW6mVS3V{mB`yk9&)?!&WbxKb)y#2V9Ffw)%ltl3rInl znbJgv!=K0PP&>3mF?W1i9Lp9I^+hG(N?kW+b~UM?I=5V-Hfr2z&Jlv4l;52QhUyx| zyX#=)b*rEUgG2cJ?szafuCmrW4z{n5$(Rqpxq)Dl?x$WEh>ubk2~ykwOSQtDsA0uqA8tD ziM=Y?rJ|}qHdZ#s##STiHPS5hp9a~Na!JapaDTHxx6BGPGl#&KID(BbCI$a|yBeFG zrF9T<7Emq;F~KD%}I9;-ayr*GNlcviKUaoj`ofu-*3sQb`%=x^8)!b_+G}zLsE?_ znQFMRT)(LXzH%p#pmeNX;-zB;knlyIaIW^89hHt+efVx7wqyRUA)XTgHd- z{B9{24)_CbNR?r(`isYcZ&-wxpvAIV#Rl2WHLM`#q^twH`DJs-cOry7PElg?Fxp<6e2y4yO@9x3%$X_+1p>i6!EBK8jgvujaF<*`D=47- zl5CXB0kef$zsePc0&c>&Je2IuJ9q}W$f;ofbaa6OtT_0N-q#zY*f4b{JEo%_XyCjb z+^8EXSlJ+TrzE(U&|DEp|kF2YkD}5hQ^+6YKG?+{e+&Py{UQ7;G?anp=0TJ;liQM z#C7M^Z}JyMqm_XkVg3AsHy-`!_o7lf*Y|Zp-&fng=SJQi`75-C86-}hmz(>n)VhOG zCbv}WVASemG|ozGV{KR3?uM7k2-Da?AuC{(7YqSr?jT-Gge5eb&*2)jS}l~ZkTsNV zwT7@leduFUfK;-l zhG+Xn6Sq=2f1KoOhgR#yO2( zI*%$}Z4(>M{q%fEc(SR)rh~1G>z!9Z?=%oMIUtwcW!hEp_}`V9c!Z!z*!`XVfss?UFO4 zZuoK|^NnX<1$A=|Kv&EbGZ`$UaGG$G(tHhag(~EvJYy+!GDKEwK`F6|h(oi=s4a^7A6`dJdK8mbRepR&=*q*S43BzS zZB?plonx(gt!HDRtLzSsW`pDD$dSbF-M{y|6HzG_Nx+Ls~$gVDz$~CQwg8;TmQby&nY#G~KcC74pnNU_j z6;X;RtdJ_A6b+e8%4!?6t|HM-t9X7W9SSZ+@N$?0}Q zcF*k~a5t+OZd1u^9e5%|c8?>Ph!xqfSrG9ciaU!lH0p+~JBsPBYfhWJL6&?VKjCp= z$QKv~5c&z_L>r4Bw~-8I7QP%|lld+QJ-<6}-cvlT><>Wh;ciy+$DY{C%Ase#pYz=? zhthu>`tO~;|5bTUL)Asz6?aX&es-pnT|K<6*AMCD83~? 
z@qT*bYI-$W<6je4AG$KyN4Es-F2%PMqEE-3Eggvzr~}v2n(;5vCy`4UPhhfPQnY~qjyA?NQiKp^i)CAoCU`BNt{}1 zT)<964o@n6l(`ojJf-D6e7UYPoXXI0AEL5QNrr?Y=|Dw>lJgFHumNEY1J)nT>hTpK z#Ss$n_nH3ungo~6L61SGkv(%zs!2;`kY;7?iX_42lgVI^s^JzM|39IkhRf@45P$_y z@o~_oR^Y=5RhfX%CwXEssuZMDghu+ew_e@x(Bst6*|+-Kc$-J}!i&4F!|vr(*wr6i z|6SCqe{|iZ!PxNCH?|qr?R4L@n|I^IFG_kx-V;@z+GE)~*=fC_1fT9QGd(_JCniXd zD%nC?n9fo=NGHq3tH)Nm1h=@rvB0^&wZN@b8C51ye5_Vnt6yteYg%va@^$&UQeEk7 z+8gy-j9X0Co3|wQiTg5=)tE6>8mqh+Z>6`2Z-14F0bjshR8)#>=LxJ{Ogq!Qv>y)A zmE)_X8mAV|*3UK0ljau9O~Q}C&)gY*mAiVjW43d)tG(jl%*B-#S6y5^e~d}c>x*o9 zcTrd$s2x|7uI;h**tSLP*4&-CC%rFq4DU$&B6+O#Ol|%J+OckYM9}>teGNPJo9XE8 znQW}Ow;X=ttNm`D?}!)sX>z6W-h4QP*BMOt27@VSC^m^Posy`-bO>)JCd!3yg72zP z*%zvWI|1L!rD0i0S&mxR34DCS@}%X2MSugqmZ$yueMyOX9HWDOcNsle_V=<8kW_d- z$z`vhAds>^SsH{^EPIwth7ZqVWiyuhICUhu!RC6-a!28wp`MzQ;*wv_w-pq38y;~r z)0ERM&s#hRw|#a{sT=!r$}U}XB(1R}V)_zYh7_4OF|{Gmq*2T(HDrk1P?C&GAgyLo zQE}7?@~Ta#InL)yp9(F_?Y{#1f%_d?PhG5EW?U{UPI5gJPRL1o&OniV4SI*UMogP) zG6^w3qs~4#L#gn1)Y`HC;uTwy6Fz!w zd-LC)9ari9nak;gJA$k2;HpixjH!(e|M<2UC!bigVXWO1%mXJ*ZrStw%@riPy9V&Ovz5|z_6 zF-bLJ_=VY#+A&|9Jud=^29s^o#qb!ztm;Qjst#aQ%jk87NgCpAajG(nu?_?xN-EDT z^i(F)wdxt_4Q4SKj>ap(74fEUQ+#h+Qxva>v)1%l{nh4s<45EFimB^NIlCi_O3q-Y zSg||8@aqVMp%Q{mv1r1mD~4|HkEi&oA@hNvI4Bt|{TFc!LDK28vY|$cS6u?yG(Hmu zwL-S!=fl*f7`wG>62+O?GGrx3@E@1D8LMatk%so zy4pr<3h%OEZxS|iAHTEBnL7K8-of<@1xVay6uY_jfgPN$n8>qmUvs85N8{0mDS%B1OUzJ&9g4i56fGqQJt7Xa8Sb>_TXDA3X28sz&-MwW)twB zM)2kKA`a>3*rHgzWe__AP;2l(9*-{V7WG!9hxZva3G6j6uMHi??-)&SPcx?uwnlv1 zQK!kvORH-%R+Gb4V?c0oeA4PFk@oV`gH-+ynQC2CYPzmnY|}lWQ^(XvZHYc%NZ1mt zB6o2jUS6%PaaE?Ls2erS`l;^O>NZWAwmq-S(B^7O&n~}2y+pH0zrwY`y)v_2T(4fQ zS)X^M{%XV3t}ES}JXZ$RrEU;!()M|_rnaWHmEWqlM}M2`Hpe}#yWO`Z?nvF9-lu&` z_n7`M*FN{7p2xh8qy{tt+NbjdT~DQdnf_AyrT(1v%fR#%sio-^<=b@PSobR5YX9Y> z;!@30?FyaHtefGVnrKdm?e2N03)4cYrd2y%52t##?dkQN!c?)R$X~9h(VsWsdWm&h ztvjvrhnRKdLt-)&-FUg*CIf~^rYdn#FXixBkKzylFqZ_wNy%&y5z5Q|( z_h*6|l;YMD!&|`V^4{{DN_;Nc>3cxIuR5K#)7uK{Y&m$|;?4NJD285ivQ$31ChtR1)cHe_{9y!?_NQ!?}!h zG{e~`zPI^5J?;KyjYcxnX>q_pz^%9)qd%-ZuApE6aszID0`F7Q`*;*cBl0_}`g)}U z7YW;G0UVIj8n`Coj|DoqgUHz>wPCY6cjWeyluD|YanVh)scNDxKRJA30{Z8xk>Sfq z4f##u=;w~Au_aXhULpV)+U9iHikK7`TUkm)%1XS2vGJ-I(O6~p`mdf57N2`ST=oMy z-tLX2L*X9`Y1o#Y?^VQXMynPk+jLQ8^N^o?yfJNuQW@pc9U`OZQH8qy_9+$iIuts& z4@Q~v_Z{DNwx-1r`_=Xhu?_L<_HA*sQ{{x~d%8eVkO-t()2jccu{VK_qq@??>sIwz zy;bjfceQ#~x4N}-OKMqmmv>vXu)vmWVT2WMLLg);o3IAQO28P04Ax`^GLf@yFTogN z>@Wl+aR^x)JQ>1}ydm)N_Yeq}c=8B;CLsBJ=T^&*`DK3Z$?m$hs;jDNIrpsJIoD{! zofiC}Aq6jP#$3o}3fW3$=PIUkUd*%|$#t(56}7 zl>m>oV%x*C$@)LkJV^_Wqps#V^ICo+KbfD(Pv_0~-G10fcLKh6Z^dNex3DI}zRq%s247 z<^;vD0#O_-Lga0-H(=CJgX@0r0Xx^yNLWxZ2RR#|0sL1}89;1=y16iUn+9`f=`{Ow zB(md@!TA%h+$Ci=9HA%7e{5NG$d?mWB&r)0vnZ1XDKPiuM-Tc)eLiD)K+qH~l zJ!BOptPEFbGUotXG@_`BNU_V{|sO!CA{H8IETUW=Cg+M z{HjglyIOnxEM^k8AJse-OKWvmuBlBP9c}_MAddQ@=1SILvyoqeMnqPyqe}ld&F1ll zqdfuI1?#`%3K}}EX?_|&J$&DuhMBi!c#iN?R4Wbv1T9&5D!SwRoJCZhVDe z5l?9e+6n1odqXV5w}$v0AtjqI526xG6&Mh3@NtCb@nTD-s&h2Bo+_OJ^57KTRGOWA zwp2Q)Jar0jbD$4)+F~E=5j`7wnMX}FrkY>39+M?c$+Jh;({qP#m+h{)DeegjD9x#+ z!5Uj{TyI*bu54RBpfwM~Ewa^UAtgEKYPsDuor9%+!lPF19b%wO(d~4VV6|i++a7@2Qg}& z(TcXlhS2uV&X80M?FDA@Ltt8T$DM=PporU^q~ynrI-c8cX_e+S*ebIftYac8^f>JH zS`82WExZBKXwM@I70Thcz%L?#m`!G4>qm^de&;CiGisDy!FQo|qD2&NeItOoK5pLvbMx$Sp0o9r@nm z$zkA^FkWG~ZavcUkT3?mXE%7~(JtI6DDruPRjE1VZ1kzPcb#vdd0w;G8X*;F&GVp4I0oz&o5a)1S9OfDvNm;3`b?sbDn!8@Q!ElGP-|$uGTNd+bGaIn@ z@{Xn6gl|PCh%B{eKv0l^xo{<2N#f)a;OX}pCF9F>%-#^n*zF3i36qX#hhYkf!w#g3 zD-PAsiSDf9w8QMc=zn;y?wH8_V4WVP!oVQQkOt2j;P(7VcQ@U^2G7#xL!%H@C7U^? 
zN(q%keQV?^n_X%s*6w89j-K~7w*BKj?x@!b;_D~=C|D&q~tDPbygH2Nd;wEDd8??!7MD$Z^WjmEa8 zCqfgk8$!EdANAbropw)$z6z)M{pkl$H}}WpABSJEoQeHGeT{`o;*e*H=dR>k>Qwq% z+U!=@KcP^eicb;@1J$yK0e6BfCQ?%=QAjB%mBW}OQ`7H=N}o$PQtydB2Y2g_16ivX zxAzQu$kb^4o(68xmil?p&NkQ&+C_U+;rN9K6#tzT4hyp=3TmXiV{~p!(?1y7&WY{h z#I|kQwr$(ClM~yvePY{oGP&>Peb)b-wPwwHn6tJ}2qOZ503V(7v*0tws>C&564`6%c#1`_ghPr zrv5l4+@iy2*~{xjYd_7xr=PZ&rrqz@RcG99$4S9x-v-n5v~@Z#Uil2YGT!KH! zwpI&kwo#7Mpi1*`0?5xGn$s8{FS_|L{_%rVg+fP|OB~ncquj>Q+@5v%mI+f!`SniA z7j2KUs1`}aJ=l&?W8&Q3vyWWwW`D>KUpU=Xv7^J$SqB2TU&J7(73CBK`s5rAE(%#8 zop3Vy*C@;U7BYx4ktJ&wZHJ2*Kit0(B3aApCWPEFSg$k8$H+&EKA~uUMRaRv- z<>_R!UNVUR`bul8R?l(@K*n z4rSC}i@Z(UU(7KYEXh39BL1c&D(?qNcP6h5AvH~S6&a?L#}Wkf9G-px23w;*6`aj${XiRU9E+x4kuc3Uo@YH@u4yqE{r%lN{-Iu0NH#o3U ztR@7|7>lNyDQArEBBnuY;Ke2QpZa0AwXqrWHyN-r{Az+H(sXk-e%(^R_I0PG);F zKF>hUFm8GfB|ae_DoKSej;)Y3CUDqk6L#YX2sV;ehlly$^mzMacM7YQfQSU#(X~~< z-z?PnOKXV9&Di;T+&Z*o(`Bh*$oA_*)avnu^(&rnoHh5e|16EO>zwQ&9W^l;_S$=| z^cyOoBOvB8@x0LiZ>K}fTID`m;;bT+wnxdE^ zQsN)q+%S_(lkosPNI)-XQN?3|y>jwLmoIZ45Ds4NG?Bpk2@_yIL{Cs(5^83R0|o?j zL2Ol7z?u%eDf{mvJBSOh0!e%))E6nE2X&R}HvKc?TPyh<$>*cOILUf?bID z5AO%QYh+NdiWK1lfqHa=CY3WTmg z_06Hf1IYbB*XCmwDu!|k2GIaJXyP6WcX#<2B;tetD)RnC!>qPeHH!r$wa%5#cfBr) z1*>TEg-5|-QEMC`-x>>mw}vI3PqCEmd%8$Hs2-#5NiUWwwn;04-!$&r$VZS9#8+2- zkMBohTE~-dP9`Il1xCg82K{W9XHC^es+z3Ctl^n|RP&Br0I&UTxVPvud#LeRxQw`| zztujvUOGRSS54WiL;5_|sILlAa&SPk;0J^oM|O}ofPLD5>FfyBKRrJgzm7k^dU)UH zk{-e3!fO1FhKO`5a^{xZemz9dMKTls-p|nZ3pDOR)n@y5`3C}0++@tK3eRqsNB%Wu zH*=pRq(FoHy(vDS-|HfsV$1OfwhJ(adQ^Dl#6c0NjFc6j=qz!ZH7TxYFx22gD@$!_ zPWu}$kBKDfg@)*WKYZt7gDBL&cwUIxV0>9O+ER~Y)!19cA79f$ z%Z!D!8zLzzl}%W1o((L~i1}dNMe@gt2Qd`6Qmp`gbdb0?xKq$u<2Ts#I>YbccaDUw zT%RW@y6=%XPrTgqnQj}Ue$^+NPHC-&dw8#%@k}r9FLc8mpY(L!rz!ucbf3$C*-#dU z0H?@XLjm!|MiJNMz==w@=*B}h8&%QW8P|-oA$21(B!kI7(@$_%#yb5IPcunq7k$E8 zkc%!-O@>vFvlw6?w=CgUH>spKZx@wx>l^`MEp!P4@rH5nL;E-IV94jSU!!DliV-pV zCi+T|=+c;3V+zE1q2@{HW*`jJs`nFaA`Q25qb>XOJ_C=Ir45Ch_yKwPsV2AgrTG4% zg$PS28^v4uJ!E86aVmdTf}X|SFNQr~|E$%qGDnZSnou(?&rs#M;$78JlHQbGUam_T zOzv@I)OvMB<4C8)N}|!E%S;mE3VL*KCc@UwLqu&)KNXBv3wXo>0r5`Ly5Hc#Z0+xz zi>i5^vMDl&=8tmGw%pmLMjD5y;bo4jOM~-keEUB6+0%JJk)l z?((Fao4MhFz)WESy@ku6_I;ADu7a!OIuara%8S}=I$%Elxj(lvH!EwwtIpbS`Otja zlD&==UG{}re045u!{{@`u(%!t1z66?jDp%fv0Z&*^XTEN0-XhyL;V`c6(@Ew$pi61 z++>Hpn!&jG-VkeCd}06e8rqyvqGzSAI>rC)VFEzwPE8VD$2W3=7FtwWgO-77AgFY0 z3E_;HA!z)ILy(B)h;y1c!?iT{QrH_qEg#~2N&1=EbRt$#JA<2xSaraPm@7C#FE*k_ z2$iUkDXJ)iR+=S!0KFk{!g%H8xkzTT10)4jU+ru$Aq?s2g6l8)0)=ykShvfX_=WQ_ z7OJ@g^19ITQSV=o<5E%755jGMjyyqpVVGL!G=_LUW`;_@MGa0hJuIli;MM-qfc%{z z?%U+6NReaZDZzI0T@Po?V;1Xm0xrjW77iwphLvV^cpnc`&7m%~%JxM7M^I8=*??6% z-fyxg=@HP2A`FlhX2R}|vZJojv5ZkhtoT91a71!=;q)7^f!klU_9~4v_U}0y@C)1$ zs=Do$9T%hoLkHk+avQ-RplfXzz<2c+T)lx&7d)hNKlS+abaeZ zrWBRH8pAfhPoZUgNo;RMXKhIk?0_I3Neq78F+Azsw2?F&<~#ALa4`*W5#R?HgAf8G zEq~S-Y8ez`hXM(}Mt;J|%BogLGSEy}k1E$+woR={)1zqyL1CVs4OG-dF)QoW)nOuw zaD*84lN3u+Y9eZ&8Wz6N&^NnFdf`WG7Dfg=Y70_ovMOCK4IWayydVweMQJN_v5{I) z>->SD3Y?JFL{}~ENNdd%cV&r>ao>$i7(Ida^F?y$Xl)Qrq=GVQgp0ES1-S*N?Pt@o z%6}1$Ba4mCwvi~+B#~>y3eyteYRIN`lgKNPYT$_}(Bs$$5TufWNp?Bzm!cyZKFg%# zc0Oqi$4km86C`hwV13H{7>`&wG^$`^d5s*}=d5|%$lTs;nLet{Y*~?jJ;Gb7D`2Vr zib&?Zw^iZoi(P z#@n9X#DX$TRj-k9Az4U`e79{qyw^;*>^44{H@5X|HMMMJY%sdc(7@O8n-#sJ=YQL- zJ@Oa|q*=wd4xI&tQFEElLKx47sS0Id1G1T8b*sh8K7OQbic9RA%EuO;AV&10Rjxed zRD(Jk8Jt7m6|Eo=IzGw!4R0-c?0i)(Ac|*r(pk`Uas%hn4F^UA!}$6I*RcF0GrFhP zOGdYd@22g1HB#{m|Tu(R&NaN(39wJcWVQJK1La&4FMcKcoV?V z)fA#{{j2Jp#D`vYK>Rf6s&jIvW>UC%a;Sde&goW~W?QwaC${({hoYN(=vivlX4-y+ zjw8(lc@iwU1))Qrw6Ld!q^WJz=22fTg`60k5owDa_vdJL_ai>$PkB9TSIDOEOO*pVa>MMDRsHm~195i|9|act@sBdFBtc3=ahE73DJLlx 
zj1rC#&cOkkD23i1GjmrA4$`-7sB?rElivjM)cO7j&}~v1K+ix%Jy*Pip)=~ibV!VZ zvzSvuQM(w7nuJGe;m0YZQJf(Zsi_|rHVGr`k|}qr9~4G+291ar0ge2Otgv-W$=T9a zQ?JxmTd(oO=+YuxGHr^Px_c|_#uRM@4d)pIzJ^O5y?c#x2NEJ5%4GK#*Et<~Dao01 z3*GGBvYI=l6GnIJhlI_03#qD>SVxAbstM^)s#n|8G|N?@5tA?m`onH@kk$rN42}Y$ zd3eGmv=MrLTMf$LJ$rS5e-rmIN@74<^o!Y*dBGIDBuqOxZE~=2mNht)Z_V|4dzZ*ZYiWl{5q_2dJPTy5 zO13<=qZP&ntpS;XFL8l7 z>^TAZz`Z+g43IJ;rk_scPqszLPemCCMWbONWo>RUZ#}`E(AdFx;Vey;FH!D_!2%C* zE*_DSa$%so-Y77~Yv{wTcP+FggUugZnAC{oh}_ijGrWvC;GQmOO|?JpbZjEJ7zt>U z`$LPOl)C1z;201Qupnl@NEfDEw8hao5qlpqI|kS9Z!1UKx54j$B&@NB1V*Xmav-y^~n9kP$KfCA-JGE};WkUq_!R zJzAWCMQ|{(OX@mTG-kdSmm;jNJN%$@nRP=@(ljE|^x9InaUdGaEP8%klQ)>{6by=r zk#RfWDs*>#TQt`s%^U+f_s+o!n5N27nR{iGsr;!@KaDHWqdW@NxdgTHdsBk>IikMT z*)^imWqhI3Nd$JQg)gmmM85<3T`MTP|BsMt`>LvIQjFiHd2#%_)9Lwi%?Nz56s1au z#LstGsieZ7!f2)BeRzMw@681BMXb8!&_atj8<8myGD3p1y1pn~mni@yk(io3mrrJ3 zBuZt8Fx36eGo=Y_U#1BDj&vgj4P)A5`O8(uilPc9hZm}Em`Fs^iq8+YEZGsbhYv0;&luwZaLs3?9uid!y;eSm&V}*d86h6vL+x~ecZ;%zThU8g0(AR)KS0L#qC~IztF|#o@?gK zZqGs?qt1KbrRA~lnDCNt{^vawS8^e`Eh6#LqM>fQNwa&)xIE%AB174635*ahVznl! zUNT9#f}qMuUsH>Y+*c*aa?>(_xAu$g>)Xh1_Lp43t&u}<>!fjgo3@VVAg(J(miU5* zIm%oby>X&v;tGn((0=%V$lNRxmy#zzIp3I6{DBwF%!vxh()C-pgoq)zo5)!Q zNi|U_p&V?=%||0LvP&tK$p7^z=&{e*e9$|qgs4Y01NFJqug|4ba8Vp4{4oMG*OYVI zaVU((F6jt85(xsv1D)~4*TS07E{gHP5ukz0D;2mQnY9}qOEkI12>xjA=m50@ zt{0mzR{U+{RjZl+YBaiwyu#-Oif&vC7{5^~zi-p}07jj;j1Zdb4N%+{riLz-61cr# z)X+PS1KteLO4N|L&Qzr9Z@);cu3n=i>R$kA9f?v2O9#73Q;yy7p72cP{d!2X#=*Et zazEiX*Z*-nki7dkUeUOHG7q@n^wVbC>8SB z)rae{^l@7Eg)m{X(X4l7V7@weR>sKCH!M2*fCo%!T=jz(fu|mG(r!oo*$;i-*<{ffV7lA&07y1`j8#&`E()F*uHa4i{536}Jng z#g-#4uN0EPM<$nv5ZB@E>5kP6c7^ZWBllk|cFLkTLd_j#2M=fi~3!t)eTy2W9u3PG7ycc-C`T4>6pQ zUNs7;+ZDlGvvbb{G#U|48cj}*3fGFQ#EnA%sd&onNNED|*j0hO8tqj!+DRn9cWS5oepiqGwJeRmQB zlw0=LyZR0xu4iZYj%7dAySuC0z-#Vq17dgrX59mQJA6L>E-&};YXPT3-moV)6}J(3yv|d)YO&wgKOc4j?DjeNpzvu0O*EpWbR?!zyOtF zE#bk;$L+La@B~V-&Q{G51sbg{OS{V*x+~3Um3DoSG;prd12thM;0~ivHVH$@xxz|; z`%eVKe>SoeUp0iHE9jIDZxdJhjf7X!5ntSo$Fe=P$woS_!pJ*dS1 zvgSxkWU7G4B`l69=aYSMV{v>x96<;6<71n9H$X=bA1(hfxV&Ei$X_5@7kwQluFaIG ztq?-17%Ps{x>A5!hbH!h>&8|8!ve@`h>iKu%nRQEJKHg&00O=PknJNPt-)Cnq4lk; zHyPOG(CCXBr_L7B`>LRmz<>PxKjh5A@=IdV-8?E#GYq`3Lwv8P^)ywU~;B zuw9SQIJ?!(_mLT!h}FkO+LNkCx~se6P1k>Y7TTQ)fm^1xYt>XMIE>?`pUPLZt6=l2e~-M@>wv|SG|GqTz zG|r$-$FeT+yRrec{ge-9i$yYf*w@q>WT}npe8Jf;)L*{%YU~YV8E!>7^t|+ZZ7h7T^GQ9E1?R>n2?o!IBDh%@U3inHh@w9Q`Fe%VK z%6+XLPBRS}a0hcD5e*pNKGuSK;@3-$s_uT6dWH3o00ai)?eMq=QRD7^L%mUI7B}HgBX-h%;$F}>{Zy5@w=A9Tz_WeBE zr&?#-mzQ?SCBKE`CM%zK)$@2*$xz$QzM~@l6U`;i8civ@P*dMpoBN%rdsbWVkhYnr zCjZS=(-qO9{e6Enk+m}%s|D3%q1Iy{DJ(a!D8^I-^h=s-0x;${u%zt55Dk`W0X;L% zLB4|Q?w^hF;%Kn$Oah3`#)RN$QA7n=NX@I-mOfOKx16WQ**^J#^02-UifL2PaA-3- zgm<;QgIVA*%iqKoPsHHjcJe|40QF(t5W)JY9wi?ami~r-Qoy#{nBPrH2d`a|%WA4% z%`eY&vFh-xJ~+%|O;_#yfkY+=7xv$>gas)^mNd|+`+YN079=O%SbPy+E*U9AA&>(_ z`fGN7^%yz@f@d=`yunDPLA3yO1IFkzHC$V3?QuvMx*q^N4K9Jj2Q(%x-pC2^={tI< zZyXzlfl_Th$p{FYiNfc}oO_BoQM^Yei-04*Zdw@vxVxyY`aY-(SW*%jO0w1u6+xvq zO6&blyW%jpYXs%hmHPW_p(_ObhJo5c*hI>KBj5`F1upYB_u`pdoe!N$w~V&1+1_rT zwDLH4PX=JCa(68KQrw6p=i$@O^6V`Ex8N)AC_~GE*U8E$DHI$8^#iQecPOxX6TI%& z9qW?aa#+gAwf)m$yE-Yl^_X9&D*5c|^ZkkEWqC&B>isVG?Y=*MbZqNw@YNFP^RhNV zs^^sTJ@ZO%+L71PIg7Ba(Ws&|c>A>Jz-g|B0@urM01I8A^x%Mdq0GL4dp4!p8Vj7= z!nQ438r-j3EHGwhmq7@E!)NX`1@>5&;QSPm9#<#Osj{P`rk5vUr#8rlFb6Q;y$%hC z4QSW4Q?1ury45T!1F?C$?GXl1r%9RtQZUU#IS~nF=bUBJl_lz7PeTmc`h2FGeL zps8d-tD(B3(mm`g{mL;hb(J~MqSO?|_gq_uFH@do+oK$LTFOZXPol5d*tb9P*SymB) zV1>GC9^iQv92!*~4hZz1fGSTZ*XDOvtOnw)hrVM`oN!PyzDCXf9HCO_+%S+(b$HP> zZMC^kRbK%diECLi-fo4O91uk_4_a8Xn(L>Ag_T&(?KAHL!Jv2K6ov#$GmYU#+SrQ~ 
z%`p(fE}bME5i?5Iwgk@V^-!SDZ{hU8R9zyT^VcA(C#P z*+qp>!SH#3wdZ_oJ(t6CTc@Ye@T+*j6G^FdV!27M-QW-7!3O8|RQ9QV6?)h4Af2l= zNyU@a|u)Di`a;$o|*< zR=$cJ#SbRs+Lx`gp4z*4&x~6$FD9ImSn{68ak)Y4+x2;c1geA-#YDm)Ey>dc-HZ)m zn9)#U#78_`5|vQmQlR8Ac{dB6QU+-4&`%E4lDDxhLn9xuZ{K0%(CkRPJ)NC?*+m&Z zVl(*k();22D$^{-VYuZlP-OjbB|ESt0qngbU1~&&aAtH&}q>;5jMOM`|ge{Qn zrV6DCIUI$^4IH^qfZiW0tieg8tvu;dq;x5ufSrQAE8CqBM@hOX_H&=ThJ^5NCs-7+Az&( z9wySRBb;E=q=I-|y<pP&qI>#l@w@MBMmX1kt`a_EhQ4^x`V~k4@XA5PUUl1Ie{Owbt0E z4NJXcd{1f>!|>YFE*A2I#Nwq zJjp5Ov54z9Fr@4dd9!Q!<3bSkdDMmXF#IoJ!{*IS!af&b9sn^dG*sbJsP7_w z{R|Q9>YS^vA#rUU!GoI~*sdCUX;4W@R91HB1ty}vS&ymb=@f_#iNWysvjfvg* zlUpSw&B7?Jk~rH*AM-1o!$izND-j1p`vTK?(~6OUGxljEYu>+yGuXV2Ig?qFUzTsD zlQk1`7&>A-aqi_-_e>M}hdt+i6?sPCBEX9&zEO^(Ga_(K*wpOhvN16iu+j`1Uc;Mn zi)iR6cJB3bGVJIzx<@=>y>j2eAIKP3WXI&m;geY(tghS2-4tKpj#n0*pF7FB5E{^F zv>|up>H8A?K*@gCt8frn~_cw_pYAhw1rAKl{9S~0fX zb<&gmjSZG)>Vhtxax0664;~3&m5f0K&nmY z`c+{WN+q5*HqPkI(wFEd7+BF}XVj0KuQyZb7htUvZ{u>n zC75bgZ+gS6gd4Gv<1U(xZdS~2(HJ#wnXPHB_Kej2hrsMI+4I|wk40X>So;;|o-b5%!a^HBQ!eEj^16}s#yAPF8u z*$QT+7SmoNqi*v&?MYU&q;R3mbB!l4=d@O_T%n%EKl@jbf|bR=Lp-a8ATA0Gq<(UQq}{dyeFe+xnlQ zFAtt#zc1#_S+z6oz;VizF78$TY}{49Zo4>bJojzw;xXKVe41&x;zg>C#TB7Gc{klS&M>QOB%Z zYycWQd#2A#bm-8Ic0!v`IO*&U19BWY{Y_diq38hOk-|piCIpa}<6;gC&zsJ%dNs?{ z9_{yBfBgm5Uk<_w@e^*nI+$ncGIAp`g0;Z4|HO=E%@ApcLQDRrXKiE|C|sPk)F0(` zy_uZRVZV1Yn?i&ZG`f{9qJ}UehU)MR9*4XC_;J|KcA^Jq2H5N;st;bSG5`gpZZFeL z6XElaKeU>Kt(EDza2;mPh_~zu{kNh2{x15!Q92tZ0)N61*oe!!%Sf)^jnIprdlK4X zXg^~3Tt7PZAhECMxka&zQa1hSxzn6K8DI|?&iQ8d$qXpg(C2uWh-Hn^erWt+pvN>= zApJhpZ9Rolb@Uhryr*ebY1fRW^j7%a7Qc)|Y2cjW&@meNTXO87G(f2Ps;vB9CTUqX zR(||mP+FV|rs(+DzN{m}k9s1KJlE>F@GGry8zj-Km>Zc^C#R*JP~{|7ULF?0#+L8A z585k;)W5oUzv(KEx*3~`)LQq(b1qdQuBAb%brL;U#YGUiquEar1ynAqr)6I13FK79qCj_jc#NlWG?G+)qxL$vJB5|HhFu=y`AmswYZ2 zhe+MlBbVc248S8Y4G=>28f67!7ggS*g++-J>(J+zs&om)t{E&Vq7g@0DgJEU?(DKv z`bcO>l7UHqWYCjQmWbqmCC~*!P!T4&MY&O@WQ|kE4jF+O#T?l+`R+A_#>5~Mz&*cv za;3S+!>8Hz@S+{u9@~7+Q?rw0;0wfL1@b@uvsWys=lp?Umc84x+mdT|BmrM!9%HVc zH@LOIpI~evh@UE@ac^GCs{JEpy~Q@g&%Q9vn zH#g~b>I)EmA4GJs9i!JQVgF-3sS_S1V;zqZM*9rR?SB@K0~UvU0U(AIfj4j|Bgd{2 zZ4XFMD#%wrpUQa@F(~IrUNIOiD~-V}ktIM&VJ|%Y)$vYFXW%U63};WW{)Cu4V2jyFw%<*AOsWoG`0d}4Z& zm3h8Hhe>yl%!I$tX7)sVwv5t8w{*@>r4r#b?SQ0P zpejyYRcyGELB&!NYGl=GDM-#WS_%3i_SU+%hxEp)yBG47M^J&{7bWZ?p>+Hw;F7SwnS{ zN|0*6l1NpP$FyzaEjHe>kkNV@q1)y);xTfUt{`akIS$21iU-Yu?PGJ4Xl}JxH&`cC z7rqPgG_9^+jah$^)S{Ed`P90gtY!3efBoLQOS<%N`SPf$B-oHyb-gN0)4qkxyi@tJ z);{-LFVneKA<=U5dE5$i#kII ztW1Hq?PZ-sTt|0H3WGoTJEP`XUd+f^v-r=JV#+tyGj>4t5|* zzy;IWlwZK%0({UM1fTZvq8Pv4rUdIFeo^V^(D6hqXZUx+7y@cQW$DdW*0<=(%hyBU znIQ)G;2Q(23U|&!>84TUfS7H81mWcn7d z`4Y;BwRaTDnmAgtJ8ltkyzR?WPK1QNZiFk_c2Qz_Rz@rZ%&AAL-WR3t`) z8kiM%Jl-Z^!}QX$vm&57p>DO>Ev4%dSepZzxuT(Bs&W_#Jut*Mr8u7Se%;`Tc110t ze5K4I&S{D#FJmN&f?LTI?_F?neo+HmN+sSG=3ey~lE-X0tqEOXz}>2)euYb~W3YwY zUgXy6BG<;~gzts#ZTd95p+wTU?ICLCE90Ox@;7=s+6H(SWXptws!&NjfP&vuKz^Y- zh!SN2vX6ulx!Q;=-cQ|kBoG9f%f~0SB)A?EwaLBeNUCm!3Jp3h#HxG2;8FjK&x}mp zS7iFlh&{4RhC@(ammG!mNDhvU-0)+}4L-J3^>-N3MKXBeeJB81pD4QyQq`8@w3N((Hm|=_qE()E z$`h{ilQeQ)Mr>(+m3FbGEN?75y`RkM(^|3V+~&HZ+C`C`5O;=e%_But^6`2>G|`s&L=emzAhnB)20 z$o-qyRl!PN(hyU*?V+y4`CLm}eQ6&zjUl}8`n>b|b3RvtqmY{{)Ph|w)K5{l&osux z4Ydw0+fYEXzl!k}2y(Bn$*VvtMI8C&wK_#ZU`*h2;MHf_XH%)QEVystw{+~1GQZJC zN*!>BnP>hwHda|H?Ip8P#&F5J)AMmgut=4oUX^uVeP0)49ZO4hZFLE(6`bvK42+iT zmgRcoh4VG+1+33FSK(x951I#Ys@*T`h>1%|vg9I8b=vVEJ!igQ>ilI%Pf;+m+AvJ5 z=c!(f`REP5h^AyM8e*&Skg_%W3z_0gsam?_9*F>hPx&W zZLo+E@UzfW?CGVmrW}uD$KP_GT;*_0OnR}`kg>gnN>f)-*TT_Pf#xZ2rhtYQr!R_i zY1f#23tNh&&V_SXu$fco9?PmVrkDO+-3=pG=cjc8H!$j#eK}q%R_8TG0`*2U>F2Zu 
zv?RBr6{sGt2s<{c;)fA{;R!h5LR!N~R#bRvn0o$DNEcb~75ZS(CTJr1+R%7#ym&n5 zEVEzl4 zadhLFlWJr6AcpZu+5$(yVX(MZ?@VtvyqIRo1_W}3b1Y6!rONqdUSND@agCH?3?uth zlQnhuXRPGGvi(>o13hI7s!@$^iVlk~qd_%ypZGFR>0Igov>WxM#)9O$_VuY0`ge`_ zzJ)6$W8JnQGnkKTI1u9YjOTF&p+H_EGtiYaqEQx@6nDFI&R}=a^`@sy5Nk!Vb~or% zi7MO0c6c2Jg~Nwu3Qt8GC;jQLy`bI69Bw2I;%u{qz(?e1d=-#(dUm2URWS=!2uwJ_qR$?LZ z8TADCPMuNQZFDb`h^a{mY#gK6_Ms2~Ih0ACYM!ua@!%1nH06Ne0!rQxUkJ>*ySe36 z-q!*#{VgO&{P?Ra7y7(M(Jvj&=bA39`+`kvtah1)|VXA zxD$3*jv@;@L;L)*q8MBK)VcDe^LlpUNvJBat=T>1(BS3xGCT@iz?uv{3f`ZK%47K{ zb+Aw@8nn>l%UYcO<6L|Aisugi_(6mdof0EjYuWX zilPu!aG|zHlhOxQsfgXa>D8hg(RB%cVBp{cjmAUHWM=G`T#bX`8?kSmn<;zArP62< zcwo7FNK%$4v#)p>U&h|fGhKONs$LE5d9jc6lw*1t9G4{%i96<^|nQ zz=*|wR&0pe_^Q|W_0wis!NwM{9SvqCBA%4Wyvq>W+0Y-+`KCM_-uDodM>QCNfhhf9 z`u&fP-PS%XWTTj!$C&CZL_h*DUZ{HTY@L|7CY}aqizsMX?R1Bx z6cX?kiL?z}k1!IVJdf4@?K`d z{c`AV3%>=few^iTHr7{ry3(uG+I5;au#IKyZ{@b{ET6;Ix1xJ9HdWwIer#CiI*#D> z-y@Q7BIn^sE$!PJ55YxTy*N{`TCKzn&aNIZ5#Vf6_!XIxiy~)L0myA-S0K{5EbwJ5SI?G;o_{dir5j+*3I1kp8`c(gM&sid$I>) zCe$B1q<0jTH$#^YP%qnIcZJd}16d$jQtRUlX5! z`S*{c|9vqsGUESp|Cjb34(8v#@&CF1*9G;z^8Z6;WnlQp{pa~lEh{VA&y$hupBed$ z&%(m`Z~EVM%*-tRDgQr6`cH5Fa4<13;r}=NN3Z{A!SJu-e7SneDbO&`v*6PS>f4DKo1208i1$ z*jfdjo|*jzgU0&*f|coqto9!sd?ser|L}<6Gco>%bus-HoA^u&KXhq*w|`4e|0?qD zs6@=Ij2V7l`=f)x|DKb72>+cEDPtQ`Co_D8f8h1ANd8l#$7f)qXJh*rvbmL$vBSSZ zcQO_-{#iiA{|Ne@mF)(&)amY}IrPZN6_n`_|0y=l-YH7S&9a`t)RWI-}!J7NcXA^#rPAcsvYp&S)i_Cc9V?nUX97 zj!u_%CNEZtZy1j^ zigxp5g%uYYRIBO2(Guh9?lFRGm(`&%<26~2@P_Z}_{6fwZEBr|Bsi<>CJVBu-qcP? z6XQ!`Xj~R4Vz@@DZ@1U>mBUsPEj|*yFVUkBFCv7fPy(@}za#WBl04D$-O_OG;-E=S zQ@_G@{_=nX^4$&|!_&i`@zwerf@+ONVQC#DJFR-q*POUq|n{i6kcw>r@FMR z7m#Smi>UH2VqsfWpHxhxmy$6C#e-xDsk|}KfE{*SuWgVf;0P5DN&R?ATZI5F=-&j; zRMWl?YBdD{UhoI^A_EU8V=Vi&3CydoEj6CWJe85X<;i-eHu2K>f)1D&2{cpvlFTK8 zDW^J(KSnYv&P7c$nicGs8@~g&Dg`W{fT!Te24MSfLO7wx@*Y(Zf;a(KVM`SZU~JJh zd|CzVN_Rn)gQA-I=MR*_M-h{|BZUOWAx9E@P_Z@}_cWC>q9RhLgF1AJ2`H0!1X;vK zJ?V%z#&eIik64j7Yo`GwNYrzV>Bz*{mJ{a?v&%pD665iNSw-ook=DepfteEI?&pLz5h zg7}?z*X&3%m*Fsva%f9ZY=;f@2oLcIND%Lfm;cOP^30#{&u=c`ncf_d*FOt#hxoN+ z6#tu)9l({hpVCQB6pUz8tc3`fmYR3kH@F9kh#;r;uY8dXX8~hFigse%A-UN~E$G$@ z*Efec#E5?FmL8SEJ>*xVgeqKAYuBG%UsER$n9U`1jl7cVD6pCPfe+_#u?rOu{7-Em zin^N>s^C)5sO3BDye}jn)`4w!R=8)tEGRR{*-#QAQ0z3;ZEl0+Bu$x{iKKo<#v`ax z_ikYAYdMihqJ0QT>l{X8Idujd-B;H-6>4I@FXd{6${;BP4X1$X!4LIOy3s%Jvrod9 zixa~}ePUC8?&gs{fQ+fpjEdw~*c12Vfi5w%U38kFPOAQpR6!YV6*5!3-lmg`jfh-S zJCutbm{N3A`N_Um#MS&NDj()3mkX2UAzb_YzH)OY5j!rC_S-HE^3}jmi!U&N7i2FVgyHN7tJjqDWQ#31KfrH-z=Ji|4m5XNY8v*bTD+ z&kExTVFPnTofVRGfb1p<^tbpXba&pabVn&qnSM<^iVN$SEb*@raeZWmYL2jLytWg1 zOXPO(TJaXiRlUXGxR`xgr@uSNBvp{=W)0RN?D>mA_Mh|R+k)wueUZ`;h75I7fO_dP z<=f0^*LI$(J?ka#AmCqDMv3tnV-&6MJ8ljM5nreXl11@3*9N*Wn+N(_CrG2Rb&A3)e3J=iUa=2LrUFsrOHS6P(TZS&ad7Eu*LWS zWTWOG!41$I-?JmId;ZxiK=>iWyfdHL7hsaWtqtI%9%X1daFhfM<@Izx{Y2zI>240pHi=pJG@_4#qE=667E~Nx`iv zjQJLlKXK~9eyR5M;9a3$%O>1l?XpnOL-HKL^{2MIi$F!x?K(ai7wp6X^ikg!^XCAG zaJ3y$Wl(Q;m)-Fwx#LwB(L+|p50#9xdNNkQJa0{2P^T%|SCs$iAh#UvI{1w(iq_ww z3rVueN=c9ctm4d*BXZK1zzFdjhh%W5M)MbZ$5$og_-f!<=jZ>tY_UxWEVex-0grn1 zre^u8)k%*`rg83U3KT$*di()CMYs{e3+V^%p>MjO9^KoX>KAm1Zef-nsc)!s9k_H{ z==6+-4K5^+{Dz3{<~`fEDg6cA{u0&xlF zUEqvbh~d221*K43sfR6}@L6Z-!STsv9D`=_wyAHv_@jgXW{A`wB9a%a46$l>Q$w_T zlQ@t{sO?DbVbOan7a_0S1l6aTQ zSyk}PO|B5u#T)5J=a#}W0UN>)4T3#gNk+#M!JFuX{){`)`|DGbQMW*HlK&!Tvd=ef zYaqngrw1dW->_XC$-ksd9PeZJ^E2E;6@4@STH1yPz!p^@zvK$yu02T=lCFNq9+^+` z#+c((If>Im$beM%pGnix@yR~uH7cVHc~QueOCw_W^4DpSXjm~$%3`r04Py>@@eTCJ zQ7kh=;xu-o%FuOzlGM=joSb&I$v$lQ%*p()%;n?Ao-jlz%-6nz!kPOf=wrGl0gTm! 
z-o!wN&`<8kJ}jT;s6trwN89FvG2?{Q01Sa7M*5zIt+Tukaiqs$aqG^CDT3{pOySe{ z7>5QfiV}cn9_Y`J;^&U#G3%$80lif1e(^7}a)-#W8)m$+ETWwe>Cx-hU&MJrKMUN6 zSN4nNq2+SsP5nZe7EP4l@D0p|R2B<@jKh4R3f0Fry!<5>nRAov^Ek~_-Q?hmEXYB* z2EWsOf>U%5xCnpPa$t1`E!k_S#VVm;@HnlzNirSCe@B>D$L-`(2anT?44jA8q%03# zBe+$er0F2OLo#K6G0GeNoG)UJ6&D`q1C!@e_X#6{8kDA}r;S=#RF zo5Mpr5)eN|OP-SYiNyO`5y6E}Qpy^qGoOr@b__V3=7KHWk z{Iq?Vth#%Tr8dNhbyIm`B3mPC3zj9^{PNxW`|n$-kk@j{^2-Ul`+YwD)ytO~t@JRr zX_>wIf)O=cj?PAaR^4J}+df-k$jaorYud^nul0m&OXSL|(`SjoV|6zl<-)9J2rHsV zQ8ZBA?Sxo}z`&bb9o}4#pR2BUpK%4$MWg76sZ#==ry28Um#>6~-%yux@ar!c$wfDY zJLm(Qeg`mu7D=>+^v0p}&!!?Xu?ld0i`a4my>NAlj1%#(Ay%(2qWSQlg*Cjs(qucW?m!-LYPMnk22 zVUWt>s|fAJIOB^c_j0G%EeC{3g_Jcyxuuj2uGuYKa(Ucy;|Xp87w4?2|4kjuA^S8@ zrRCUYf^lPja>HFD^`aKwCf|9@{Y{Ix^}VOnsdm_Gu@#kk_w7_mfZFY|+rrnsP7oKL ztBP@$6jNj82}ByxGt19o7}%Szu4M_H)>(~96!0wM5{0zCJLaFuLMI182-Z_s0#jy4 z*&0f|NrjLm1dxo?jEXjl8a7v!!~Dq1kD4$H5p4)e8Deo>c~X9{?5nk0LHWTOTdq;- zujfL|wO+6Ts)xmYarp^JZ|~=Y%NxrF(!^!!lA3l2)A}Qaxw$>+8Bsp4rfiX z`aRIn-zp7yUw+vr-PspK(vMV#!SP$SOqfI?79n^WvL9mga&cHxOx#dN27`2 zJu>X{v#9gC>K@u6A)vd_9QIx`2d5nAJwZiFPs1tne!mhiBMcJ5a0zONUHfT?8e`2% ztNDqfJ;Xf%JIB6YgS2&s?!fg5S)4~2n3TFV6M3j$hL&RZkmX>Z)z&>c=8~O?JPRD;rQ~% zo(Ix7GUT^I{z&6EBi<2^MBqs~psFbVe;r_A0NIS;;g+B+g>b!h7&Mh?R+Dn_E3PFp z()RN$)=}*YX9(P@M0Wo3V9$=*j?fjYnR%ha!^j6jS`V_z6O+vH#|*9=(IZ{>>iwl# zh`NdQxX{=}uJzQ$W#Y9L|5#Ks)D_|*8BN~tVZA1ib9#&W2Soxv9Ev+!WI)>-Y(cnD zAV)b9CU=Lm8oydE8y|Jmsf1F(om7$RiMWSVw6nu;Mf`NDa0PS4aWE&{dQjP9{)l)R zF{6^(E>;8Iz9@i8FfbIPDCIaSdO9mZWy0+0#Eg^Mxqa3Ab)lzn3!&6HeVxGr%c55= zd{jMdmA8V7^{2rO@gtW@qU5AzoMet6(P=>=pO9w==a5b>-Tt}!P)VzzOmRo$lcJ@P zm)DW8LLtLZS%&;{fDRr8J}1L`?nAq%r+~{*KKCqB>bQUu)xoFsK-?_n>7aw$8}irB zIEB_T2nU^Ptj<*Tj%Q%^jLtDw``3@?8^aqDodwWEp1DtgwRVhNM*+nM1#mN$hoOg{ zGaE^s_OATd{J0@Mg{OU&>Z4(E?;!`%cT&->3x{L|=@`zXDLMf=)Fu~DDD+6dDI>x7qX?b(hrA2;1M$+&RgZKXbxKD0h& zUgMQI6^&fFiNA(FmDva*O?BKIb`Lcc49-3kw-&%ltLW20C%Lez+EYx2v$G;7Y6}Ej ztnfF)QZWc~GHh*hjj~HJ?tncKxWE~B#k7SWc`NLMU%6q$v5AllA@B0aFwbozTDnn? 
z&-gA)%PjadEN#r)aJT~6Av^tUG;ThJpHCc29MT<7JV8DYJP9^|E%c7@MYS}CHFLix z+mtr1tnIODHMM9 z<#x=z6#}@2`fwifl@L37OWWrj+&ed0zC<-w&J=cd^UXd_`ZCKFh&=8$TWi~Y68>#% zzkrxyrc`C*7uGJEK~!1nm2Fq_&hOc^8*|@{&^cbY(*dKjB31gpF$Y(&(BBLdtbpY`G{%H-0o_w zSoxOa&GiX!0nDxidA$9nt--}_x2{rjJm%OQ)#KlU!=avuFa8kKnJ@egaLrLCWD{5L z-!}0!1Dj3Xf+%^KaY(x#e;liHWnS4R-?BA_mkk^|6}~gw(wi;vnu)?`2_;4=?>Kzj zxB;3x=KdDCy16=Zn`=K}d!$I0!Px<~p<@-kq^-9y14)uG!08aDVej zRF7O@%3iPF&hnIht$S)c%KQS{p)6bW%i200Vn4z3Y{4|RgnxwGa|I~NFwDy!sZaTE zSs*Qp@W`l}&|-igP=jq)^`ldRk~HQd91oVO%0%fRJW{Yr2(_fSC${;-Z~x-ZiMThH zlOb#Vs<aUmSW>Gi@lA!-h*mNgXWwD_ggV?(yontr@p* zB-NSRP?7wy~Aj3;ru7Y5m> z@vbuAu|yIhJ>(ml!npqMK117-#&z#Lnm3W``7+;i2V;QmCUT(TD4mg|0 zEyCVZ`s21dOOZ_Ba(Wf6 zoy*h}R2wRuDCdT9rL&A1`EcX6%rXC1w&OF)jUQ9<^Yo4{j*p~Og}eI`(~#x$xyCdS zbRJfF_IiA|i^z%L1~{ki_nM9C-88{V#HkdVMIxgS$u5&8eO(#|uGB(*WCzrvw%;#< z^7H+Zt99WR^~+HSH~y1_n0w{r1kcI~-k*NBQi$g-coQDav?RQFY*8mf(oWZL5C*rVHU|BT*BkL6K`yzBpO~wyhz#pKR!JKjBv!Vy$uj|Prxv-*p8$AK z8-Lt-B^cc)OwJErn_fP6Zrd>ldEwJ17`%mD;pE10?pN?0RR^0&BYf;0w_c?Q(O_< zT(=m_Py2j7l0$#G*tpH{;1jMAW+zbZU5iD#Qk)EF3_u6mKo)3V&1&LhGv$zObG1$=w%=31kiAU?4J6N-hQ^ z$`AQv$xmN!EHeAOG76%3sgxY7QTodi)nXuBJcB32-EJ_{a|!rUHp~KzGi??DQ^jSZaK3(LrhQc%)ID7 zPqmX2{+gJuOoQp-)mhAdnSFla6;snW}9M@Qx<~ixd~z1GX9TL} zL0TRwEj#&Om&uq4%JeeF3`Zah_)`B=fPzQ?g!pnw!dgl}tu9egE|b+T z)9vc)eL<2-5OwMszyr3X*#|6>)E*p^N8H}fPe-L>+W~jmsLjRa!Bl4fY+X%Re1u>R zDqFXXQAtSbd05HPl1gPd4rF;**~jvy%Z%}Z@$7Muh1mWOZo0}(|KN(jd|NjhPmx}& zycv_j_+|H`8vu*r=dNJgnr2~_c5DJWwiH6UHZW}=l(v>73XGa#g3;_%|FLgzS*!rb zYfmKRA@<0)A%&Ms9W^ALQ0v2#1M|aZEk1V5pU^epzGX4^rH1xD;Til?>AuGEE`V`T z+H~uGF+{7y2GQ{$|MeR{K6jo%!3D$K~f5m>uh6@W%FyJef& z8D=Rw($2q{7M3CZqYJ~vyQAZY*0v_f)<;<5Gzckk8ezMRF4EMbKDQDjk{2lCqNJ41 zi~=-}{kjRyvC^71RB|D7ZD$F53vFh3a=vYm$#@diuqBpX9X4MJ6eC(`l`bj)y z7z0{$EG@K!KS8(YE0N?Oh8!x*_u!(SCqrBL{n{IjZ#g7(gO&Gj!jQQ=Qvy?$2Z!Xn z6Gc3fBCXMO_$iTQcRGP6^u%}R*E>#9(l?0CxxXkekL{NZq~c_`@`p= z%H!}~()6q6={+VgX{q>IrNI33FlTIv1>ZjZbQj+tbVs2&j)+L4Hn~^@JOX~)?|v)T z1TC75U@tz^!K&Xo+*nVs2t5R#2eRhBYCtWPV>@9b@hQ~{d}@TZkEhUR%2~}PE~cX- z=XRT@c&9bje4Azs^lJ7P;lX{po!vCxDQMr=@Orn4;whH1P796baCp9JjlxTR+HLn^ z)v?{}f6qD1-6$Lx!2C4?-H(7OlZpE4NGi|QDkj}k4rh;OFQ+swHIHkPD|#i=D)dkQ z;K1;Pm98HyO_MdlF~-poA4t_R@OWM#9AL0%FlE5tVP4cO8hJ|7!cfq`>gs&Lut_ks z9hgYOLpwbRXa!a-vKBv$m&>;P*nm#i^Vmv$&?HcH?DI@M!f7amhzRx8*08fix{UDwOSobID1(? z^&L4uUsY%+zSPiE&cbg&>bRa37iV%`Y|UEVv7azCa$-aX&dReHug_*0&6RtZZFdx! zuzM)ZI-$#_w1$Cbe{;u##dvv~*BvzK3&6Il z=%aFBxAxnz_V5!9Y&dw+v^TaYB078? zBBGJ?fc!DmGDvms<4=^@6)-G7179x@VD!t$!3%n|M@+=GmG4dWWXIneH#KX!m*i7_ zYa8i>bmMGDm=-^;d%i?^&`ND&_ z$8&!3qTA4#g?tVd02LA(-92ctBP)o5kv{;O!Xw|IM{R=0j(ACPkBzho0`BkkY=tDC z4uS_kKP+*TC5Do(s(78P5}R6pnP-;qET}{^8#~TXrYos3X70AXH@B)jSmc%|r&4k! 
zzXxc)=SUycbM74CEj#FX#Jo9CzTmlfnP`=(vA%B#pnv=fTmtgDx1Nb$p+$m@vW2k= z#;By$WQ)IeWh<17+O~Qy1@>usbE`yE>18@6HOwlKD;hI`D|NV-tCN3!7`|pyo7gPn0>Vj5m-ypD9w^2?}%FN-z_{nBd-=>JWpQE z^{Zd``+}oUPE@%C^$B1pKSB?jvtA!qo4@94m17+!R$KM$J0bn8+TAt8`Y)l-oJ+XC z+l2Iw-2}11tb#X89~(CPH$?*R)FgFB+ARjKeZNJ=?v4%@4Bd$jPFa*QZc0u1m!S-+ zm;2)@$bwjD^(EEUxIUg^gcX3MR6C(c90N28N~3 z;=av?LQVwtlfGM!!q8;CalWIz6H*Y}u6|!42-YtjrkhU~xh*f&pgf<%F5AIcw9{Nt zbE!+vE_GqLJbf|W<@5b znCJ;bqM`MN5nXQOJcSlgN-JKJgf5|Iq39Ghm2bU;o-2OWxts}oS7MOULodrhadfie zZB=#)Q0_B5uDh=1tYHnqqd?}MrlN?S0$A9 z4{H+;m0u`Y$#>%`yaKfmB-39(&nGjKvqHD}y%YEbQL$+Y=~#sYZKvm~w_TU$x}B$9 zwpTJTXGb2hT*h6xCq7rxqOn$~T+CH#PU{dgU4yjhL4Yz4J2+ruYsH`Tm24s%ku2`972Ixp5_J%LSP)fbF7$Tq$#oj7P@|^y-ZYnU zV=S6mQno>2Zgd#tP)HjXvLgbDC{V{@cBkgid$91>)oy$_$4$7G?5>FCuaaBh=Lp&s zWT_CoxwRA#WQB1=WHXuaWu}JkCs$Iz$5weM3_2{&6UR|W+ZGt)t4(doePHn>K1$JA zNm;+fJdO&^;BxDxNzhpO?5=a{k1^?1uaXU06z)aB0z42JW3O(Co?kMWkUDvM;hCn0G{|Nzf6{=k-yAMzFqQ%I3@>yGMcrVo=oy4!Je^Biek2aT;$>@FL{a83;?Krx>H zP=v?un1`F%i(%9id~Gy%T1O|by&6267wMn7zS^cKJt_1W9ZadpdmVv`$B<0I^^1T& zo!4H98xP9F16=1L8gu>zI1eGNBCMNp=#6|2$=9O0ByqR&I|!Vh)cYr`>54(Eqt+gf zQz8M6_Ss?4(JvK0Q8nR1`RcO1Eo&q+Q>5^diy~Ne*1r$ErB`b4c)6W+X9?p~X1RgA zzO;lk1C_wXB<8Nk$eiILHXP0e77~@qmAGoi$tFgGNbxd`Hc(1ZIgeKgb89`b5*wYTuE5 zZOLgRvJ&-_0oQ)QiBbLSXpo?d-5Pt}9J>NEy+C&ML{+q{ouHf0`AmnevgzpqlBUB5czZrhi(Pv;T$fEiqn@O8~ z-%P2bA3j0`fg z3^J1;u${^PC+}_4%$;qIy@x)f8xPA0mduV?9NoJy;(o;>_NAO7u5U*jMv!A3y8)Fr z9Uf|y$G*%FxH3WuuWoPZXal=gn#u#}rakJHn<&?fwllUf428o+rSso154JVs z`3oI+$RD-g89=cLyPsTAxPz@xgb(`$>Mps2bLYL`JOfT_PrrOTvGoatmgc3g2(RX( z+7HBiW-=hAc~JG05TF>B()X?!Mn~@^G>~mDXlsex7PF#vAs-?kInZI$V6E{(d9{d- z2&0@*kgwIiPz(sEVd%pQ^@@v-G87Uf*HxuM%F)+37@C!@gb?8jxQ5NU1`TEC1y9l9L0cz@1Gv3&Z9S32dull9td0BiMR8 zw$ZKEWRe}j5iYAj;SNk;{1)soH+)g`?n@;4C!M>=H^0ljGn{}`VUBooSZ^f+T#3jmtyP3%| zI{5()xxva_t})+GHt9N=8E zeF}-!BwnL*gA1!io}!m;9_WyfW$V(Mmd787yhjZ(CF)xw*SAFvKv5G)>bBHR#~5U9lE^W4ff`4>f||GQtkBJy+TO{j-xX*+PgOEIcBM5MtL*&Slsdo7L_s zLwJ6_-uCBhf?VG4oN67YNURA=W?^(9@mNi#LrhBgId>DEoC_Zs{U|IB0GOO8 z)GHL~Z~G0uw%LY;TN!k-2B`U3BLDsnDC+yBFPA|}Rp@D#OC3cA(iUP$0y-AIDa*5m zLx({~y)w}hYanQX58)GYfAPsIy-JXIt*M%J9+EzV6;^iGt)I+V@Bxvv9>G_+J1nn; zB~}r+u?yCxA6Lgpmt!FN6BdY5N!hFNRnm>$Y!F$cKvqMlG%u z2g+%aaGp3d2%bL9Ib%gUibW>I0x3M>CSoI!x`EDQ%$8H`5|C6N01Cev-11xJ$w>J7 zvZ+JcfZG(KDYe}*8u~BR(M(K3POREL!$MPm$iBv=k}L`mwX>tl&R~KGUl2kF>w{wk zLvq{H8$=ZHRf{x3)#c?wC5C)V+LGF3_|2*{RpTg)Kx*@U*k8WsceMU!OQPzif!Si@ z?9k)iPj(fyw#`8({n4L!g7t)43*;9I#0Urx-ajJSzLmX-L6Qv2hz>ceK^Ej^S4yvj#@pr3)?MBa2IwJ29ILL$~x6~Q4OU8eMv`AizQrX~j$01t&p z3e8ZUm42I~BUdrB6-NbvSovkxqQ@w3mi%5yomDYO7t(YR$d1Q&ot{J57wI6%Bo6@dy#!dEjcz& zDuf$0n>0Q(HDTfsd#*b)mJ9I-IO<;4rEn_Y>&=f>&mt+}JddSHC1tALEBg#?*_Sf# z{Lp;kC%loySb>Gsi$`X4RXr2mp$7EyGk!x$ur!Q8O6d&^gAh2Y4s&L^i;{Yq%quD# zRq=u+%Nk_p)*d;A$GM1(nTav#7&56##)JU+R%tQj0JYji%K3K4+X%oYLpx3AxY&?s z#bu%U)*`}!)`|p=HE!vv+h_R4X>O(Ig@cu-uB7uinL!=;K#I>e2f5rpb#EVm(7+U? 
z9xP16uAU#CBDJ`6PLS4D5U%*w5Q9~^N0tp`V<^v z*XQO+Y?5GFlj2$i0cB;NLuEd)Uw=^06L%Tm8MA*-%G zB^4dV{@SnjtzbrKHM_uJMinX3pLM;L)n(5i+%8N61P0%&JHd6Gw~$&kO=PsI;|V-m z&0xw`qc-UBgjQ4^2{3>{Z>_grC_MKg%{bgmI2MX*r0o$RR-HUzPiSP8gqzaj3a**k zjEN+`4(}GzV<`e{xi}|_aW`&Mt45$f%(HwPVNRIl5CpszZ_=zt-;_jFm!Kiw`Gc@; zRp#i;%yu2`gNuk0!503o zAH0?nzyi8*JRa!|QC?udkriXbV7Toqu9-#wIk+VPTR*IZf80(eNPjpH9e;^u=60>6(8`)$w^`NF)f%(@7C+BxupQ?~6DL9K zE4u`KbEjpuNZ=@asKN@pS@ql5YO5EHf<^=FwCPzb!Vz&_ao}=->g_5#TbXPUELDOz zJ$?|8EMCHBwyd`5uiKK1!AMy?)zIyo@$*L2QP&CY6cy7pSzh7(Y_)J{9wItN zm1&$wJMeaYqV}X7y+lx{2&YjGn2LrKnPv9YRTeT}q6X!u-6=H~W$Ewugzlc2>JjjO z3tzn-i)yDeCQ>0zX2uP8VR52lO@{hkM})143_Y;<;GhLbpar?0scDD!XA64e(EMt^ zHS7o{KzcY-ZTa}(@{^~!YUNVGBR8pk8jBIl)yhNnCX{C#T=f(e6(*H)Cq>aacYUgb zEY4w!kBhTkqLCKUg^gK?TjZmHhULR6G3JoF`?B@2tWbxiOll?PvrseAt^=^ZY&dZ?*NgBT!YWi+lqEQ& z*9*eoYVq~efAW*eT~*R+flpt4ydIf9bALrMUguV0SOBs(#1XrLle#g*jCB4u)wz59 z-4AKH8CE&{kXFI-Vq&n7=92OI1cm8hr@|dFR`O$Cj$&_7Ut+PwAzTglky&j0kM9m9 zD0VTjK}k+M7C!!ZaYme*rj!~)x9;RHB@Qkr1iVs%sbseZmU2$!W+o_+Q!&0nM!HS(B+y)m5+4 zJr&b;L(V9dA?u3D492O*!yK@pMN&)iqaxRuLp~XR*@@Jy{FilU7tjSD{ zjzK&wYk2qWw`-+-%?RgZT372EuE$o!^BvOUa)|~I$WtH(gmMDq-B!r8640<15=Rq6 z+Ji?H2P5*Z@Wy%KBq`@3q#AHI874^~h zC+eYD$F4%Z=19J@WLGKn8mV}S>R3g4-2VRIG8d5`u|z5m3Z~PsJ}q)HF^%%TV%1j- znqoU<>eL*@O@$>I-AB^Qrb3Nu2*|08@rj;R6!+2Y1OcY~F6{SppcfUB?QJjI=tVWG zPb>QlV|%yS+z|qWxB!JbRE!JolY$>L8k7&UgINkG7Cg~&3NE|2NVffnv7;B`+=S0y zxC`o}2ps`RcwXw!$TC;Q6t74-c_8)JECkOphaStUkg{Z3&aFPobGMJpx-cKhpzydd zZM0vSAjOLF#mC=oEOxRC8 zI;TqEM3`H26phi>Mgmibgy75Re!47d}HVUforrB~vQngD_7GZj>!&ah{ZfvAVL-6)OVR?s&+mx=aTDjL@ z1vTE$QEZa1CX?#{)6r4~oHYKz2C7egId(U_bbEXz#kAqF^1`lNI(E3%$$HyfSy2k_ z#wEnl1U&R@Z7~S6TW&X&(JHI+7!OQ=QMTs5u2$s_9}p^hA79H)=`G)Kv)P@h+8CtB z=obC{@z&@;Zq16zz%4e)3&b6s|BoKjHT7Db9vtb~v`Q9i<~`S9==W)HnqqcG>Vd;W zy~h1P^jB<_e8B_jp|t9>OKk3e*pVg}PAh-v#Xj0peq8@g!7#B}NWn4vLjaEx7Fr~j ziMgH^s6LDhEP3+)B8N9v8TKh7VJ0UEi<|OBYPo9D1na4(_0saE^aWf~WhrT{X4k6# zp`n+#@#?u+Kj$g009PH~FVbQ}<7mSV1EN3IJ=6TBT12uD{Reu|#t)eL!|fwV)VDoc zWOgFF$hYzz?T!q)pG z0KGO;lH88xphPr2^w($wHGNO8eobK2IISjQxjg$&7~@z?79+jDZHXDlaHlex+tE`3 z78+d}hw>zEXN%!>O?hb92e z-;;|&RGRs=joYfQ#m95dvEyw^tH(fip@*>tCVl*7c=0z~$;dsiV&Q%MeR-dSr1}`) z-5lCV4Le{l|6I&(WSnpC@Ky<|s-hVisfi0#h^K75`Tm;v3`B$m<6atYEAU0i6OsWVr@>9_gox4PRo{n5(+f zdW$?yJ#o`)R>hxJL!=VoiqwjQ9+XoeGY61~2x`7~Q0@p+@WU9nW9&{n=F|2S=V_^8 z_iDRTZl<*jsA^x%XhODIUM1Y-n(Br?F5ewA?DwXP5`K7ighz0=msNB3^RZ8*EQlrt zHQRF-R4RG<`FI)WhJbg+7ph1R}cgqfQacqR?JUOmZH(0;4t@QC&)hBUcqp)B?e6Z9|PisfqiP((8#W4uJwJvucuP;-mt=Wx;*J1PGJTEpf#`P>}dv1;EzS- z)>@+yI_Ji~8MJuw()g3nV7pl-t@e^mhv{wFW4k@4K|AcYf0AUW8DsC#)*Z(^C7t{C z=o#Qa=F69Q>GJk@@Wk}Cq?U)DZ?#J!v*T&lLIpq-;`l|hLPjs66!(}X6)B%W-wrv< zyNx!Ha|z~GZi&uvHKlWV-a%l0TwM%7j_#cf zgjXUSJ$7&gUQAt>TbUJm6R*%7E2X1ae0rxY>SEcT<-FoRs_BVc&df^i2XmXAo=zV& z#?amU`an9d*&07&pI05W_SwvE{m~u@7XR5(XsRRm>NUWiykzj5AiWc~=ERq;eEbTm z=Q8I^IB|D4+JDc;Syx5%C?XRfGtu<08|mt>bn}|K;gMe?r|u`m&cFz;ta;a2?X+w5 zj@zI5-HLYeQh%|E-e=Ng)T`v-pm5k;_{%r{K}M<2PsrXRKUT#`0y(O6&76m87(YwGCN}?6okNV}`bm@*$Da~P( zo^c5;ZqUv(-&b5{I5t}Dj2t>1z4EfohXFWU@5LYdp-&;-rIo`UQm7$yVghVinF7yy zVjr$bP~IDJaoz=+{6_n{oRXsGbWt~-pZgwK+$lQX&I#HdC|^xq8b9^RS=yW+UW{R= zu~gO@iy6BQJ&nv;X;s|~!?5TyTwR^@h3t)`&O6W$ykoUh<)HlGUi^!@@`vp3hxWk0 z!pg?_zg&zzc#%IUxqyNgjevk8AI1M(`okgl>(U1&1z$!1U-biPLCe5M%YrZGsAuQk ziZ3fDEbu{T0se6KPp7{@F65$S)^-l|Ce~)u_~MQhrg~QYYXkU$`1ot!|40mfODfiO zhX0ZOpi%y^6y#u}XJDgeXJBJwU}6O@s?*bxeVqTX{F^F6{vYWN@8wTt4SaG6e67FJ z`rFfAAP`fB4>HFGHRCVl1)rmX$sbP4pAV@? 
z3iBZdd|+)pz%tN)KSTcAHnp}A)N=s-;m7Y53b1n9QZ8E%m8M5BmDoXv9K^Re0ckF;9LEVMh{^A zL#g?P#=ywT`d@bR^nds>|FHY$`zPmrX)Meiq5V^1VPgL%#XmF#MplN8`u#&=Vq;?a zn0)`x=mD%Bpp$=TEKIB)AH_fH7#Qf;KFa$Kjh==1PnrIqeb}-5J7xxkKkSl!*)anC z5Ox05m+`;iV_^Ki6aCwck%{%c{4%nz{5xL;0Mj4p&p&;9xc#^GQ5TkfkH^GL&;H*U z;J^BQU}Bj5o!f`@|FUEMcOEP(fd85sAKHJ_fQ5yL{sR&7k32rKKhyu;+W*5Z(;tG- z-*ygmdLIlLyT9l=3Z||f%Yzx8PTtzu;RBcRf0u!nm60|6|12ai!4JyPcNTUa8xY9I z!p_XfZotZ5X!sG00VC_j+%%v!`heSTL;wFx@c&`}**kpPuKrp`EG#S^_dXI5AsJ!l F{{ta8d+z`M literal 0 HcmV?d00001 diff --git a/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md b/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md index 8f33951f..e96ebc79 100644 --- a/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md +++ b/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md @@ -1,6 +1,9 @@ # How to Contribute CSI is under [Apache 2.0](LICENSE) and accepts contributions via GitHub pull requests. + +Contributions require signing an individual or Corporate CLA available [here](https://github.com/container-storage-interface/spec/blob/master/CCLA.pdf) which should be signed and mailed to the [mailing list]( https://groups.google.com/forum/#!topic/container-storage-interface-community/). + This document outlines some of the conventions on development workflow, commit message formatting, contact points and other resources to make it easier to get your contribution accepted. ## Markdown style diff --git a/vendor/github.com/container-storage-interface/spec/OWNERS b/vendor/github.com/container-storage-interface/spec/OWNERS index b11f9191..7225bd01 100644 --- a/vendor/github.com/container-storage-interface/spec/OWNERS +++ b/vendor/github.com/container-storage-interface/spec/OWNERS @@ -3,8 +3,8 @@ approvers: - thockin # Representing Kubernetes - jieyu # Representing Mesos - jdef # Representing Mesos - - cpuguy83 # Representing Docker - - mycure # Representing Docker - - julian-hj # Representing Cloud Foundry - - paulcwarren # Representing Cloud Foundry + - anusha-ragunathan # Representing Docker + - ddebroy # Representing Docker + - julian-hj # Representing Cloud Foundry + - paulcwarren # Representing Cloud Foundry reviewers: diff --git a/vendor/github.com/container-storage-interface/spec/README.md b/vendor/github.com/container-storage-interface/spec/README.md index d270cedd..c686e423 100644 --- a/vendor/github.com/container-storage-interface/spec/README.md +++ b/vendor/github.com/container-storage-interface/spec/README.md @@ -8,6 +8,6 @@ This project contains the CSI [specification](spec.md) and [protobuf](csi.proto) ### Container Orchestrators (CO) -* [Cloud Foundry](https://github.com/cloudfoundry/csi-local-volume-release) +* [Cloud Foundry](https://github.com/cloudfoundry/csi-plugins-release/blob/master/CSI_SUPPORT.md) * [Kubernetes](https://kubernetes-csi.github.io/docs/) * [Mesos](http://mesos.apache.org/documentation/latest/csi/) diff --git a/vendor/github.com/container-storage-interface/spec/VERSION b/vendor/github.com/container-storage-interface/spec/VERSION index 0d91a54c..3eefcb9d 100644 --- a/vendor/github.com/container-storage-interface/spec/VERSION +++ b/vendor/github.com/container-storage-interface/spec/VERSION @@ -1 +1 @@ -0.3.0 +1.0.0 diff --git a/vendor/github.com/container-storage-interface/spec/csi.proto 
b/vendor/github.com/container-storage-interface/spec/csi.proto index 22cff40c..d240b668 100644 --- a/vendor/github.com/container-storage-interface/spec/csi.proto +++ b/vendor/github.com/container-storage-interface/spec/csi.proto @@ -1,10 +1,18 @@ // Code generated by make; DO NOT EDIT. syntax = "proto3"; -package csi.v0; +package csi.v1; +import "google/protobuf/descriptor.proto"; +import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; option go_package = "csi"; + +extend google.protobuf.FieldOptions { + // Indicates that a field MAY contain information that is sensitive + // and MUST be treated as such (e.g. not logged). + bool csi_secret = 1059; +} service Identity { rpc GetPluginInfo(GetPluginInfoRequest) returns (GetPluginInfoResponse) {} @@ -64,20 +72,12 @@ service Node { rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest) returns (NodeUnpublishVolumeResponse) {} - // NodeGetId is being deprecated in favor of NodeGetInfo and will be - // removed in CSI 1.0. Existing drivers, however, may depend on this - // RPC call and hence this RPC call MUST be implemented by the CSI - // plugin prior to v1.0. - rpc NodeGetId (NodeGetIdRequest) - returns (NodeGetIdResponse) { - option deprecated = true; - } + rpc NodeGetVolumeStats (NodeGetVolumeStatsRequest) + returns (NodeGetVolumeStatsResponse) {} rpc NodeGetCapabilities (NodeGetCapabilitiesRequest) returns (NodeGetCapabilitiesResponse) {} - // Prior to CSI 1.0 - CSI plugins MUST implement both NodeGetId and - // NodeGetInfo RPC calls. rpc NodeGetInfo (NodeGetInfoRequest) returns (NodeGetInfoResponse) {} } @@ -86,13 +86,13 @@ message GetPluginInfoRequest { } message GetPluginInfoResponse { - // The name MUST follow reverse domain name notation format - // (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). - // It SHOULD include the plugin's host company name and the plugin - // name, to minimize the possibility of collisions. It MUST be 63 + // The name MUST follow domain name notation format + // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD + // include the plugin's host company name and the plugin name, + // to minimize the possibility of collisions. It MUST be 63 // characters or less, beginning and ending with an alphanumeric - // character ([a-z0-9A-Z]) with dashes (-), underscores (_), - // dots (.), and alphanumerics between. This field is REQUIRED. + // character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. This field is REQUIRED. string name = 1; // This field is REQUIRED. Value of this field is opaque to the CO. @@ -108,7 +108,7 @@ message GetPluginCapabilitiesRequest { message GetPluginCapabilitiesResponse { // All the capabilities that the controller service supports. This // field is OPTIONAL. - repeated PluginCapability capabilities = 2; + repeated PluginCapability capabilities = 1; } // Specifies a capability of the plugin. @@ -119,7 +119,7 @@ message PluginCapability { // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for // the ControllerService. Plugins SHOULD provide this capability. - // In rare cases certain plugins may wish to omit the + // In rare cases certain plugins MAY wish to omit the // ControllerService entirely from their implementation, but such // SHOULD NOT be the common case. // The presence of this capability determines whether the CO will @@ -127,13 +127,13 @@ message PluginCapability { // as specific RPCs as indicated by ControllerGetCapabilities. 
CONTROLLER_SERVICE = 1; - // ACCESSIBILITY_CONSTRAINTS indicates that the volumes for this - // plugin may not be equally accessible by all nodes in the + // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for + // this plugin MAY NOT be equally accessible by all nodes in the // cluster. The CO MUST use the topology information returned by // CreateVolumeRequest along with the topology information // returned by NodeGetInfo to ensure that a given volume is // accessible from a given node when scheduling workloads. - ACCESSIBILITY_CONSTRAINTS = 2; + VOLUME_ACCESSIBILITY_CONSTRAINTS = 2; } Type type = 1; } @@ -174,37 +174,53 @@ message CreateVolumeRequest { // The suggested name for the storage space. This field is REQUIRED. // It serves two purposes: // 1) Idempotency - This name is generated by the CO to achieve - // idempotency. If `CreateVolume` fails, the volume may or may not - // be provisioned. In this case, the CO may call `CreateVolume` - // again, with the same name, to ensure the volume exists. The - // Plugin should ensure that multiple `CreateVolume` calls for the - // same name do not result in more than one piece of storage - // provisioned corresponding to that name. If a Plugin is unable to - // enforce idempotency, the CO's error recovery logic could result - // in multiple (unused) volumes being provisioned. + // idempotency. The Plugin SHOULD ensure that multiple + // `CreateVolume` calls for the same name do not result in more + // than one piece of storage provisioned corresponding to that + // name. If a Plugin is unable to enforce idempotency, the CO's + // error recovery logic could result in multiple (unused) volumes + // being provisioned. + // In the case of error, the CO MUST handle the gRPC error codes + // per the recovery behavior defined in the "CreateVolume Errors" + // section below. + // The CO is responsible for cleaning up volumes it provisioned + // that it no longer needs. If the CO is uncertain whether a volume + // was provisioned or not when a `CreateVolume` call fails, the CO + // MAY call `CreateVolume` again, with the same name, to ensure the + // volume exists and to retrieve the volume's `volume_id` (unless + // otherwise prohibited by "CreateVolume Errors"). // 2) Suggested name - Some storage systems allow callers to specify // an identifier by which to refer to the newly provisioned // storage. If a storage system supports this, it can optionally // use this name as the identifier for the new volume. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) string name = 1; // This field is OPTIONAL. This allows the CO to specify the capacity // requirement of the volume to be provisioned. If not specified, the // Plugin MAY choose an implementation-defined capacity range. If // specified it MUST always be honored, even when creating volumes - // from a source; which may force some backends to internally extend + // from a source; which MAY force some backends to internally extend // the volume after creating it. - CapacityRange capacity_range = 2; - // The capabilities that the provisioned volume MUST have: the Plugin - // MUST provision a volume that could satisfy ALL of the - // capabilities specified in this list. 
The Plugin MUST assume that - // the CO MAY use the provisioned volume later with ANY of the - // capabilities specified in this list. This also enables the CO to do - // early validation: if ANY of the specified volume capabilities are - // not supported by the Plugin, the call SHALL fail. This field is - // REQUIRED. + // The capabilities that the provisioned volume MUST have. SP MUST + // provision a volume that will satisfy ALL of the capabilities + // specified in this list. Otherwise SP MUST return the appropriate + // gRPC error code. + // The Plugin MUST assume that the CO MAY use the provisioned volume + // with ANY of the capabilities specified in this list. + // For example, a CO MAY specify two volume capabilities: one with + // access mode SINGLE_NODE_WRITER and another with access mode + // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the + // provisioned volume can be used in either mode. + // This also enables the CO to do early validation: If ANY of the + // specified volume capabilities are not supported by the SP, the call + // MUST return the appropriate gRPC error code. + // This field is REQUIRED. repeated VolumeCapability volume_capabilities = 3; // Plugin specific parameters passed in as opaque key-value pairs. @@ -215,7 +231,7 @@ message CreateVolumeRequest { // Secrets required by plugin to complete volume creation request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map controller_create_secrets = 5; + map secrets = 5 [(csi_secret) = true]; // If specified, the new volume will be pre-populated with data from // this source. This field is OPTIONAL. @@ -228,10 +244,10 @@ message CreateVolumeRequest { // topological accessibility information supported by the SP. // This field is OPTIONAL. // This field SHALL NOT be specified unless the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // If this field is not specified and the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY choose - // where the provisioned volume is accessible from. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. TopologyRequirement accessibility_requirements = 7; } @@ -243,11 +259,19 @@ message VolumeContentSource { // This field is REQUIRED. Plugin is REQUIRED to support creating // volume from snapshot if it supports the capability // CREATE_DELETE_SNAPSHOT. - string id = 1; + string snapshot_id = 1; + } + + message VolumeSource { + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. + string volume_id = 1; } oneof type { SnapshotSource snapshot = 1; + VolumeSource volume = 2; } } @@ -334,7 +358,7 @@ message CapacityRange { int64 limit_bytes = 2; } -// The information about a provisioned volume. +// Information about a specific volume. message Volume { // The capacity of the volume in bytes. This field is OPTIONAL. If not // set (value of 0), it indicates that the capacity of the volume is @@ -342,20 +366,32 @@ message Volume { // The value of this field MUST NOT be negative. int64 capacity_bytes = 1; - // Contains identity information for the created volume. This field is - // REQUIRED. The identity information will be used by the CO in - // subsequent calls to refer to the provisioned volume. 
- string id = 2; - - // Attributes reflect static properties of a volume and MUST be passed - // to volume validation and publishing calls. - // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable - // and SHALL be safe for the CO to cache. Attributes SHOULD NOT - // contain sensitive information. Attributes MAY NOT uniquely identify - // a volume. A volume uniquely identified by `id` SHALL always report - // the same attributes. This field is OPTIONAL and when present MUST - // be passed to volume validation and publishing calls. - map attributes = 3; + // The identifier for this volume, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + // The SP is NOT responsible for global uniqueness of volume_id across + // multiple SPs. + string volume_id = 2; + + // Opaque static properties of the volume. SP MAY use this field to + // ensure subsequent volume validation and publishing calls have + // contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // A volume uniquely identified by `volume_id` SHALL always report the + // same volume_context. + // This field is OPTIONAL and when present MUST be passed to volume + // validation and publishing calls. + map volume_context = 3; // If specified, indicates that the volume is not empty and is // pre-populated with data from the specified source. @@ -365,7 +401,7 @@ message Volume { // Specifies where (regions, zones, racks, etc.) the provisioned // volume is accessible from. // A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // An SP MAY specify multiple topologies to indicate the volume is // accessible from multiple locations. // COs MAY use this information along with the topology information @@ -373,7 +409,7 @@ message Volume { // from a given node when scheduling workloads. // This field is OPTIONAL. If it is not specified, the CO MAY assume // the volume is equally accessible from all nodes in the cluster and - // may schedule workloads referencing the volume on any available + // MAY schedule workloads referencing the volume on any available // node. // // Example 1: @@ -527,15 +563,18 @@ message TopologyRequirement { // A topological segment is a specific instance of a topological domain, // like "zone3", "rack3", etc. // For example {"com.company/zone": "Z1", "com.company/rack": "R3"} -// Valid keys have two segments: an optional prefix and name, separated +// Valid keys have two segments: an OPTIONAL prefix and name, separated // by a slash (/), for example: "com.company.example/zone". -// The key name segment is required. The prefix is optional. -// Both the key name and the prefix MUST each be 63 characters or less, -// begin and end with an alphanumeric character ([a-z0-9A-Z]) and -// contain only dashes (-), underscores (_), dots (.), or alphanumerics -// in between, for example "zone". 
-// The key prefix MUST follow reverse domain name notation format -// (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). // The key prefix SHOULD include the plugin's host company name and/or // the plugin name, to minimize the possibility of collisions with keys // from other plugins. @@ -558,7 +597,7 @@ message DeleteVolumeRequest { // Secrets required by plugin to complete volume deletion request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map controller_delete_secrets = 2; + map secrets = 2 [(csi_secret) = true]; } message DeleteVolumeResponse { @@ -573,31 +612,44 @@ message ControllerPublishVolumeRequest { // field to match the node ID returned by `NodeGetInfo`. string node_id = 2; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 3; - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. + // Indicates SP MUST publish the volume in readonly mode. + // CO MUST set this field to false if SP does not have the + // PUBLISH_READONLY controller capability. + // This is a REQUIRED field. bool readonly = 4; // Secrets required by plugin to complete controller publish volume // request. This field is OPTIONAL. Refer to the // `Secrets Requirements` section on how to use this field. - map controller_publish_secrets = 5; + map secrets = 5 [(csi_secret) = true]; - // Attributes of the volume to be used on a node. This field is - // OPTIONAL and MUST match the attributes of the Volume identified - // by `volume_id`. - map volume_attributes = 6; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 6; } message ControllerPublishVolumeResponse { - // The SP specific information that will be passed to the Plugin in - // the subsequent `NodeStageVolume` or `NodePublishVolume` calls - // for the given volume. - // This information is opaque to the CO. This field is OPTIONAL. - map publish_info = 1; + // Opaque static publish properties of the volume. SP MAY use this + // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` + // calls calls have contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. 
+ // This field is OPTIONAL and when present MUST be passed to + // subsequent `NodeStageVolume` or `NodePublishVolume` calls + map publish_context = 1; } message ControllerUnpublishVolumeRequest { // The ID of the volume. This field is REQUIRED. @@ -615,7 +667,7 @@ message ControllerUnpublishVolumeRequest { // ControllerPublishVolume call for the specified volume. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map controller_unpublish_secrets = 3; + map secrets = 3 [(csi_secret) = true]; } message ControllerUnpublishVolumeResponse { @@ -625,30 +677,52 @@ message ValidateVolumeCapabilitiesRequest { // The ID of the volume to check. This field is REQUIRED. string volume_id = 1; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 2; + // The capabilities that the CO wants to check for the volume. This - // call SHALL return "supported" only if all the volume capabilities + // call SHALL return "confirmed" only if all the volume capabilities // specified below are supported. This field is REQUIRED. - repeated VolumeCapability volume_capabilities = 2; + repeated VolumeCapability volume_capabilities = 3; - // Attributes of the volume to check. This field is OPTIONAL and MUST - // match the attributes of the Volume identified by `volume_id`. - map volume_attributes = 3; + // See CreateVolumeRequest.parameters. + // This field is OPTIONAL. + map parameters = 4; - // Specifies where (regions, zones, racks, etc.) the caller believes - // the volume is accessible from. - // A caller MAY specify multiple topologies to indicate they believe - // the volume to be accessible from multiple locations. - // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. - repeated Topology accessible_topology = 4; + // Secrets required by plugin to complete volume validation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; } message ValidateVolumeCapabilitiesResponse { - // True if the Plugin supports the specified capabilities for the - // given volume. This field is REQUIRED. - bool supported = 1; + message Confirmed { + // Volume context validated by the plugin. + // This field is OPTIONAL. + map volume_context = 1; + + // Volume capabilities supported by the plugin. + // This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 2; + + // The volume creation parameters validated by the plugin. + // This field is OPTIONAL. + map parameters = 3; + } + + // Confirmed indicates to the CO the set of capabilities that the + // plugin has validated. This field SHALL only be set to a non-empty + // value for successful validation responses. + // For successful validation responses, the CO SHALL compare the + // fields of this message to the originally requested capabilities in + // order to guard against an older plugin reporting "valid" for newer + // capability fields that it does not yet understand. + // This field is OPTIONAL. + Confirmed confirmed = 1; - // Message to the CO if `supported` above is false. This field is + // Message to the CO if `confirmed` above is empty. This field is // OPTIONAL. // An empty string is equal to an unspecified field value. 
string message = 2; @@ -705,7 +779,7 @@ message GetCapacityRequest { // `accessible_topology`. This is the same as the // `accessible_topology` the CO returns in a `CreateVolumeResponse`. // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. + // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability. Topology accessible_topology = 3; } @@ -725,7 +799,7 @@ message ControllerGetCapabilitiesRequest { message ControllerGetCapabilitiesResponse { // All the capabilities that the controller service supports. This // field is OPTIONAL. - repeated ControllerServiceCapability capabilities = 2; + repeated ControllerServiceCapability capabilities = 1; } // Specifies a capability of the controller service. @@ -742,11 +816,15 @@ message ControllerServiceCapability { // CREATE_DELETE_SNAPSHOT MUST support creating volume from // snapshot. CREATE_DELETE_SNAPSHOT = 5; - // LIST_SNAPSHOTS is NOT REQUIRED. For plugins that need to upload - // a snapshot after it is being cut, LIST_SNAPSHOTS COULD be used - // with the snapshot_id as the filter to query whether the - // uploading process is complete or not. LIST_SNAPSHOTS = 6; + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. Not all volume sources and parameters + // combinations MAY work. + CLONE_VOLUME = 7; + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + PUBLISH_READONLY = 8; } Type type = 1; @@ -764,12 +842,16 @@ message CreateSnapshotRequest { // The suggested name for the snapshot. This field is REQUIRED for // idempotency. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) string name = 2; // Secrets required by plugin to complete snapshot creation request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map create_snapshot_secrets = 3; + map secrets = 3 [(csi_secret) = true]; // Plugin specific parameters passed in as opaque key-value pairs. // This field is OPTIONAL. The Plugin is responsible for parsing and @@ -791,7 +873,7 @@ message CreateSnapshotResponse { Snapshot snapshot = 1; } -// The information about a provisioned snapshot. +// Information about a specific snapshot. message Snapshot { // This is the complete size of the snapshot in bytes. The purpose of // this field is to give CO guidance on how much space is needed to @@ -802,11 +884,16 @@ message Snapshot { // zero means it is unspecified. int64 size_bytes = 1; - // Uniquely identifies a snapshot and is generated by the plugin. It - // will not change over time. This field is REQUIRED. The identity - // information will be used by the CO in subsequent calls to refer to - // the provisioned snapshot. - string id = 2; + // The identifier for this snapshot, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific snapshot vs all other snapshots supported by this + // plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this snapshot. + // The SP is NOT responsible for global uniqueness of snapshot_id + // across multiple SPs. 
+ string snapshot_id = 2; // Identity information for the source volume. Note that creating a // snapshot from a snapshot is not supported here so the source has to @@ -814,43 +901,13 @@ message Snapshot { string source_volume_id = 3; // Timestamp when the point-in-time snapshot is taken on the storage - // system. The format of this field should be a Unix nanoseconds time - // encoded as an int64. On Unix, the command `date +%s%N` returns the - // current time in nanoseconds since 1970-01-01 00:00:00 UTC. This - // field is REQUIRED. - int64 created_at = 4; - - // The status of a snapshot. - SnapshotStatus status = 5; -} + // system. This field is REQUIRED. + .google.protobuf.Timestamp creation_time = 4; -// The status of a snapshot. -message SnapshotStatus { - enum Type { - UNKNOWN = 0; - // A snapshot is ready for use. - READY = 1; - // A snapshot is cut and is now being uploaded. - // Some cloud providers and storage systems uploads the snapshot - // to the cloud after the snapshot is cut. During this phase, - // `thaw` can be done so the application can be running again if - // `freeze` was done before taking the snapshot. - UPLOADING = 2; - // An error occurred during the snapshot uploading process. - // This error status is specific for uploading because - // `CreateSnaphot` is a blocking call before the snapshot is - // cut and therefore it SHOULD NOT come back with an error - // status when an error occurs. Instead a gRPC error code SHALL - // be returned by `CreateSnapshot` when an error occurs before - // a snapshot is cut. - ERROR_UPLOADING = 3; - } - // This field is REQUIRED. - Type type = 1; - - // Additional information to describe why a snapshot ended up in the - // `ERROR_UPLOADING` status. This field is OPTIONAL. - string details = 2; + // Indicates if a snapshot is ready to use as a + // `volume_content_source` in a `CreateVolumeRequest`. The default + // value is false. This field is REQUIRED. + bool ready_to_use = 5; } message DeleteSnapshotRequest { // The ID of the snapshot to be deleted. @@ -860,7 +917,7 @@ message DeleteSnapshotRequest { // Secrets required by plugin to complete snapshot deletion request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map delete_snapshot_secrets = 2; + map secrets = 2 [(csi_secret) = true]; } message DeleteSnapshotResponse {} @@ -890,7 +947,8 @@ message ListSnapshotsRequest { // Identity information for a specific snapshot. This field is // OPTIONAL. It can be used to list only a specific snapshot. // ListSnapshots will return with current snapshot information - // and will not block if the snapshot is being uploaded. + // and will not block if the snapshot is being processed after + // it is cut. string snapshot_id = 4; } @@ -918,28 +976,33 @@ message NodeStageVolumeRequest { // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be // left unset if the corresponding Controller Plugin does not have // this capability. This is an OPTIONAL field. - map publish_info = 2; + map publish_context = 2; - // The path to which the volume will be published. It MUST be an + // The path to which the volume MAY be staged. It MUST be an // absolute path in the root filesystem of the process serving this // request. The CO SHALL ensure that there is only one - // staging_target_path per volume. + // `staging_target_path` per volume. 
The CO SHALL ensure that the + // process serving the request has `read` and `write` permission to + // the path, and is able to create files or directories at the path + // if it does not exist. // This is a REQUIRED field. string staging_target_path = 3; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the staged volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 4; // Secrets required by plugin to complete node stage volume request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map node_stage_secrets = 5; + map secrets = 5 [(csi_secret) = true]; - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the `Volume` identified by - // `volume_id`. - map volume_attributes = 6; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 6; } message NodeStageVolumeResponse { @@ -949,7 +1012,7 @@ message NodeUnstageVolumeRequest { // The ID of the volume. This field is REQUIRED. string volume_id = 1; - // The path at which the volume was published. It MUST be an absolute + // The path at which the volume was staged. It MUST be an absolute // path in the root filesystem of the process serving this request. // This is a REQUIRED field. string staging_target_path = 2; @@ -967,9 +1030,9 @@ message NodePublishVolumeRequest { // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be // left unset if the corresponding Controller Plugin does not have // this capability. This is an OPTIONAL field. - map publish_info = 2; + map publish_context = 2; - // The path to which the device was mounted by `NodeStageVolume`. + // The path to which the volume was staged by `NodeStageVolume`. // It MUST be an absolute path in the root filesystem of the process // serving this request. // It MUST be set if the Node Plugin implements the @@ -980,28 +1043,31 @@ message NodePublishVolumeRequest { // The path to which the volume will be published. It MUST be an // absolute path in the root filesystem of the process serving this // request. The CO SHALL ensure uniqueness of target_path per volume. - // The CO SHALL ensure that the path exists, and that the process - // serving the request has `read` and `write` permissions to the path. + // The CO SHALL ensure that the process serving the request has + // `read` and `write` permissions to the path, and is able to create + // files or directories at the path if it does not exist. // This is a REQUIRED field. string target_path = 4; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 5; - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. + // Indicates SP MUST publish the volume in readonly mode. + // This field is REQUIRED. bool readonly = 6; // Secrets required by plugin to complete node publish volume request. // This field is OPTIONAL. 
Refer to the `Secrets Requirements` // section on how to use this field. - map node_publish_secrets = 7; + map secrets = 7 [(csi_secret) = true]; - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the Volume identified by - // `volume_id`. - map volume_attributes = 8; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 8; } message NodePublishVolumeResponse { @@ -1020,15 +1086,43 @@ message NodeUnpublishVolumeRequest { message NodeUnpublishVolumeResponse { // Intentionally empty. } -message NodeGetIdRequest { - // Intentionally empty. -} +message NodeGetVolumeStatsRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; -message NodeGetIdResponse { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent `ControllerPublishVolume`. + // It can be any valid path where volume was previously + // staged or published. + // It MUST be an absolute path in the root filesystem of + // the process serving this request. // This is a REQUIRED field. - string node_id = 1; + string volume_path = 2; +} + +message NodeGetVolumeStatsResponse { + // This field is OPTIONAL. + repeated VolumeUsage usage = 1; +} + +message VolumeUsage { + enum Unit { + UNKNOWN = 0; + BYTES = 1; + INODES = 2; + } + // The available capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 available = 1; + + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 total = 2; + + // The used capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 used = 3; + + // Units by which values are measured. This field is REQUIRED. + Unit unit = 4; } message NodeGetCapabilitiesRequest { // Intentionally empty. @@ -1046,6 +1140,10 @@ message NodeServiceCapability { enum Type { UNKNOWN = 0; STAGE_UNSTAGE_VOLUME = 1; + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + GET_VOLUME_STATS = 2; } Type type = 1; @@ -1060,9 +1158,14 @@ message NodeGetInfoRequest { } message NodeGetInfoResponse { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent calls to `ControllerPublishVolume`. - // This is a REQUIRED field. + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. + // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. string node_id = 1; // Maximum number of volumes that controller can publish to the node. @@ -1075,7 +1178,7 @@ message NodeGetInfoResponse { // Specifies where (regions, zones, racks, etc.) the node is // accessible from. // A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. 
// COs MAY use this information along with the topology information // returned in CreateVolumeResponse to ensure that a given volume is // accessible from a given node when scheduling workloads. diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/Makefile b/vendor/github.com/container-storage-interface/spec/lib/go/Makefile index a7443eae..3b1c5eab 100644 --- a/vendor/github.com/container-storage-interface/spec/lib/go/Makefile +++ b/vendor/github.com/container-storage-interface/spec/lib/go/Makefile @@ -58,14 +58,14 @@ $(PROTOC): PROTOC_GEN_GO_PKG := github.com/golang/protobuf/protoc-gen-go PROTOC_GEN_GO := protoc-gen-go $(PROTOC_GEN_GO): PROTOBUF_PKG := $(dir $(PROTOC_GEN_GO_PKG)) -$(PROTOC_GEN_GO): PROTOBUF_VERSION := v1.1.0 +$(PROTOC_GEN_GO): PROTOBUF_VERSION := v1.2.0 $(PROTOC_GEN_GO): mkdir -p $(dir $(GOPATH)/src/$(PROTOBUF_PKG)) test -d $(GOPATH)/src/$(PROTOBUF_PKG)/.git || git clone https://$(PROTOBUF_PKG) $(GOPATH)/src/$(PROTOBUF_PKG) (cd $(GOPATH)/src/$(PROTOBUF_PKG) && \ (test "$$(git describe --tags | head -1)" = "$(PROTOBUF_VERSION)" || \ (git fetch && git checkout tags/$(PROTOBUF_VERSION)))) - (cd $(GOPATH)/src/$(PROTOBUF_PKG) && go get -v -d ./...) && \ + (cd $(GOPATH)/src/$(PROTOBUF_PKG) && go get -v -d $$(go list -f '{{ .ImportPath }}' ./...)) && \ go build -o "$@" $(PROTOC_GEN_GO_PKG) @@ -83,18 +83,25 @@ export PATH := $(shell pwd):$(PATH) ## BUILD ## ######################################################################## CSI_PROTO := ../../csi.proto -CSI_PKG := $(shell cat $(CSI_PROTO) | sed -n -e 's/^package.\([^;]*\);$$/\1/p'|tr '.' '/') -CSI_GO := $(CSI_PKG)/csi.pb.go +CSI_PKG_ROOT := github.com/container-storage-interface/spec +CSI_PKG_SUB := $(shell cat $(CSI_PROTO) | sed -n -e 's/^package.\([^;]*\).v[0-9]\+;$$/\1/p'|tr '.' '/') +CSI_BUILD := $(CSI_PKG_SUB)/.build +CSI_GO := $(CSI_PKG_SUB)/csi.pb.go CSI_A := csi.a -CSI_GO_TMP := $(CSI_PKG)/.build/csi.pb.go +CSI_GO_TMP := $(CSI_BUILD)/$(CSI_PKG_ROOT)/csi.pb.go # This recipe generates the go language bindings to a temp area. +$(CSI_GO_TMP): HERE := $(shell pwd) +$(CSI_GO_TMP): PTYPES_PKG := github.com/golang/protobuf/ptypes $(CSI_GO_TMP): GO_OUT := plugins=grpc -$(CSI_GO_TMP): GO_OUT := $(GO_OUT),Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers -$(CSI_GO_TMP): INCLUDE = -I$(PROTOC_TMP_DIR)/include +$(CSI_GO_TMP): GO_OUT := $(GO_OUT),Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor +$(CSI_GO_TMP): GO_OUT := $(GO_OUT),Mgoogle/protobuf/wrappers.proto=$(PTYPES_PKG)/wrappers +$(CSI_GO_TMP): GO_OUT := $(GO_OUT):"$(HERE)/$(CSI_BUILD)" +$(CSI_GO_TMP): INCLUDE := -I$(GOPATH)/src -I$(HERE)/$(PROTOC_TMP_DIR)/include $(CSI_GO_TMP): $(CSI_PROTO) | $(PROTOC) $(PROTOC_GEN_GO) @mkdir -p "$(@D)" - $(PROTOC) -I "$( controller_create_secrets = 5; + map secrets = 5 [(csi_secret) = true]; // If specified, the new volume will be pre-populated with data from // this source. This field is OPTIONAL. @@ -669,10 +708,10 @@ message CreateVolumeRequest { // topological accessibility information supported by the SP. // This field is OPTIONAL. // This field SHALL NOT be specified unless the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // If this field is not specified and the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY choose - // where the provisioned volume is accessible from. 
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. TopologyRequirement accessibility_requirements = 7; } @@ -684,11 +723,19 @@ message VolumeContentSource { // This field is REQUIRED. Plugin is REQUIRED to support creating // volume from snapshot if it supports the capability // CREATE_DELETE_SNAPSHOT. - string id = 1; + string snapshot_id = 1; + } + + message VolumeSource { + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. + string volume_id = 1; } oneof type { SnapshotSource snapshot = 1; + VolumeSource volume = 2; } } @@ -775,7 +822,7 @@ message CapacityRange { int64 limit_bytes = 2; } -// The information about a provisioned volume. +// Information about a specific volume. message Volume { // The capacity of the volume in bytes. This field is OPTIONAL. If not // set (value of 0), it indicates that the capacity of the volume is @@ -783,20 +830,32 @@ message Volume { // The value of this field MUST NOT be negative. int64 capacity_bytes = 1; - // Contains identity information for the created volume. This field is - // REQUIRED. The identity information will be used by the CO in - // subsequent calls to refer to the provisioned volume. - string id = 2; - - // Attributes reflect static properties of a volume and MUST be passed - // to volume validation and publishing calls. - // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable - // and SHALL be safe for the CO to cache. Attributes SHOULD NOT - // contain sensitive information. Attributes MAY NOT uniquely identify - // a volume. A volume uniquely identified by `id` SHALL always report - // the same attributes. This field is OPTIONAL and when present MUST - // be passed to volume validation and publishing calls. - map attributes = 3; + // The identifier for this volume, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + // The SP is NOT responsible for global uniqueness of volume_id across + // multiple SPs. + string volume_id = 2; + + // Opaque static properties of the volume. SP MAY use this field to + // ensure subsequent volume validation and publishing calls have + // contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // A volume uniquely identified by `volume_id` SHALL always report the + // same volume_context. + // This field is OPTIONAL and when present MUST be passed to volume + // validation and publishing calls. + map volume_context = 3; // If specified, indicates that the volume is not empty and is // pre-populated with data from the specified source. @@ -806,7 +865,7 @@ message Volume { // Specifies where (regions, zones, racks, etc.) the provisioned // volume is accessible from. 
// A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // An SP MAY specify multiple topologies to indicate the volume is // accessible from multiple locations. // COs MAY use this information along with the topology information @@ -814,7 +873,7 @@ message Volume { // from a given node when scheduling workloads. // This field is OPTIONAL. If it is not specified, the CO MAY assume // the volume is equally accessible from all nodes in the cluster and - // may schedule workloads referencing the volume on any available + // MAY schedule workloads referencing the volume on any available // node. // // Example 1: @@ -968,15 +1027,18 @@ message TopologyRequirement { // A topological segment is a specific instance of a topological domain, // like "zone3", "rack3", etc. // For example {"com.company/zone": "Z1", "com.company/rack": "R3"} -// Valid keys have two segments: an optional prefix and name, separated +// Valid keys have two segments: an OPTIONAL prefix and name, separated // by a slash (/), for example: "com.company.example/zone". -// The key name segment is required. The prefix is optional. -// Both the key name and the prefix MUST each be 63 characters or less, -// begin and end with an alphanumeric character ([a-z0-9A-Z]) and -// contain only dashes (-), underscores (_), dots (.), or alphanumerics -// in between, for example "zone". -// The key prefix MUST follow reverse domain name notation format -// (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). // The key prefix SHOULD include the plugin's host company name and/or // the plugin name, to minimize the possibility of collisions with keys // from other plugins. @@ -1001,18 +1063,17 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| +| Source incompatible or not supported | 3 INVALID_ARGUMENT | Besides the general cases, this code MUST also be used to indicate when plugin supporting CREATE_DELETE_VOLUME cannot create a volume from the requested source (`SnapshotSource` or `VolumeSource`). Failure MAY be caused by not supporting the source (CO SHOULD NOT have provided that source) or incompatibility between `parameters` from the source and the ones requested for the new volume. More human-readable information SHOULD be provided in the gRPC `status.message` field if the problem is the source. | On source related issues, caller MUST use different parameters, a different source, or no source at all. | +| Source does not exist | 5 NOT_FOUND | Indicates that the specified source does not exist. | Caller MUST verify that the `volume_content_source` is correct, the source is accessible, and has not been deleted before retrying with exponential back off. 
| | Volume already exists but is incompatible | 6 ALREADY_EXISTS | Indicates that a volume corresponding to the specified volume `name` already exists but is incompatible with the specified `capacity_range`, `volume_capabilities` or `parameters`. | Caller MUST fix the arguments or use a different `name` before retrying. | | Unable to provision in `accessible_topology` | 8 RESOURCE_EXHAUSTED | Indicates that although the `accessible_topology` field is valid, a new volume can not be provisioned with the specified topology constraints. More human-readable information MAY be provided in the gRPC `status.message` field. | Caller MUST ensure that whatever is preventing volumes from being provisioned in the specified location (e.g. quota issues) is addressed before retrying with exponential backoff. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | | Unsupported `capacity_range` | 11 OUT_OF_RANGE | Indicates that the capacity range is not allowed by the Plugin, for example when trying to create a volume smaller than the source snapshot. More human-readable information MAY be provided in the gRPC `status.message` field. | Caller MUST fix the capacity range before retrying. | -| Call not implemented | 12 UNIMPLEMENTED | CreateVolume call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | #### `DeleteVolume` A Controller Plugin MUST implement this RPC call if it has `CREATE_DELETE_VOLUME` capability. This RPC will be called by the CO to deprovision a volume. -If successful, the storage space associated with the volume MUST be released and all the data in the volume SHALL NOT be accessible anymore. This operation MUST be idempotent. If a volume corresponding to the specified `volume_id` does not exist or the artifacts associated with the volume do not exist anymore, the Plugin MUST reply `0 OK`. @@ -1026,7 +1087,7 @@ message DeleteVolumeRequest { // Secrets required by plugin to complete volume deletion request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map controller_delete_secrets = 2; + map secrets = 2 [(csi_secret) = true]; } message DeleteVolumeResponse { @@ -1043,8 +1104,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| | Volume in use | 9 FAILED_PRECONDITION | Indicates that the volume corresponding to the specified `volume_id` could not be deleted because it is in use by another resource. | Caller SHOULD ensure that there are no other resources using the volume, and then retry with exponential back off. 
| -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | DeleteVolume call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | #### `ControllerPublishVolume` @@ -1071,31 +1130,44 @@ message ControllerPublishVolumeRequest { // field to match the node ID returned by `NodeGetInfo`. string node_id = 2; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 3; - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. + // Indicates SP MUST publish the volume in readonly mode. + // CO MUST set this field to false if SP does not have the + // PUBLISH_READONLY controller capability. + // This is a REQUIRED field. bool readonly = 4; // Secrets required by plugin to complete controller publish volume // request. This field is OPTIONAL. Refer to the // `Secrets Requirements` section on how to use this field. - map controller_publish_secrets = 5; + map secrets = 5 [(csi_secret) = true]; - // Attributes of the volume to be used on a node. This field is - // OPTIONAL and MUST match the attributes of the Volume identified - // by `volume_id`. - map volume_attributes = 6; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 6; } message ControllerPublishVolumeResponse { - // The SP specific information that will be passed to the Plugin in - // the subsequent `NodeStageVolume` or `NodePublishVolume` calls - // for the given volume. - // This information is opaque to the CO. This field is OPTIONAL. - map publish_info = 1; + // Opaque static publish properties of the volume. SP MAY use this + // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` + // calls calls have contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. 
+ // This field is OPTIONAL and when present MUST be passed to + // subsequent `NodeStageVolume` or `NodePublishVolume` calls + map publish_context = 1; } ``` @@ -1112,8 +1184,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Volume published but is incompatible | 6 ALREADY_EXISTS | Indicates that a volume corresponding to the specified `volume_id` has already been published at the node corresponding to the specified `volume_id` but is incompatible with the specified `volume_capability` or `readonly` flag . | Caller MUST fix the arguments before retying. | | Volume published to another node | 9 FAILED_PRECONDITION | Indicates that a volume corresponding to the specified `volume_id` has already been published at another node and does not have MULTI_NODE volume capability. If this error code is returned, the Plugin SHOULD specify the `node_id` of the node at which the volume is published as part of the gRPC `status.message`. | Caller SHOULD ensure the specified volume is not published at any other node before retrying with exponential back off. | | Max volumes attached | 8 RESOURCE_EXHAUSTED | Indicates that the maximum supported number of volumes that can be attached to the specified node are already attached. Therefore, this operation will fail until at least one of the existing attached volumes is detached from the node. | Caller MUST ensure that the number of volumes already attached to the node is less then the maximum supported number of volumes before retrying with exponential backoff. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | ControllerPublishVolume call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | #### `ControllerUnpublishVolume` @@ -1146,7 +1216,7 @@ message ControllerUnpublishVolumeRequest { // ControllerPublishVolume call for the specified volume. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map controller_unpublish_secrets = 3; + map secrets = 3 [(csi_secret) = true]; } message ControllerUnpublishVolumeResponse { @@ -1164,46 +1234,69 @@ The CO MUST implement the specified error recovery behavior when it encounters t |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | | Node does not exist | 5 NOT_FOUND | Indicates that a node corresponding to the specified `node_id` does not exist. 
| Caller MUST verify that the `node_id` is correct and that the node is available and has not been terminated or deleted before retrying with exponential backoff. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | ControllerUnpublishVolume call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | #### `ValidateVolumeCapabilities` A Controller Plugin MUST implement this RPC call. This RPC will be called by the CO to check if a pre-provisioned volume has all the capabilities that the CO wants. -This RPC call SHALL return `supported` only if all the volume capabilities specified in the request are supported. +This RPC call SHALL return `confirmed` only if all the volume capabilities specified in the request are supported (see caveat below). This operation MUST be idempotent. +NOTE: Older plugins will parse but likely not "process" newer fields that MAY be present in capability-validation messages (and sub-messages) sent by a CO that is communicating using a newer, backwards-compatible version of the CSI protobufs. +Therefore, the CO SHALL reconcile successful capability-validation responses by comparing the validated capabilities with those that it had originally requested. + ```protobuf message ValidateVolumeCapabilitiesRequest { // The ID of the volume to check. This field is REQUIRED. string volume_id = 1; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 2; + // The capabilities that the CO wants to check for the volume. This - // call SHALL return "supported" only if all the volume capabilities + // call SHALL return "confirmed" only if all the volume capabilities // specified below are supported. This field is REQUIRED. - repeated VolumeCapability volume_capabilities = 2; + repeated VolumeCapability volume_capabilities = 3; - // Attributes of the volume to check. This field is OPTIONAL and MUST - // match the attributes of the Volume identified by `volume_id`. - map volume_attributes = 3; + // See CreateVolumeRequest.parameters. + // This field is OPTIONAL. + map parameters = 4; - // Specifies where (regions, zones, racks, etc.) the caller believes - // the volume is accessible from. - // A caller MAY specify multiple topologies to indicate they believe - // the volume to be accessible from multiple locations. - // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. - repeated Topology accessible_topology = 4; + // Secrets required by plugin to complete volume validation request. + // This field is OPTIONAL. 
Refer to the `Secrets Requirements` + // section on how to use this field. + map secrets = 5 [(csi_secret) = true]; } message ValidateVolumeCapabilitiesResponse { - // True if the Plugin supports the specified capabilities for the - // given volume. This field is REQUIRED. - bool supported = 1; + message Confirmed { + // Volume context validated by the plugin. + // This field is OPTIONAL. + map volume_context = 1; - // Message to the CO if `supported` above is false. This field is + // Volume capabilities supported by the plugin. + // This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 2; + + // The volume creation parameters validated by the plugin. + // This field is OPTIONAL. + map parameters = 3; + } + + // Confirmed indicates to the CO the set of capabilities that the + // plugin has validated. This field SHALL only be set to a non-empty + // value for successful validation responses. + // For successful validation responses, the CO SHALL compare the + // fields of this message to the originally requested capabilities in + // order to guard against an older plugin reporting "valid" for newer + // capability fields that it does not yet understand. + // This field is OPTIONAL. + Confirmed confirmed = 1; + + // Message to the CO if `confirmed` above is empty. This field is // OPTIONAL. // An empty string is equal to an unspecified field value. string message = 2; @@ -1225,6 +1318,8 @@ The CO MUST implement the specified error recovery behavior when it encounters t A Controller Plugin MUST implement this RPC call if it has `LIST_VOLUMES` capability. The Plugin SHALL return the information about all the volumes that it knows about. +If volumes are created and/or deleted while the CO is concurrently paging through `ListVolumes` results then it is possible that the CO MAY either witness duplicate volumes in the list, not witness existing volumes, or both. +The CO SHALL NOT expect a consistent "view" of all volumes when paging through the volume list via multiple calls to `ListVolumes`. ```protobuf message ListVolumesRequest { @@ -1298,7 +1393,7 @@ message GetCapacityRequest { // `accessible_topology`. This is the same as the // `accessible_topology` the CO returns in a `CreateVolumeResponse`. // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. + // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability. Topology accessible_topology = 3; } @@ -1329,7 +1424,7 @@ message ControllerGetCapabilitiesRequest { message ControllerGetCapabilitiesResponse { // All the capabilities that the controller service supports. This // field is OPTIONAL. - repeated ControllerServiceCapability capabilities = 2; + repeated ControllerServiceCapability capabilities = 1; } // Specifies a capability of the controller service. @@ -1346,11 +1441,15 @@ message ControllerServiceCapability { // CREATE_DELETE_SNAPSHOT MUST support creating volume from // snapshot. CREATE_DELETE_SNAPSHOT = 5; - // LIST_SNAPSHOTS is NOT REQUIRED. For plugins that need to upload - // a snapshot after it is being cut, LIST_SNAPSHOTS COULD be used - // with the snapshot_id as the filter to query whether the - // uploading process is complete or not. LIST_SNAPSHOTS = 6; + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. Not all volume sources and parameters + // combinations MAY work. 
+ CLONE_VOLUME = 7; + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + PUBLISH_READONLY = 8; } Type type = 1; @@ -1373,17 +1472,43 @@ A Controller Plugin MUST implement this RPC call if it has `CREATE_DELETE_SNAPSH This RPC will be called by the CO to create a new snapshot from a source volume on behalf of a user. This operation MUST be idempotent. -If a snapshot corresponding to the specified snapshot `name` is already successfully cut and uploaded (if upload is part of the process) and is compatible with the specified `source_volume_id` and `parameters` in the `CreateSnapshotRequest`, the Plugin MUST reply `0 OK` with the corresponding `CreateSnapshotResponse`. +If a snapshot corresponding to the specified snapshot `name` is successfully cut and ready to use (meaning it MAY be specified as a `volume_content_source` in a `CreateVolumeRequest`), the Plugin MUST reply `0 OK` with the corresponding `CreateSnapshotResponse`. If an error occurs before a snapshot is cut, `CreateSnapshot` SHOULD return a corresponding gRPC error code that reflects the error condition. -For plugins that implement snapshot uploads, `CreateSnapshot` SHOULD return `10 ABORTED`, a gRPC code that indicates the operation is pending for snapshot, during the snapshot uploading processs. -If an error occurs during the uploading process, `CreateSnapshot` SHOULD return a corresponding gRPC error code that reflects the error condition. +For plugins that supports snapshot post processing such as uploading, `CreateSnapshot` SHOULD return `0 OK` and `ready_to_use` SHOULD be set to `false` after the snapshot is cut but still being processed. +CO SHOULD then reissue the same `CreateSnapshotRequest` periodically until boolean `ready_to_use` flips to `true` indicating the snapshot has been "processed" and is ready to use to create new volumes. +If an error occurs during the process, `CreateSnapshot` SHOULD return a corresponding gRPC error code that reflects the error condition. A snapshot MAY be used as the source to provision a new volume. -A CreateVolumeRequest message may specify an OPTIONAL source snapshot parameter. +A CreateVolumeRequest message MAY specify an OPTIONAL source snapshot parameter. Reverting a snapshot, where data in the original volume is erased and replaced with data in the snapshot, is an advanced functionality not every storage system can support and therefore is currently out of scope. +##### The ready_to_use Parameter + +Some SPs MAY "process" the snapshot after the snapshot is cut, for example, maybe uploading the snapshot somewhere after the snapshot is cut. +The post-cut process MAY be a long process that could take hours. +The CO MAY freeze the application using the source volume before taking the snapshot. +The purpose of `freeze` is to ensure the application data is in consistent state. +When `freeze` is performed, the container is paused and the application is also paused. +When `thaw` is performed, the container and the application start running again. +During the snapshot processing phase, since the snapshot is already cut, a `thaw` operation can be performed so application can start running without waiting for the process to complete. +The `ready_to_use` parameter of the snapshot will become `true` after the process is complete. + +For SPs that do not do additional processing after cut, the `ready_to_use` parameter SHOULD be `true` after the snapshot is cut. +`thaw` can be done when the `ready_to_use` parameter is `true` in this case. 
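As a concrete illustration of the polling pattern described here, the following is a minimal, hypothetical CO-side sketch. It assumes the generated Go bindings in `lib/go/csi` and an already-established gRPC connection; the helper name `waitForSnapshotReady` and the back-off values are illustrative only and are not part of the specification.

```go
package cohelpers

import (
	"context"
	"time"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
)

// waitForSnapshotReady re-issues the same idempotent CreateSnapshot request
// until the plugin reports ready_to_use == true, backing off between calls.
func waitForSnapshotReady(ctx context.Context, conn *grpc.ClientConn, name, sourceVolumeID string) (*csi.Snapshot, error) {
	client := csi.NewControllerClient(conn)
	req := &csi.CreateSnapshotRequest{
		// Re-using the same name on every call is what keeps the request idempotent.
		Name:           name,
		SourceVolumeId: sourceVolumeID,
	}
	backoff := time.Second
	for {
		resp, err := client.CreateSnapshot(ctx, req)
		if err != nil {
			// A gRPC error at any stage means the cut or the post-cut
			// processing failed; the CO SHOULD consider deleting the snapshot.
			return nil, err
		}
		snap := resp.GetSnapshot()
		if snap.GetReadyToUse() {
			return snap, nil
		}
		// The snapshot is cut but still being processed: the application can
		// be thawed now while the CO keeps polling with exponential back off.
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(backoff):
		}
		if backoff < 30*time.Second {
			backoff *= 2
		}
	}
}
```

The key design point the sketch mirrors is that `CreateSnapshot` itself stays synchronous only up to the cut; everything after the cut is observed by repeating the identical request and inspecting `ready_to_use`.
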
+ +The `ready_to_use` parameter provides guidance to the CO on when it can "thaw" the application in the process of snapshotting. +If the cloud provider or storage system needs to process the snapshot after the snapshot is cut, the `ready_to_use` parameter returned by CreateSnapshot SHALL be `false`. +CO MAY continue to call CreateSnapshot while waiting for the process to complete until `ready_to_use` becomes `true`. +Note that CreateSnapshot no longer blocks after the snapshot is cut. + +A gRPC error code SHALL be returned if an error occurs during any stage of the snapshotting process. +A CO SHOULD explicitly delete snapshots when an error occurs. + +Based on this information, CO can issue repeated (idemponent) calls to CreateSnapshot, monitor the response, and make decisions. +Note that CreateSnapshot is a synchronous call and it MUST block until the snapshot is cut. + ```protobuf message CreateSnapshotRequest { // The ID of the source volume to be snapshotted. @@ -1392,12 +1517,16 @@ message CreateSnapshotRequest { // The suggested name for the snapshot. This field is REQUIRED for // idempotency. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) string name = 2; // Secrets required by plugin to complete snapshot creation request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map create_snapshot_secrets = 3; + map secrets = 3 [(csi_secret) = true]; // Plugin specific parameters passed in as opaque key-value pairs. // This field is OPTIONAL. The Plugin is responsible for parsing and @@ -1419,7 +1548,7 @@ message CreateSnapshotResponse { Snapshot snapshot = 1; } -// The information about a provisioned snapshot. +// Information about a specific snapshot. message Snapshot { // This is the complete size of the snapshot in bytes. The purpose of // this field is to give CO guidance on how much space is needed to @@ -1430,11 +1559,16 @@ message Snapshot { // zero means it is unspecified. int64 size_bytes = 1; - // Uniquely identifies a snapshot and is generated by the plugin. It - // will not change over time. This field is REQUIRED. The identity - // information will be used by the CO in subsequent calls to refer to - // the provisioned snapshot. - string id = 2; + // The identifier for this snapshot, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific snapshot vs all other snapshots supported by this + // plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this snapshot. + // The SP is NOT responsible for global uniqueness of snapshot_id + // across multiple SPs. + string snapshot_id = 2; // Identity information for the source volume. Note that creating a // snapshot from a snapshot is not supported here so the source has to @@ -1442,43 +1576,13 @@ message Snapshot { string source_volume_id = 3; // Timestamp when the point-in-time snapshot is taken on the storage - // system. The format of this field should be a Unix nanoseconds time - // encoded as an int64. On Unix, the command `date +%s%N` returns the - // current time in nanoseconds since 1970-01-01 00:00:00 UTC. This - // field is REQUIRED. - int64 created_at = 4; - - // The status of a snapshot. - SnapshotStatus status = 5; -} + // system. 
This field is REQUIRED. + .google.protobuf.Timestamp creation_time = 4; -// The status of a snapshot. -message SnapshotStatus { - enum Type { - UNKNOWN = 0; - // A snapshot is ready for use. - READY = 1; - // A snapshot is cut and is now being uploaded. - // Some cloud providers and storage systems uploads the snapshot - // to the cloud after the snapshot is cut. During this phase, - // `thaw` can be done so the application can be running again if - // `freeze` was done before taking the snapshot. - UPLOADING = 2; - // An error occurred during the snapshot uploading process. - // This error status is specific for uploading because - // `CreateSnaphot` is a blocking call before the snapshot is - // cut and therefore it SHOULD NOT come back with an error - // status when an error occurs. Instead a gRPC error code SHALL - // be returned by `CreateSnapshot` when an error occurs before - // a snapshot is cut. - ERROR_UPLOADING = 3; - } - // This field is REQUIRED. - Type type = 1; - - // Additional information to describe why a snapshot ended up in the - // `ERROR_UPLOADING` status. This field is OPTIONAL. - string details = 2; + // Indicates if a snapshot is ready to use as a + // `volume_content_source` in a `CreateVolumeRequest`. The default + // value is false. This field is REQUIRED. + bool ready_to_use = 5; } ``` @@ -1491,16 +1595,14 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| | Snapshot already exists but is incompatible | 6 ALREADY_EXISTS | Indicates that a snapshot corresponding to the specified snapshot `name` already exists but is incompatible with the specified `volume_id`. | Caller MUST fix the arguments or use a different `name` before retrying. | -| Operation pending for snapshot | 10 ABORTED | Indicates that there is a already an operation pending for the specified snapshot. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per snapshot at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same snapshot. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified snapshot, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | CreateSnapshot call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` to discover Plugin capabilities. | -| Not enough space to create snapshot | 13 RESOURCE_EXHAUSTED | There is not enough space on the storage system to handle the create snapshot request. | Caller should fail this request. Future calls to CreateSnapshot may succeed if space is freed up. | +| Operation pending for snapshot | 10 ABORTED | Indicates that there is already an operation pending for the specified snapshot. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per snapshot at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same snapshot. 
The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified snapshot, and then retry with exponential back off. | +| Not enough space to create snapshot | 13 RESOURCE_EXHAUSTED | There is not enough space on the storage system to handle the create snapshot request. | Caller SHOULD fail this request. Future calls to CreateSnapshot MAY succeed if space is freed up. | #### `DeleteSnapshot` A Controller Plugin MUST implement this RPC call if it has `CREATE_DELETE_SNAPSHOT` capability. This RPC will be called by the CO to delete a snapshot. -If successful, the storage space associated with the snapshot MUST be released and all the data in the snapshot SHALL NOT be accessible anymore. This operation MUST be idempotent. If a snapshot corresponding to the specified `snapshot_id` does not exist or the artifacts associated with the snapshot do not exist anymore, the Plugin MUST reply `0 OK`. @@ -1514,7 +1616,7 @@ message DeleteSnapshotRequest { // Secrets required by plugin to complete snapshot deletion request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map delete_snapshot_secrets = 2; + map secrets = 2 [(csi_secret) = true]; } message DeleteSnapshotResponse {} @@ -1530,7 +1632,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t |-----------|-----------|-------------|-------------------| | Snapshot in use | 9 FAILED_PRECONDITION | Indicates that the snapshot corresponding to the specified `snapshot_id` could not be deleted because it is in use by another resource. | Caller SHOULD ensure that there are no other resources using the snapshot, and then retry with exponential back off. | | Operation pending for snapshot | 10 ABORTED | Indicates that there is already an operation pending for the specified snapshot. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per snapshot at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same snapshot. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified snapshot, and then retry with exponential back off. | -| Call not implemented | 12 UNIMPLEMENTED | DeleteSnapshot call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` to discover Plugin capabilities. | #### `ListSnapshots` @@ -1538,6 +1639,8 @@ The CO MUST implement the specified error recovery behavior when it encounters t A Controller Plugin MUST implement this RPC call if it has `LIST_SNAPSHOTS` capability. The Plugin SHALL return the information about all snapshots on the storage system within the given parameters regardless of how they were created. `ListSnapshots` SHALL NOT list a snapshot that is being created but has not been cut successfully yet. +If snapshots are created and/or deleted while the CO is concurrently paging through `ListSnapshots` results then it is possible that the CO MAY either witness duplicate snapshots in the list, not witness existing snapshots, or both. 
+The CO SHALL NOT expect a consistent "view" of all snapshots when paging through the snapshot list via multiple calls to `ListSnapshots`. ```protobuf // List all snapshots on the storage system regardless of how they were @@ -1566,7 +1669,8 @@ message ListSnapshotsRequest { // Identity information for a specific snapshot. This field is // OPTIONAL. It can be used to list only a specific snapshot. // ListSnapshots will return with current snapshot information - // and will not block if the snapshot is being uploaded. + // and will not block if the snapshot is being processed after + // it is cut. string snapshot_id = 4; } @@ -1621,40 +1725,10 @@ If a `CreateSnapshot` operation times out before the snapshot is cut, leaving th 2. The CO takes no further action regarding the timed out RPC, a snapshot is possibly leaked and the operator/user is expected to clean up. It is NOT REQUIRED for a controller plugin to implement the `LIST_SNAPSHOTS` capability if it supports the `CREATE_DELETE_SNAPSHOT` capability: the onus is upon the CO to take into consideration the full range of plugin capabilities before deciding how to proceed in the above scenario. -A controller plugin COULD implement the `LIST_SNAPSHOTS` capability and call it repeatedly with the `snapshot_id` as a filter to query whether the uploading process is complete or not if it needs to upload a snapshot after it is being cut. -##### Snapshot Statuses - -A snapshot could have the following statusus: UPLOADING, READY, and ERROR. - -Some cloud providers will upload the snapshot to a location in the cloud (i.e., an object store) after the snapshot is cut. -Uploading may be a long process that could take hours. -If a `freeze` operation was done on the application before taking the snapshot, it could be a long time before the application can be running again if we wait until the upload is complete to `thaw` the application. -The purpose of `freeze` is to ensure the application data is in consistent state. -When `freeze` is performed, the container is paused and the application is also paused. -When `thaw` is performed, the container and the application start running again. -During the snapshot uploading phase, since the snapshot is already cut, a `thaw` operation can be performed so application can start running without waiting for the upload to complete. -The status of the snapshot will become `READY` after the upload is complete. - -For cloud providers and storage systems that don't have the uploading process, the status should be `READY` after the snapshot is cut. -`thaw` can be done when the status is `READY` in this case. - -A `CREATING` status is not included here because CreateSnapshot is synchronous and will block until the snapshot is cut. - -`ERROR` is a terminal snapshot status. -A CO SHOULD explicitly delete snapshots in this status. - -The SnapshotStatus parameter provides guidance to the CO on what action can be taken in the process of snapshotting. -Based on this information, CO can issue repeated (idemponent) calls to CreateSnapshot, monitor the response, and make decisions. -Note that CreateSnapshot is a synchronous call and it must block until the snapshot is cut. -If the cloud provider or storage system does not need to upload the snapshot after it is cut, the status returned by CreateSnapshot SHALL be `READY`. -If the cloud provider or storage system needs to upload the snapshot after the snapshot is cut, the status returned by CreateSnapshot SHALL be `UPLOADING`. 
-CO MAY continue to call CreateSnapshot while waiting for the upload to complete until the status becomes `READY`. -Note that CreateSnapshot no longer blocks after the snapshot is cut. - -Alternatively, ListSnapshots can be called repeatedly with snapshot_id as filtering to wait for the upload to complete. ListSnapshots SHALL return with current information regarding the snapshots on the storage system. -When upload is complete, the status of the snapshot from ListSnapshots SHALL become `READY`. +When processing is complete, the `ready_to_use` parameter of the snapshot from ListSnapshots SHALL become `true`. +The downside of calling ListSnapshots is that ListSnapshots will not return a gRPC error code if an error occurs during the processing. So calling CreateSnapshot repeatedly is the preferred way to check if the processing is complete. ### Node Service RPC @@ -1684,28 +1758,33 @@ message NodeStageVolumeRequest { // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be // left unset if the corresponding Controller Plugin does not have // this capability. This is an OPTIONAL field. - map publish_info = 2; + map publish_context = 2; - // The path to which the volume will be published. It MUST be an + // The path to which the volume MAY be staged. It MUST be an // absolute path in the root filesystem of the process serving this // request. The CO SHALL ensure that there is only one - // staging_target_path per volume. + // `staging_target_path` per volume. The CO SHALL ensure that the + // process serving the request has `read` and `write` permission to + // the path, and is able to create files or directories at the path + // if it does not exist. // This is a REQUIRED field. string staging_target_path = 3; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the staged volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 4; // Secrets required by plugin to complete node stage volume request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map node_stage_secrets = 5; + map secrets = 5 [(csi_secret) = true]; - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the `Volume` identified by - // `volume_id`. - map volume_attributes = 6; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 6; } message NodeStageVolumeResponse { @@ -1723,7 +1802,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | | Volume published but is incompatible | 6 ALREADY_EXISTS | Indicates that a volume corresponding to the specified `volume_id` has already been published at the specified `staging_target_path` but is incompatible with the specified `volume_capability` flag. | Caller MUST fix the arguments before retying. 
| -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | | Exceeds capabilities | 9 FAILED_PRECONDITION | Indicates that the CO has exceeded the volume's capabilities because the volume does not have MULTI_NODE capability. | Caller MAY choose to call `ValidateVolumeCapabilities` to validate the volume capabilities, or wait for the volume to be unpublished on the node. | #### `NodeUnstageVolume` @@ -1751,7 +1829,7 @@ message NodeUnstageVolumeRequest { // The ID of the volume. This field is REQUIRED. string volume_id = 1; - // The path at which the volume was published. It MUST be an absolute + // The path at which the volume was staged. It MUST be an absolute // path in the root filesystem of the process serving this request. // This is a REQUIRED field. string staging_target_path = 2; @@ -1771,7 +1849,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | #### RPC Interactions and Reference Counting `NodeStageVolume`, `NodeUnstageVolume`, `NodePublishVolume`, `NodeUnpublishVolume` @@ -1802,7 +1879,7 @@ The following table shows what the Plugin SHOULD return when receiving a second | MULTI_NODE | OK (idempotent) | ALREADY_EXISTS | OK | OK | | Non MULTI_NODE | OK (idempotent) | ALREADY_EXISTS | FAILED_PRECONDITION | FAILED_PRECONDITION| -(`Tn`: target path of the n-th `NodePublishVolume`, `Pn`: other arguments of the n-th `NodePublishVolume` except `node_publish_secrets`) +(`Tn`: target path of the n-th `NodePublishVolume`, `Pn`: other arguments of the n-th `NodePublishVolume` except `secrets`) ```protobuf message NodePublishVolumeRequest { @@ -1814,9 +1891,9 @@ message NodePublishVolumeRequest { // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be // left unset if the corresponding Controller Plugin does not have // this capability. 
This is an OPTIONAL field. - map publish_info = 2; + map publish_context = 2; - // The path to which the device was mounted by `NodeStageVolume`. + // The path to which the volume was staged by `NodeStageVolume`. // It MUST be an absolute path in the root filesystem of the process // serving this request. // It MUST be set if the Node Plugin implements the @@ -1827,28 +1904,31 @@ message NodePublishVolumeRequest { // The path to which the volume will be published. It MUST be an // absolute path in the root filesystem of the process serving this // request. The CO SHALL ensure uniqueness of target_path per volume. - // The CO SHALL ensure that the path exists, and that the process - // serving the request has `read` and `write` permissions to the path. + // The CO SHALL ensure that the process serving the request has + // `read` and `write` permissions to the path, and is able to create + // files or directories at the path if it does not exist. // This is a REQUIRED field. string target_path = 4; - // The capability of the volume the CO expects the volume to have. + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. // This is a REQUIRED field. VolumeCapability volume_capability = 5; - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. + // Indicates SP MUST publish the volume in readonly mode. + // This field is REQUIRED. bool readonly = 6; // Secrets required by plugin to complete node publish volume request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - map node_publish_secrets = 7; + map secrets = 7 [(csi_secret) = true]; - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the Volume identified by - // `volume_id`. - map volume_attributes = 8; + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + map volume_context = 8; } message NodePublishVolumeResponse { @@ -1866,7 +1946,6 @@ The CO MUST implement the specified error recovery behavior when it encounters t |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | | Volume published but is incompatible | 6 ALREADY_EXISTS | Indicates that a volume corresponding to the specified `volume_id` has already been published at the specified `target_path` but is incompatible with the specified `volume_capability` or `readonly` flag. | Caller MUST fix the arguments before retying. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. 
| Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | | Exceeds capabilities | 9 FAILED_PRECONDITION | Indicates that the CO has exceeded the volume's capabilities because the volume does not have MULTI_NODE capability. | Caller MAY choose to call `ValidateVolumeCapabilities` to validate the volume capabilities, or wait for the volume to be unpublished on the node. | | Staging target path not set | 9 FAILED_PRECONDITION | Indicates that `STAGE_UNSTAGE_VOLUME` capability is set but no `staging_target_path` was set. | Caller MUST make sure call to `NodeStageVolume` is made and returns success before retrying with valid `staging_target_path`. | @@ -1910,41 +1989,68 @@ The CO MUST implement the specified error recovery behavior when it encounters t | Condition | gRPC Code | Description | Recovery Behavior | |-----------|-----------|-------------|-------------------| | Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible and has not been deleted before retrying with exponential back off. | -| Operation pending for volume | 10 ABORTED | Indicates that there is a already an operation pending for the specified volume. In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call "in-flight" per volume at a given time. However, in some circumstances, the CO MAY lose state (for example when the CO crashes and restarts), and MAY issue multiple calls simultaneously for the same volume. The Plugin, SHOULD handle this as gracefully as possible, and MAY return this error code to reject secondary calls. | Caller SHOULD ensure that there are no other calls pending for the specified volume, and then retry with exponential back off. | -#### `NodeGetId` +#### `NodeGetVolumeStats` -`NodeGetId` RPC call is deprecated. -Users of this RPC call SHOULD use `NodeGetInfo`. +A Node plugin MUST implement this RPC call if it has GET_VOLUME_STATS node capability. +`NodeGetVolumeStats` RPC call returns the volume capacity statistics available for the volume. + +If the volume is being used in `BlockVolume` mode then `used` and `available` MAY be omitted from `usage` field of `NodeGetVolumeStatsResponse`. +Similarly, inode information MAY be omitted from `NodeGetVolumeStatsResponse` when unavailable. -A Node Plugin MUST implement this RPC call if the plugin has `PUBLISH_UNPUBLISH_VOLUME` controller capability. -The Plugin SHALL assume that this RPC will be executed on the node where the volume will be used. -The CO SHOULD call this RPC for the node at which it wants to place the workload. -The result of this call will be used by CO in `ControllerPublishVolume`. ```protobuf -message NodeGetIdRequest { - // Intentionally empty. -} +message NodeGetVolumeStatsRequest { + // The ID of the volume. This field is REQUIRED. + string volume_id = 1; -message NodeGetIdResponse { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent `ControllerPublishVolume`. + // It can be any valid path where volume was previously + // staged or published. + // It MUST be an absolute path in the root filesystem of + // the process serving this request. // This is a REQUIRED field. - string node_id = 1; + string volume_path = 2; +} + +message NodeGetVolumeStatsResponse { + // This field is OPTIONAL. 
+ repeated VolumeUsage usage = 1; +} + +message VolumeUsage { + enum Unit { + UNKNOWN = 0; + BYTES = 1; + INODES = 2; + } + // The available capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 available = 1; + + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + int64 total = 2; + + // The used capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + int64 used = 3; + + // Units by which values are measured. This field is REQUIRED. + Unit unit = 4; } ``` -##### NodeGetId Errors +##### NodeGetVolumeStats Errors -If the plugin is unable to complete the NodeGetId call successfully, it MUST return a non-ok gRPC code in the gRPC status. +If the plugin is unable to complete the `NodeGetVolumeStats` call successfully, it MUST return a non-ok gRPC code in the gRPC status. If the conditions defined below are encountered, the plugin MUST return the specified gRPC error code. The CO MUST implement the specified error recovery behavior when it encounters the gRPC error code. -Condition | gRPC Code | Description | Recovery Behavior -| --- | --- | --- | --- | -| Call not implemented | 12 UNIMPLEMENTED | NodeGetId call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | + +| Condition | gRPC Code | Description | Recovery Behavior | +|-----------|-----------|-------------|-------------------| +| Volume does not exist | 5 NOT_FOUND | Indicates that a volume corresponding to the specified `volume_id` does not exist on specified `volume_path`. | Caller MUST verify that the `volume_id` is correct and that the volume is accessible on specified `volume_path` and has not been deleted before retrying with exponential back off. | #### `NodeGetCapabilities` @@ -1968,6 +2074,10 @@ message NodeServiceCapability { enum Type { UNKNOWN = 0; STAGE_UNSTAGE_VOLUME = 1; + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + GET_VOLUME_STATS = 2; } Type type = 1; @@ -1990,6 +2100,8 @@ If the plugin is unable to complete the NodeGetCapabilities call successfully, i A Node Plugin MUST implement this RPC call if the plugin has `PUBLISH_UNPUBLISH_VOLUME` controller capability. The Plugin SHALL assume that this RPC will be executed on the node where the volume will be used. The CO SHOULD call this RPC for the node at which it wants to place the workload. +The CO MAY call this RPC more than once for a given node. +The SP SHALL NOT expect the CO to call this RPC more than once. The result of this call will be used by CO in `ControllerPublishVolume`. ```protobuf @@ -1997,9 +2109,14 @@ message NodeGetInfoRequest { } message NodeGetInfoResponse { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent calls to `ControllerPublishVolume`. - // This is a REQUIRED field. + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. 
+ // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. string node_id = 1; // Maximum number of volumes that controller can publish to the node. @@ -2012,7 +2129,7 @@ message NodeGetInfoResponse { // Specifies where (regions, zones, racks, etc.) the node is // accessible from. // A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. // COs MAY use this information along with the topology information // returned in CreateVolumeResponse to ensure that a given volume is // accessible from a given node when scheduling workloads. @@ -2033,14 +2150,8 @@ message NodeGetInfoResponse { ##### NodeGetInfo Errors If the plugin is unable to complete the NodeGetInfo call successfully, it MUST return a non-ok gRPC code in the gRPC status. -If the conditions defined below are encountered, the plugin MUST return the specified gRPC error code. The CO MUST implement the specified error recovery behavior when it encounters the gRPC error code. -Condition | gRPC Code | Description | Recovery Behavior -| --- | --- | --- | --- | -| Call not implemented | 12 UNIMPLEMENTED | NodeGetInfo call is not implemented by the plugin or disabled in the Plugin's current mode of operation. | Caller MUST NOT retry. Caller MAY call `ControllerGetCapabilities` or `NodeGetCapabilities` to discover Plugin capabilities. | - - ## Protocol ### Connectivity @@ -2051,7 +2162,7 @@ Condition | gRPC Code | Description | Recovery Behavior Support for OPTIONAL RPCs is reported by the `ControllerGetCapabilities` and `NodeGetCapabilities` RPC calls. * The CO SHALL provide the listen-address for the Plugin by way of the `CSI_ENDPOINT` environment variable. Plugin components SHALL create, bind, and listen for RPCs on the specified listen address. - * Only UNIX Domain Sockets may be used as endpoints. + * Only UNIX Domain Sockets MAY be used as endpoints. This will likely change in a future version of this specification to support non-UNIX platforms. * All supported RPC services MUST be available at the listen address of the Plugin. @@ -2060,7 +2171,7 @@ Condition | gRPC Code | Description | Recovery Behavior * The CO operator and Plugin Supervisor SHOULD take steps to ensure that any and all communication between the CO and Plugin Service are secured according to best practices. * Communication between a CO and a Plugin SHALL be transported over UNIX Domain Sockets. * gRPC is compatible with UNIX Domain Sockets; it is the responsibility of the CO operator and Plugin Supervisor to properly secure access to the Domain Socket using OS filesystem ACLs and/or other OS-specific security context tooling. - * SP’s supplying stand-alone Plugin controller appliances, or other remote components that are incompatible with UNIX Domain Sockets must provide a software component that proxies communication between a UNIX Domain Socket and the remote component(s). + * SP’s supplying stand-alone Plugin controller appliances, or other remote components that are incompatible with UNIX Domain Sockets MUST provide a software component that proxies communication between a UNIX Domain Socket and the remote component(s). Proxy components transporting communication over IP networks SHALL be responsible for securing communications over such networks. * Both the CO and Plugin SHOULD avoid accidental leakage of sensitive information (such as redacting such information from log files). 
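For illustration, here is a minimal plugin-side sketch of the connectivity requirements above: read `CSI_ENDPOINT`, listen only on a UNIX Domain Socket, and serve every supported RPC service at that single address. The socket-path handling and the commented registration calls are assumptions made for the example and are not normative.

```go
package main

import (
	"log"
	"net"
	"os"
	"strings"

	"google.golang.org/grpc"
)

func main() {
	// The Plugin Supervisor provides the listen address, e.g.
	// CSI_ENDPOINT=unix:///var/run/csi/plugin.sock
	ep := os.Getenv("CSI_ENDPOINT")
	if ep == "" {
		log.Fatal("CSI_ENDPOINT must be set by the Plugin Supervisor")
	}
	// Only UNIX Domain Sockets are valid endpoints in this version of the spec.
	path := strings.TrimPrefix(strings.TrimPrefix(ep, "unix://"), "unix:")
	_ = os.Remove(path) // clear a stale socket left over from a previous run

	lis, err := net.Listen("unix", path)
	if err != nil {
		log.Fatalf("failed to listen on %s: %v", path, err)
	}

	srv := grpc.NewServer()
	// All supported RPC services MUST be registered at this one listen
	// address, e.g. (SP implementations elided from this sketch):
	//   csi.RegisterIdentityServer(srv, identitySrv)
	//   csi.RegisterControllerServer(srv, controllerSrv)
	//   csi.RegisterNodeServer(srv, nodeSrv)
	log.Fatal(srv.Serve(lis))
}
```

Securing access to the socket (filesystem ACLs, security contexts) remains the responsibility of the CO operator and Plugin Supervisor, as stated above.
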
@@ -2105,8 +2216,8 @@ Condition | gRPC Code | Description | Recovery Behavior * Variables defined by this specification SHALL be identifiable by their `CSI_` name prefix. * Configuration properties not defined by the CSI specification SHALL NOT use the same `CSI_` name prefix; this prefix is reserved for common configuration properties defined by the CSI specification. -* The Plugin Supervisor SHOULD supply all recommended CSI environment variables to a Plugin. -* The Plugin Supervisor SHALL supply all required CSI environment variables to a Plugin. +* The Plugin Supervisor SHOULD supply all RECOMMENDED CSI environment variables to a Plugin. +* The Plugin Supervisor SHALL supply all REQUIRED CSI environment variables to a Plugin. ##### `CSI_ENDPOINT` @@ -2141,8 +2252,8 @@ Supervised plugins MAY be isolated and/or resource-bounded. ##### Available Services * Plugin Packages MAY support all or a subset of CSI services; service combinations MAY be configurable at runtime by the Plugin Supervisor. - * A plugin must know the "mode" in which it is operating (e.g. node, controller, or both). - * This specification does not dictate the mechanism by which mode of operation must be discovered, and instead places that burden upon the SP. + * A plugin MUST know the "mode" in which it is operating (e.g. node, controller, or both). + * This specification does not dictate the mechanism by which mode of operation MUST be discovered, and instead places that burden upon the SP. * Misconfigured plugin software SHOULD fail-fast with an OS-appropriate error code. ##### Linux Capabilities @@ -2158,7 +2269,7 @@ Supervised plugins MAY be isolated and/or resource-bounded. ##### Cgroup Isolation * A Plugin MAY be constrained by cgroups. -* An operator or Plugin Supervisor MAY configure the devices cgroup subsystem to ensure that a Plugin may access requisite devices. +* An operator or Plugin Supervisor MAY configure the devices cgroup subsystem to ensure that a Plugin MAY access requisite devices. * A Plugin Supervisor MAY define resource limits for a Plugin. ##### Resource Requirements diff --git a/vendor/github.com/kubernetes-csi/csi-test/.gitignore b/vendor/github.com/kubernetes-csi/csi-test/.gitignore index 984ec0fb..fc211aec 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/.gitignore +++ b/vendor/github.com/kubernetes-csi/csi-test/.gitignore @@ -9,5 +9,8 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out -bin/mock + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ +vendor/ cmd/csi-sanity/csi-sanity diff --git a/vendor/github.com/kubernetes-csi/csi-test/.travis.yml b/vendor/github.com/kubernetes-csi/csi-test/.travis.yml index 261662d3..349982d2 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/.travis.yml +++ b/vendor/github.com/kubernetes-csi/csi-test/.travis.yml @@ -1,11 +1,12 @@ language: go +install: + - curl https://glide.sh/get | sh + - glide install -v matrix: include: - - go: 1.10.3 + - go: 1.9.2 script: -- make test -after_success: - - if [ "${TRAVIS_BRANCH}" == "master" ] && [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then - docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" quay.io; - make push; - fi +- go fmt $(go list ./... | grep -v vendor) | wc -l | grep 0 +- go vet $(go list ./... | grep -v vendor) +- go test $(go list ./... 
| grep -v vendor | grep -v "cmd/csi-sanity") +- ./hack/e2e.sh diff --git a/vendor/github.com/kubernetes-csi/csi-test/Dockerfile.mock b/vendor/github.com/kubernetes-csi/csi-test/Dockerfile.mock deleted file mode 100644 index 72697712..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/Dockerfile.mock +++ /dev/null @@ -1,6 +0,0 @@ -FROM alpine -LABEL maintainers="Kubernetes Authors" -LABEL description="CSI Mock Driver" - -COPY ./bin/mock mock -ENTRYPOINT ["/mock"] diff --git a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock deleted file mode 100644 index 2737ba71..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock +++ /dev/null @@ -1,195 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/container-storage-interface/spec" - packages = ["lib/go/csi/v0"] - revision = "2178fdeea87f1150a17a63252eee28d4d8141f72" - version = "v0.3.0" - -[[projects]] - name = "github.com/golang/mock" - packages = ["gomock"] - revision = "c34cdb4725f4c3844d095133c6e40e448b86589b" - version = "v1.1.1" - -[[projects]] - name = "github.com/golang/protobuf" - packages = [ - "proto", - "protoc-gen-go/descriptor", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - "ptypes/wrappers" - ] - revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" - version = "v1.1.0" - -[[projects]] - name = "github.com/onsi/ginkgo" - packages = [ - ".", - "config", - "internal/codelocation", - "internal/containernode", - "internal/failer", - "internal/leafnodes", - "internal/remote", - "internal/spec", - "internal/spec_iterator", - "internal/specrunner", - "internal/suite", - "internal/testingtproxy", - "internal/writer", - "reporters", - "reporters/stenographer", - "reporters/stenographer/support/go-colorable", - "reporters/stenographer/support/go-isatty", - "types" - ] - revision = "fa5fabab2a1bfbd924faf4c067d07ae414e2aedf" - version = "v1.5.0" - -[[projects]] - name = "github.com/onsi/gomega" - packages = [ - ".", - "format", - "internal/assertion", - "internal/asyncassertion", - "internal/oraclematcher", - "internal/testingtsupport", - "matchers", - "matchers/support/goraph/bipartitegraph", - "matchers/support/goraph/edge", - "matchers/support/goraph/node", - "matchers/support/goraph/util", - "types" - ] - revision = "62bff4df71bdbc266561a0caee19f0594b17c240" - version = "v1.4.0" - -[[projects]] - name = "github.com/sirupsen/logrus" - packages = ["."] - revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" - version = "v1.0.5" - -[[projects]] - branch = "master" - name = "golang.org/x/crypto" - packages = ["ssh/terminal"] - revision = "8ac0e0d97ce45cd83d1d7243c060cb8461dda5e9" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = [ - "context", - "html", - "html/atom", - "html/charset", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "trace" - ] - revision = "1e491301e022f8f977054da4c2d852decd59571f" - -[[projects]] - branch = "master" - name = "golang.org/x/sys" - packages = [ - "unix", - "windows" - ] - revision = "9527bec2660bd847c050fda93a0f0c6dee0800bb" - -[[projects]] - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "encoding", - "encoding/charmap", - "encoding/htmlindex", - "encoding/internal", - "encoding/internal/identifier", - "encoding/japanese", - 
"encoding/korean", - "encoding/simplifiedchinese", - "encoding/traditionalchinese", - "encoding/unicode", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "internal/utf8internal", - "language", - "runes", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable" - ] - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - revision = "32ee49c4dd805befd833990acba36cb75042378c" - -[[projects]] - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "channelz", - "codes", - "connectivity", - "credentials", - "encoding", - "encoding/proto", - "grpclb/grpc_lb_v1/messages", - "grpclog", - "internal", - "keepalive", - "metadata", - "naming", - "peer", - "reflection", - "reflection/grpc_reflection_v1alpha", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap", - "transport" - ] - revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b" - version = "v1.12.2" - -[[projects]] - name = "gopkg.in/yaml.v2" - packages = ["."] - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "5dd480018adbb94025564b74bad8dd269cc516183b7b428317f6dd04b07726f4" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml deleted file mode 100644 index e7312785..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml +++ /dev/null @@ -1,62 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - name = "github.com/container-storage-interface/spec" - version = "~0.3.0" - -[[constraint]] - name = "github.com/golang/mock" - version = "1.0.0" - -[[constraint]] - name = "github.com/golang/protobuf" - version = "v1.1.0" - -[[constraint]] - name = "github.com/onsi/ginkgo" - version = "1.4.0" - -[[constraint]] - name = "github.com/onsi/gomega" - version = "1.3.0" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" - -[[constraint]] - name = "google.golang.org/grpc" - version = "1.9.2" - -[[constraint]] - name = "gopkg.in/yaml.v2" - version = "v2.1.1" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/github.com/kubernetes-csi/csi-test/Makefile b/vendor/github.com/kubernetes-csi/csi-test/Makefile deleted file mode 100644 index 621cec71..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/Makefile +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2018 The Kubernetes Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -IMAGE_NAME = quay.io/k8scsi/mock-driver -IMAGE_VERSION = canary -APP := ./bin/mock - - -ifdef V -TESTARGS = -v -args -alsologtostderr -v 5 -else -TESTARGS = -endif - -all: $(APP) - -$(APP): - mkdir -p bin - CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o $(APP) ./mock/main.go - -clean: - rm -rf bin - -container: $(APP) - docker build -f Dockerfile.mock -t $(IMAGE_NAME):$(IMAGE_VERSION) . - -push: container - docker push $(IMAGE_NAME):$(IMAGE_VERSION) - -test: - files=$$(find ./ -name '*.go' | grep -v '^./vendor' ); \ - if [ $$(gofmt -d $$files | wc -l) -ne 0 ]; then \ - echo "formatting errors:"; \ - gofmt -d $$files; \ - false; \ - fi - go vet $$(go list ./... | grep -v vendor) - go test $$(go list ./... | grep -v vendor | grep -v "cmd/csi-sanity") - ./hack/e2e.sh - -.PHONY: all clean container push test diff --git a/vendor/github.com/kubernetes-csi/csi-test/OWNERS b/vendor/github.com/kubernetes-csi/csi-test/OWNERS deleted file mode 100644 index a780cce6..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -approvers: -- saad-ali -- lpabon -- pohly diff --git a/vendor/github.com/kubernetes-csi/csi-test/README.md b/vendor/github.com/kubernetes-csi/csi-test/README.md index 61daecc0..e21d8399 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/README.md @@ -1,29 +1,15 @@ [![Build Status](https://travis-ci.org/kubernetes-csi/csi-test.svg?branch=master)](https://travis-ci.org/kubernetes-csi/csi-test) -[![Docker Repository on Quay](https://quay.io/repository/k8scsi/mock-driver/status "Docker Repository on -Quay")](https://quay.io/repository/k8scsi/mock-driver) - # csi-test csi-test houses packages and libraries to help test CSI client and plugins. -## For Container Orchestration Tests +## For Container Orchestration Unit Tests CO developers can use this framework to create drivers based on the [Golang mock](https://github.com/golang/mock) framework. Please see [co_test.go](test/co_test.go) for an example. -### Mock driver for testing -We also provide a container called `quay.io/k8scsi/mock-driver:canary` which can be used as an in-memory mock driver. -It follows the same release cycle as other containers, so the latest release is `quay.io/k8scsi/mock-driver:v0.3.0`. - -You will need to setup the environment variable `CSI_ENDPOINT` for the mock driver to know where to create the unix -domain socket. - -## For CSI Driver Tests -To test drivers please take a look at [pkg/sanity](https://github.com/kubernetes-csi/csi-test/tree/master/pkg/sanity). -This package and [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity) are meant to test -the CSI API capability of a driver. They are meant to be an additional test to the unit, functional, and e2e tests of a -CSI driver. 
+## For CSI Driver Unit Tests +To test drivers please take a look at [pkg/sanity](https://github.com/kubernetes-csi/csi-test/tree/master/pkg/sanity) ### Note -* Master is for CSI v0.4.0. Please see the branches for other CSI releases. * Only Golang 1.9+ supported. See [gRPC issue](https://github.com/grpc/grpc-go/issues/711#issuecomment-326626790) diff --git a/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS b/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS deleted file mode 100644 index 00e28e4e..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS +++ /dev/null @@ -1,14 +0,0 @@ -# Defined below are the security contacts for this repo. -# -# They are the contact point for the Product Security Team to reach out -# to for triaging and handling of incoming issues. -# -# The below names agree to abide by the -# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) -# and will be removed and replaced if they violate that agreement. -# -# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE -# INSTRUCTIONS AT https://kubernetes.io/security/ - -saad-ali -lpabon diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile index 520c2153..b0ecbeac 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile @@ -1,6 +1,5 @@ APP_NAME := csi-sanity VER :=$(shell git describe) -RELEASEVER := $(shell git describe --abbrev=0) BRANCH := $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) SHA := $(shell git rev-parse --short HEAD) ARCH := $(shell go env GOARCH) @@ -18,7 +17,7 @@ endif endif LDFLAGS :=-ldflags "-w -X github.com/kubernetes-csi/csi-test/cmd/csi-sanity.VERSION=$(VERSION) -extldflags '-z relro -z now'" -PACKAGE :=$(DIR)/dist/$(APP_NAME)-$(RELEASEVER).$(GOOS).$(ARCH).tar.gz +PACKAGE :=$(DIR)/dist/$(APP_NAME)-$(VERSION).$(GOOS).$(ARCH).tar.gz all: $(APP_NAME) @@ -30,11 +29,9 @@ install: $(APP_NAME) clean: rm -f csi-sanity - -dist-clean: rm -rf $(DIR)/dist -dist: clean $(PACKAGE) +dist: $(PACKAGE) $(PACKAGE): $(APP_NAME) @echo Packaging Binaries... 
@@ -49,13 +46,16 @@ $(PACKAGE): $(APP_NAME) linux_amd64_dist: GOOS=linux GOARCH=amd64 $(MAKE) dist +linux_arm_dist: + GOOS=linux GOARCH=arm $(MAKE) dist + linux_arm64_dist: GOOS=linux GOARCH=arm64 $(MAKE) dist darwin_amd64_dist: GOOS=darwin GOARCH=amd64 $(MAKE) dist -release: dist-clean darwin_amd64_dist linux_amd64_dist linux_arm64_dist +release: darwin_amd64_dist linux_arm_dist linux_amd64_dist linux_arm64_dist .PHONY: release darwin_amd64_dist linux_arm64_dist linux_amd64_dist \ - linux_arm_dist linux_amd64_dist clean dist-clean + linux_arm_dist linux_amd64_dist clean diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md index dade1018..36c282ad 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md @@ -7,42 +7,12 @@ Example: $ csi-sanity --csi.endpoint= ``` -If you want to specify a mount point: - -``` -$ csi-sanity --csi.endpoint= --csi.mountpoint=/mnt -``` - For verbose type: ``` $ csi-sanity --ginkgo.v --csi.endpoint= ``` -For csi-credentials, create a secrets file with all the secrets in it: -```yaml -CreateVolumeSecret: - secretKey: secretval1 -DeleteVolumeSecret: - secretKey: secretval2 -ControllerPublishVolumeSecret: - secretKey: secretval3 -ControllerUnpublishVolumeSecret: - secretKey: secretval4 -NodeStageVolumeSecret: - secretKey: secretval5 -NodePublishVolumeSecret: - secretKey: secretval6 -``` - -Pass the file path to csi-sanity as: -``` -$ csi-sanity --csi.endpoint= --csi.secrets= -``` - -Replace the keys and values of the credentials appropriately. Since the whole -secret is passed in the request, multiple key-val pairs can be used. - ### Help The full Ginkgo and golang unit test parameters are available. 
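For driver authors, the checks run by the csi-sanity binary can also be embedded in a repository's own Go tests. The sketch below is a minimal, hedged example that mirrors the sanity.Test(t, endpoint) call made in cmd/csi-sanity/sanity_test.go (diffed just below); the package name and socket path are illustrative placeholders, and the driver is assumed to already be serving on that endpoint.

```go
package mydriver_test

import (
	"testing"

	"github.com/kubernetes-csi/csi-test/pkg/sanity"
)

// TestCSISanity runs the shared sanity suite against a driver that is assumed
// to already be listening on the given endpoint (placeholder path).
func TestCSISanity(t *testing.T) {
	sanity.Test(t, "/tmp/csi-sanity-test.sock")
}
```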
Type diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go index a4f4707a..88793f96 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go @@ -18,7 +18,6 @@ package sanity import ( "flag" "fmt" - "os" "testing" "github.com/kubernetes-csi/csi-test/pkg/sanity" @@ -29,18 +28,14 @@ const ( ) var ( - VERSION = "(dev)" - version bool - config sanity.Config + VERSION = "(dev)" + endpoint string + version bool ) func init() { - flag.StringVar(&config.Address, prefix+"endpoint", "", "CSI endpoint") + flag.StringVar(&endpoint, prefix+"endpoint", "", "CSI endpoint") flag.BoolVar(&version, prefix+"version", false, "Version of this program") - flag.StringVar(&config.TargetPath, prefix+"mountdir", os.TempDir()+"/csi", "Mount point for NodePublish") - flag.StringVar(&config.StagingPath, prefix+"stagingdir", os.TempDir()+"/csi", "Mount point for NodeStage if staging is supported") - flag.StringVar(&config.SecretsFile, prefix+"secrets", "", "CSI secrets file") - flag.Int64Var(&config.TestVolumeSize, prefix+"testvolumesize", sanity.DefTestVolumeSize, "Base volume size used for provisioned volumes") flag.Parse() } @@ -49,8 +44,8 @@ func TestSanity(t *testing.T) { fmt.Printf("Version = %s\n", VERSION) return } - if len(config.Address) == 0 { + if len(endpoint) == 0 { t.Fatalf("--%sendpoint must be provided with an CSI endpoint", prefix) } - sanity.Test(t, &config) + sanity.Test(t, endpoint) } diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go index a8cd796f..94145df2 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go @@ -14,234 +14,127 @@ See the License for the specific language governing permissions and limitations under the License. */ -//go:generate mockgen -package=driver -destination=driver.mock.go github.com/container-storage-interface/spec/lib/go/csi/v0 IdentityServer,ControllerServer,NodeServer +//go:generate mockgen -package=driver -destination=driver.mock.go github.com/container-storage-interface/spec/lib/go/csi IdentityServer,ControllerServer,NodeServer package driver import ( - "context" - "errors" "net" "sync" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/kubernetes-csi/csi-test/utils" "google.golang.org/grpc" "google.golang.org/grpc/reflection" ) -var ( - // ErrNoCredentials is the error when a secret is enabled but not passed in the request. - ErrNoCredentials = errors.New("secret must be provided") - // ErrAuthFailed is the error when the secret is incorrect. - ErrAuthFailed = errors.New("authentication failed") -) - -type CSIDriverServers struct { - Controller csi.ControllerServer - Identity csi.IdentityServer - Node csi.NodeServer -} - -// This is the key name in all the CSI secret objects. -const secretField = "secretKey" - -// CSICreds is a driver specific secret type. Drivers can have a key-val pair of -// secrets. This mock driver has a single string secret with secretField as the -// key. 
-type CSICreds struct { - CreateVolumeSecret string - DeleteVolumeSecret string - ControllerPublishVolumeSecret string - ControllerUnpublishVolumeSecret string - NodeStageVolumeSecret string - NodePublishVolumeSecret string - CreateSnapshotSecret string - DeleteSnapshotSecret string +type MockCSIDriverServers struct { + Controller *MockControllerServer + Identity *MockIdentityServer + Node *MockNodeServer } -type CSIDriver struct { +type MockCSIDriver struct { listener net.Listener server *grpc.Server - servers *CSIDriverServers + conn *grpc.ClientConn + servers *MockCSIDriverServers wg sync.WaitGroup running bool lock sync.Mutex - creds *CSICreds } -func NewCSIDriver(servers *CSIDriverServers) *CSIDriver { - return &CSIDriver{ +func NewMockCSIDriver(servers *MockCSIDriverServers) *MockCSIDriver { + return &MockCSIDriver{ servers: servers, } } -func (c *CSIDriver) goServe(started chan<- bool) { - c.wg.Add(1) +func (m *MockCSIDriver) goServe(started chan<- bool) { + m.wg.Add(1) go func() { - defer c.wg.Done() + defer m.wg.Done() started <- true - err := c.server.Serve(c.listener) + err := m.server.Serve(m.listener) if err != nil { panic(err.Error()) } }() } -func (c *CSIDriver) Address() string { - return c.listener.Addr().String() +func (m *MockCSIDriver) Address() string { + return m.listener.Addr().String() } -func (c *CSIDriver) Start(l net.Listener) error { - c.lock.Lock() - defer c.lock.Unlock() +func (m *MockCSIDriver) Start() error { + m.lock.Lock() + defer m.lock.Unlock() - // Set listener - c.listener = l + // Listen on a port assigned by the net package + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return err + } + m.listener = l // Create a new grpc server - c.server = grpc.NewServer( - grpc.UnaryInterceptor(c.authInterceptor), - ) + m.server = grpc.NewServer() // Register Mock servers - if c.servers.Controller != nil { - csi.RegisterControllerServer(c.server, c.servers.Controller) + if m.servers.Controller != nil { + csi.RegisterControllerServer(m.server, m.servers.Controller) } - if c.servers.Identity != nil { - csi.RegisterIdentityServer(c.server, c.servers.Identity) + if m.servers.Identity != nil { + csi.RegisterIdentityServer(m.server, m.servers.Identity) } - if c.servers.Node != nil { - csi.RegisterNodeServer(c.server, c.servers.Node) + if m.servers.Node != nil { + csi.RegisterNodeServer(m.server, m.servers.Node) } - reflection.Register(c.server) + reflection.Register(m.server) // Start listening for requests waitForServer := make(chan bool) - c.goServe(waitForServer) + m.goServe(waitForServer) <-waitForServer - c.running = true + m.running = true return nil } -func (c *CSIDriver) Stop() { - c.lock.Lock() - defer c.lock.Unlock() - - if !c.running { - return +func (m *MockCSIDriver) Nexus() (*grpc.ClientConn, error) { + // Start server + err := m.Start() + if err != nil { + return nil, err } - c.server.Stop() - c.wg.Wait() -} - -func (c *CSIDriver) Close() { - c.server.Stop() -} - -func (c *CSIDriver) IsRunning() bool { - c.lock.Lock() - defer c.lock.Unlock() - - return c.running -} - -// SetDefaultCreds sets the default secrets for CSI creds. 
-func (c *CSIDriver) SetDefaultCreds() { - c.creds = &CSICreds{ - CreateVolumeSecret: "secretval1", - DeleteVolumeSecret: "secretval2", - ControllerPublishVolumeSecret: "secretval3", - ControllerUnpublishVolumeSecret: "secretval4", - NodeStageVolumeSecret: "secretval5", - NodePublishVolumeSecret: "secretval6", - CreateSnapshotSecret: "secretval7", - DeleteSnapshotSecret: "secretval8", - } -} - -func (c *CSIDriver) authInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - if c.creds != nil { - authenticated, authErr := isAuthenticated(req, c.creds) - if !authenticated { - if authErr == ErrNoCredentials { - return nil, status.Error(codes.InvalidArgument, authErr.Error()) - } - if authErr == ErrAuthFailed { - return nil, status.Error(codes.Unauthenticated, authErr.Error()) - } - } + // Create a client connection + m.conn, err = utils.Connect(m.Address()) + if err != nil { + return nil, err } - h, err := handler(ctx, req) - - return h, err -} - -func isAuthenticated(req interface{}, creds *CSICreds) (bool, error) { - switch r := req.(type) { - case *csi.CreateVolumeRequest: - return authenticateCreateVolume(r, creds) - case *csi.DeleteVolumeRequest: - return authenticateDeleteVolume(r, creds) - case *csi.ControllerPublishVolumeRequest: - return authenticateControllerPublishVolume(r, creds) - case *csi.ControllerUnpublishVolumeRequest: - return authenticateControllerUnpublishVolume(r, creds) - case *csi.NodeStageVolumeRequest: - return authenticateNodeStageVolume(r, creds) - case *csi.NodePublishVolumeRequest: - return authenticateNodePublishVolume(r, creds) - case *csi.CreateSnapshotRequest: - return authenticateCreateSnapshot(r, creds) - case *csi.DeleteSnapshotRequest: - return authenticateDeleteSnapshot(r, creds) - default: - return true, nil - } -} - -func authenticateCreateVolume(req *csi.CreateVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetControllerCreateSecrets(), creds.CreateVolumeSecret) -} - -func authenticateDeleteVolume(req *csi.DeleteVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetControllerDeleteSecrets(), creds.DeleteVolumeSecret) -} - -func authenticateControllerPublishVolume(req *csi.ControllerPublishVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetControllerPublishSecrets(), creds.ControllerPublishVolumeSecret) + return m.conn, nil } -func authenticateControllerUnpublishVolume(req *csi.ControllerUnpublishVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetControllerUnpublishSecrets(), creds.ControllerUnpublishVolumeSecret) -} +func (m *MockCSIDriver) Stop() { + m.lock.Lock() + defer m.lock.Unlock() -func authenticateNodeStageVolume(req *csi.NodeStageVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetNodeStageSecrets(), creds.NodeStageVolumeSecret) -} - -func authenticateNodePublishVolume(req *csi.NodePublishVolumeRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetNodePublishSecrets(), creds.NodePublishVolumeSecret) -} + if !m.running { + return + } -func authenticateCreateSnapshot(req *csi.CreateSnapshotRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetCreateSnapshotSecrets(), creds.CreateSnapshotSecret) + m.server.Stop() + m.wg.Wait() } -func authenticateDeleteSnapshot(req *csi.DeleteSnapshotRequest, creds *CSICreds) (bool, error) { - return credsCheck(req.GetDeleteSnapshotSecrets(), creds.DeleteSnapshotSecret) +func (m *MockCSIDriver) 
Close() { + m.conn.Close() + m.server.Stop() } -func credsCheck(secrets map[string]string, secretVal string) (bool, error) { - if len(secrets) == 0 { - return false, ErrNoCredentials - } +func (m *MockCSIDriver) IsRunning() bool { + m.lock.Lock() + defer m.lock.Unlock() - if secrets[secretField] != secretVal { - return false, ErrAuthFailed - } - return true, nil + return m.running } diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go index f6d2b135..ed14e019 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go @@ -1,12 +1,12 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/container-storage-interface/spec/lib/go/csi/v0 (interfaces: IdentityServer,ControllerServer,NodeServer) +// Source: github.com/container-storage-interface/spec/lib/go/csi (interfaces: IdentityServer,ControllerServer,NodeServer) // Package driver is a generated GoMock package. package driver import ( context "context" - v0 "github.com/container-storage-interface/spec/lib/go/csi/v0" + csi "github.com/container-storage-interface/spec/lib/go/csi" gomock "github.com/golang/mock/gomock" reflect "reflect" ) @@ -34,23 +34,10 @@ func (m *MockIdentityServer) EXPECT() *MockIdentityServerMockRecorder { return m.recorder } -// GetPluginCapabilities mocks base method -func (m *MockIdentityServer) GetPluginCapabilities(arg0 context.Context, arg1 *v0.GetPluginCapabilitiesRequest) (*v0.GetPluginCapabilitiesResponse, error) { - ret := m.ctrl.Call(m, "GetPluginCapabilities", arg0, arg1) - ret0, _ := ret[0].(*v0.GetPluginCapabilitiesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPluginCapabilities indicates an expected call of GetPluginCapabilities -func (mr *MockIdentityServerMockRecorder) GetPluginCapabilities(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginCapabilities", reflect.TypeOf((*MockIdentityServer)(nil).GetPluginCapabilities), arg0, arg1) -} - // GetPluginInfo mocks base method -func (m *MockIdentityServer) GetPluginInfo(arg0 context.Context, arg1 *v0.GetPluginInfoRequest) (*v0.GetPluginInfoResponse, error) { +func (m *MockIdentityServer) GetPluginInfo(arg0 context.Context, arg1 *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { ret := m.ctrl.Call(m, "GetPluginInfo", arg0, arg1) - ret0, _ := ret[0].(*v0.GetPluginInfoResponse) + ret0, _ := ret[0].(*csi.GetPluginInfoResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -60,17 +47,17 @@ func (mr *MockIdentityServerMockRecorder) GetPluginInfo(arg0, arg1 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginInfo", reflect.TypeOf((*MockIdentityServer)(nil).GetPluginInfo), arg0, arg1) } -// Probe mocks base method -func (m *MockIdentityServer) Probe(arg0 context.Context, arg1 *v0.ProbeRequest) (*v0.ProbeResponse, error) { - ret := m.ctrl.Call(m, "Probe", arg0, arg1) - ret0, _ := ret[0].(*v0.ProbeResponse) +// GetSupportedVersions mocks base method +func (m *MockIdentityServer) GetSupportedVersions(arg0 context.Context, arg1 *csi.GetSupportedVersionsRequest) (*csi.GetSupportedVersionsResponse, error) { + ret := m.ctrl.Call(m, "GetSupportedVersions", arg0, arg1) + ret0, _ := ret[0].(*csi.GetSupportedVersionsResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// Probe indicates an expected 
call of Probe -func (mr *MockIdentityServerMockRecorder) Probe(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Probe", reflect.TypeOf((*MockIdentityServer)(nil).Probe), arg0, arg1) +// GetSupportedVersions indicates an expected call of GetSupportedVersions +func (mr *MockIdentityServerMockRecorder) GetSupportedVersions(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSupportedVersions", reflect.TypeOf((*MockIdentityServer)(nil).GetSupportedVersions), arg0, arg1) } // MockControllerServer is a mock of ControllerServer interface @@ -97,9 +84,9 @@ func (m *MockControllerServer) EXPECT() *MockControllerServerMockRecorder { } // ControllerGetCapabilities mocks base method -func (m *MockControllerServer) ControllerGetCapabilities(arg0 context.Context, arg1 *v0.ControllerGetCapabilitiesRequest) (*v0.ControllerGetCapabilitiesResponse, error) { +func (m *MockControllerServer) ControllerGetCapabilities(arg0 context.Context, arg1 *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) { ret := m.ctrl.Call(m, "ControllerGetCapabilities", arg0, arg1) - ret0, _ := ret[0].(*v0.ControllerGetCapabilitiesResponse) + ret0, _ := ret[0].(*csi.ControllerGetCapabilitiesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -109,10 +96,23 @@ func (mr *MockControllerServerMockRecorder) ControllerGetCapabilities(arg0, arg1 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerGetCapabilities", reflect.TypeOf((*MockControllerServer)(nil).ControllerGetCapabilities), arg0, arg1) } +// ControllerProbe mocks base method +func (m *MockControllerServer) ControllerProbe(arg0 context.Context, arg1 *csi.ControllerProbeRequest) (*csi.ControllerProbeResponse, error) { + ret := m.ctrl.Call(m, "ControllerProbe", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerProbeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerProbe indicates an expected call of ControllerProbe +func (mr *MockControllerServerMockRecorder) ControllerProbe(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerProbe", reflect.TypeOf((*MockControllerServer)(nil).ControllerProbe), arg0, arg1) +} + // ControllerPublishVolume mocks base method -func (m *MockControllerServer) ControllerPublishVolume(arg0 context.Context, arg1 *v0.ControllerPublishVolumeRequest) (*v0.ControllerPublishVolumeResponse, error) { +func (m *MockControllerServer) ControllerPublishVolume(arg0 context.Context, arg1 *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { ret := m.ctrl.Call(m, "ControllerPublishVolume", arg0, arg1) - ret0, _ := ret[0].(*v0.ControllerPublishVolumeResponse) + ret0, _ := ret[0].(*csi.ControllerPublishVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -123,9 +123,9 @@ func (mr *MockControllerServerMockRecorder) ControllerPublishVolume(arg0, arg1 i } // ControllerUnpublishVolume mocks base method -func (m *MockControllerServer) ControllerUnpublishVolume(arg0 context.Context, arg1 *v0.ControllerUnpublishVolumeRequest) (*v0.ControllerUnpublishVolumeResponse, error) { +func (m *MockControllerServer) ControllerUnpublishVolume(arg0 context.Context, arg1 *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) { ret := m.ctrl.Call(m, "ControllerUnpublishVolume", arg0, arg1) - ret0, _ := ret[0].(*v0.ControllerUnpublishVolumeResponse) + ret0, _ := 
ret[0].(*csi.ControllerUnpublishVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -135,23 +135,10 @@ func (mr *MockControllerServerMockRecorder) ControllerUnpublishVolume(arg0, arg1 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerUnpublishVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerUnpublishVolume), arg0, arg1) } -// CreateSnapshot mocks base method -func (m *MockControllerServer) CreateSnapshot(arg0 context.Context, arg1 *v0.CreateSnapshotRequest) (*v0.CreateSnapshotResponse, error) { - ret := m.ctrl.Call(m, "CreateSnapshot", arg0, arg1) - ret0, _ := ret[0].(*v0.CreateSnapshotResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateSnapshot indicates an expected call of CreateSnapshot -func (mr *MockControllerServerMockRecorder) CreateSnapshot(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSnapshot", reflect.TypeOf((*MockControllerServer)(nil).CreateSnapshot), arg0, arg1) -} - // CreateVolume mocks base method -func (m *MockControllerServer) CreateVolume(arg0 context.Context, arg1 *v0.CreateVolumeRequest) (*v0.CreateVolumeResponse, error) { +func (m *MockControllerServer) CreateVolume(arg0 context.Context, arg1 *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { ret := m.ctrl.Call(m, "CreateVolume", arg0, arg1) - ret0, _ := ret[0].(*v0.CreateVolumeResponse) + ret0, _ := ret[0].(*csi.CreateVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -161,23 +148,10 @@ func (mr *MockControllerServerMockRecorder) CreateVolume(arg0, arg1 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolume", reflect.TypeOf((*MockControllerServer)(nil).CreateVolume), arg0, arg1) } -// DeleteSnapshot mocks base method -func (m *MockControllerServer) DeleteSnapshot(arg0 context.Context, arg1 *v0.DeleteSnapshotRequest) (*v0.DeleteSnapshotResponse, error) { - ret := m.ctrl.Call(m, "DeleteSnapshot", arg0, arg1) - ret0, _ := ret[0].(*v0.DeleteSnapshotResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteSnapshot indicates an expected call of DeleteSnapshot -func (mr *MockControllerServerMockRecorder) DeleteSnapshot(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSnapshot", reflect.TypeOf((*MockControllerServer)(nil).DeleteSnapshot), arg0, arg1) -} - // DeleteVolume mocks base method -func (m *MockControllerServer) DeleteVolume(arg0 context.Context, arg1 *v0.DeleteVolumeRequest) (*v0.DeleteVolumeResponse, error) { +func (m *MockControllerServer) DeleteVolume(arg0 context.Context, arg1 *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { ret := m.ctrl.Call(m, "DeleteVolume", arg0, arg1) - ret0, _ := ret[0].(*v0.DeleteVolumeResponse) + ret0, _ := ret[0].(*csi.DeleteVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -188,9 +162,9 @@ func (mr *MockControllerServerMockRecorder) DeleteVolume(arg0, arg1 interface{}) } // GetCapacity mocks base method -func (m *MockControllerServer) GetCapacity(arg0 context.Context, arg1 *v0.GetCapacityRequest) (*v0.GetCapacityResponse, error) { +func (m *MockControllerServer) GetCapacity(arg0 context.Context, arg1 *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) { ret := m.ctrl.Call(m, "GetCapacity", arg0, arg1) - ret0, _ := ret[0].(*v0.GetCapacityResponse) + ret0, _ := ret[0].(*csi.GetCapacityResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -200,23 +174,10 @@ func (mr *MockControllerServerMockRecorder) 
GetCapacity(arg0, arg1 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCapacity", reflect.TypeOf((*MockControllerServer)(nil).GetCapacity), arg0, arg1) } -// ListSnapshots mocks base method -func (m *MockControllerServer) ListSnapshots(arg0 context.Context, arg1 *v0.ListSnapshotsRequest) (*v0.ListSnapshotsResponse, error) { - ret := m.ctrl.Call(m, "ListSnapshots", arg0, arg1) - ret0, _ := ret[0].(*v0.ListSnapshotsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListSnapshots indicates an expected call of ListSnapshots -func (mr *MockControllerServerMockRecorder) ListSnapshots(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSnapshots", reflect.TypeOf((*MockControllerServer)(nil).ListSnapshots), arg0, arg1) -} - // ListVolumes mocks base method -func (m *MockControllerServer) ListVolumes(arg0 context.Context, arg1 *v0.ListVolumesRequest) (*v0.ListVolumesResponse, error) { +func (m *MockControllerServer) ListVolumes(arg0 context.Context, arg1 *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { ret := m.ctrl.Call(m, "ListVolumes", arg0, arg1) - ret0, _ := ret[0].(*v0.ListVolumesResponse) + ret0, _ := ret[0].(*csi.ListVolumesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -227,9 +188,9 @@ func (mr *MockControllerServerMockRecorder) ListVolumes(arg0, arg1 interface{}) } // ValidateVolumeCapabilities mocks base method -func (m *MockControllerServer) ValidateVolumeCapabilities(arg0 context.Context, arg1 *v0.ValidateVolumeCapabilitiesRequest) (*v0.ValidateVolumeCapabilitiesResponse, error) { +func (m *MockControllerServer) ValidateVolumeCapabilities(arg0 context.Context, arg1 *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) { ret := m.ctrl.Call(m, "ValidateVolumeCapabilities", arg0, arg1) - ret0, _ := ret[0].(*v0.ValidateVolumeCapabilitiesResponse) + ret0, _ := ret[0].(*csi.ValidateVolumeCapabilitiesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -262,49 +223,49 @@ func (m *MockNodeServer) EXPECT() *MockNodeServerMockRecorder { return m.recorder } -// NodeGetCapabilities mocks base method -func (m *MockNodeServer) NodeGetCapabilities(arg0 context.Context, arg1 *v0.NodeGetCapabilitiesRequest) (*v0.NodeGetCapabilitiesResponse, error) { - ret := m.ctrl.Call(m, "NodeGetCapabilities", arg0, arg1) - ret0, _ := ret[0].(*v0.NodeGetCapabilitiesResponse) +// GetNodeID mocks base method +func (m *MockNodeServer) GetNodeID(arg0 context.Context, arg1 *csi.GetNodeIDRequest) (*csi.GetNodeIDResponse, error) { + ret := m.ctrl.Call(m, "GetNodeID", arg0, arg1) + ret0, _ := ret[0].(*csi.GetNodeIDResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// NodeGetCapabilities indicates an expected call of NodeGetCapabilities -func (mr *MockNodeServerMockRecorder) NodeGetCapabilities(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetCapabilities", reflect.TypeOf((*MockNodeServer)(nil).NodeGetCapabilities), arg0, arg1) +// GetNodeID indicates an expected call of GetNodeID +func (mr *MockNodeServerMockRecorder) GetNodeID(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeID", reflect.TypeOf((*MockNodeServer)(nil).GetNodeID), arg0, arg1) } -// NodeGetId mocks base method -func (m *MockNodeServer) NodeGetId(arg0 context.Context, arg1 *v0.NodeGetIdRequest) (*v0.NodeGetIdResponse, error) { - ret := m.ctrl.Call(m, "NodeGetId", arg0, arg1) - ret0, _ := 
ret[0].(*v0.NodeGetIdResponse) +// NodeGetCapabilities mocks base method +func (m *MockNodeServer) NodeGetCapabilities(arg0 context.Context, arg1 *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "NodeGetCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetCapabilitiesResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// NodeGetId indicates an expected call of NodeGetId -func (mr *MockNodeServerMockRecorder) NodeGetId(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetId", reflect.TypeOf((*MockNodeServer)(nil).NodeGetId), arg0, arg1) +// NodeGetCapabilities indicates an expected call of NodeGetCapabilities +func (mr *MockNodeServerMockRecorder) NodeGetCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetCapabilities", reflect.TypeOf((*MockNodeServer)(nil).NodeGetCapabilities), arg0, arg1) } -// NodeGetInfo mocks base method -func (m *MockNodeServer) NodeGetInfo(arg0 context.Context, arg1 *v0.NodeGetInfoRequest) (*v0.NodeGetInfoResponse, error) { - ret := m.ctrl.Call(m, "NodeGetInfo", arg0, arg1) - ret0, _ := ret[0].(*v0.NodeGetInfoResponse) +// NodeProbe mocks base method +func (m *MockNodeServer) NodeProbe(arg0 context.Context, arg1 *csi.NodeProbeRequest) (*csi.NodeProbeResponse, error) { + ret := m.ctrl.Call(m, "NodeProbe", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeProbeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// NodeGetInfo indicates an expected call of NodeGetInfo -func (mr *MockNodeServerMockRecorder) NodeGetInfo(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetInfo", reflect.TypeOf((*MockNodeServer)(nil).NodeGetInfo), arg0, arg1) +// NodeProbe indicates an expected call of NodeProbe +func (mr *MockNodeServerMockRecorder) NodeProbe(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeProbe", reflect.TypeOf((*MockNodeServer)(nil).NodeProbe), arg0, arg1) } // NodePublishVolume mocks base method -func (m *MockNodeServer) NodePublishVolume(arg0 context.Context, arg1 *v0.NodePublishVolumeRequest) (*v0.NodePublishVolumeResponse, error) { +func (m *MockNodeServer) NodePublishVolume(arg0 context.Context, arg1 *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { ret := m.ctrl.Call(m, "NodePublishVolume", arg0, arg1) - ret0, _ := ret[0].(*v0.NodePublishVolumeResponse) + ret0, _ := ret[0].(*csi.NodePublishVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -314,23 +275,10 @@ func (mr *MockNodeServerMockRecorder) NodePublishVolume(arg0, arg1 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodePublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodePublishVolume), arg0, arg1) } -// NodeStageVolume mocks base method -func (m *MockNodeServer) NodeStageVolume(arg0 context.Context, arg1 *v0.NodeStageVolumeRequest) (*v0.NodeStageVolumeResponse, error) { - ret := m.ctrl.Call(m, "NodeStageVolume", arg0, arg1) - ret0, _ := ret[0].(*v0.NodeStageVolumeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NodeStageVolume indicates an expected call of NodeStageVolume -func (mr *MockNodeServerMockRecorder) NodeStageVolume(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStageVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeStageVolume), arg0, arg1) -} - // NodeUnpublishVolume mocks base method -func 
(m *MockNodeServer) NodeUnpublishVolume(arg0 context.Context, arg1 *v0.NodeUnpublishVolumeRequest) (*v0.NodeUnpublishVolumeResponse, error) { +func (m *MockNodeServer) NodeUnpublishVolume(arg0 context.Context, arg1 *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { ret := m.ctrl.Call(m, "NodeUnpublishVolume", arg0, arg1) - ret0, _ := ret[0].(*v0.NodeUnpublishVolumeResponse) + ret0, _ := ret[0].(*csi.NodeUnpublishVolumeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -339,16 +287,3 @@ func (m *MockNodeServer) NodeUnpublishVolume(arg0 context.Context, arg1 *v0.Node func (mr *MockNodeServerMockRecorder) NodeUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnpublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeUnpublishVolume), arg0, arg1) } - -// NodeUnstageVolume mocks base method -func (m *MockNodeServer) NodeUnstageVolume(arg0 context.Context, arg1 *v0.NodeUnstageVolumeRequest) (*v0.NodeUnstageVolumeResponse, error) { - ret := m.ctrl.Call(m, "NodeUnstageVolume", arg0, arg1) - ret0, _ := ret[0].(*v0.NodeUnstageVolumeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NodeUnstageVolume indicates an expected call of NodeUnstageVolume -func (mr *MockNodeServerMockRecorder) NodeUnstageVolume(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnstageVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeUnstageVolume), arg0, arg1) -} diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/mock.go b/vendor/github.com/kubernetes-csi/csi-test/driver/mock.go deleted file mode 100644 index 9b051eee..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/driver/mock.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2017 Luis Pabón luis@portworx.com - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package driver - -import ( - "net" - - "github.com/kubernetes-csi/csi-test/utils" - "google.golang.org/grpc" -) - -type MockCSIDriverServers struct { - Controller *MockControllerServer - Identity *MockIdentityServer - Node *MockNodeServer -} - -type MockCSIDriver struct { - CSIDriver - conn *grpc.ClientConn -} - -func NewMockCSIDriver(servers *MockCSIDriverServers) *MockCSIDriver { - return &MockCSIDriver{ - CSIDriver: CSIDriver{ - servers: &CSIDriverServers{ - Controller: servers.Controller, - Node: servers.Node, - Identity: servers.Identity, - }, - }, - } -} - -func (m *MockCSIDriver) Start() error { - // Listen on a port assigned by the net package - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return err - } - - if err := m.CSIDriver.Start(l); err != nil { - l.Close() - return err - } - - return nil -} - -func (m *MockCSIDriver) Nexus() (*grpc.ClientConn, error) { - // Start server - err := m.Start() - if err != nil { - return nil, err - } - - // Create a client connection - m.conn, err = utils.Connect(m.Address()) - if err != nil { - return nil, err - } - - return m.conn, nil -} - -func (m *MockCSIDriver) Close() { - m.conn.Close() - m.server.Stop() -} diff --git a/vendor/github.com/kubernetes-csi/csi-test/glide.lock b/vendor/github.com/kubernetes-csi/csi-test/glide.lock new file mode 100644 index 00000000..58bd54a4 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/glide.lock @@ -0,0 +1,135 @@ +hash: f8f39aef239d83f930c5be2717e5bee5b2169902a3fd4a30a441a4e97ec60a07 +updated: 2017-12-13T08:17:19.928367307-05:00 +imports: +- name: github.com/container-storage-interface/spec + version: 4ac2d13f89360f2da40d188473d77f2ec56b9d0d + subpackages: + - lib/go/csi +- name: github.com/davecgh/go-spew + version: ecdeabc65495df2dec95d7c4a4c3e021903035e5 + subpackages: + - spew +- name: github.com/golang/mock + version: f67f7081ddcd0f92a20c1d58e7cd8b23253d15c7 + subpackages: + - gomock +- name: github.com/golang/protobuf + version: 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 + subpackages: + - proto + - protoc-gen-go/descriptor + - ptypes + - ptypes/any + - ptypes/duration + - ptypes/timestamp +- name: github.com/onsi/ginkgo + version: bc14b6691e7a788e12a21121abdaff1ccdcef9e9 + subpackages: + - config + - internal/codelocation + - internal/containernode + - internal/failer + - internal/leafnodes + - internal/remote + - internal/spec + - internal/spec_iterator + - internal/specrunner + - internal/suite + - internal/testingtproxy + - internal/writer + - reporters + - reporters/stenographer + - reporters/stenographer/support/go-colorable + - reporters/stenographer/support/go-isatty + - types +- name: github.com/onsi/gomega + version: c1fb6682134d162f37c13f42e7157653a7de7d2b + subpackages: + - format + - internal/assertion + - internal/asyncassertion + - internal/oraclematcher + - internal/testingtsupport + - matchers + - matchers/support/goraph/bipartitegraph + - matchers/support/goraph/edge + - matchers/support/goraph/node + - matchers/support/goraph/util + - types +- name: github.com/pmezard/go-difflib + version: 792786c7400a136282c1664665ae0a8db921c6c2 + subpackages: + - difflib +- name: github.com/stretchr/testify + version: 2aa2c176b9dab406a6970f6a55f513e8a8c8b18f + subpackages: + - assert +- name: golang.org/x/net + version: 5561cd9b4330353950f399814f427425c0a26fd2 + subpackages: + - context + - html + - html/atom + - html/charset + - http2 + - http2/hpack + - idna + - 
internal/timeseries + - lex/httplex + - trace +- name: golang.org/x/sys + version: d5840adf789d732bc8b00f37b26ca956a7cc8e79 + subpackages: + - unix +- name: golang.org/x/text + version: c01e4764d870b77f8abe5096ee19ad20d80e8075 + subpackages: + - encoding + - encoding/charmap + - encoding/htmlindex + - encoding/internal + - encoding/internal/identifier + - encoding/japanese + - encoding/korean + - encoding/simplifiedchinese + - encoding/traditionalchinese + - encoding/unicode + - internal/tag + - internal/utf8internal + - language + - runes + - secure/bidirule + - transform + - unicode/bidi + - unicode/norm +- name: google.golang.org/genproto + version: f676e0f3ac6395ff1a529ae59a6670878a8371a6 + subpackages: + - googleapis/rpc/status +- name: google.golang.org/grpc + version: 1687ce5770e998bcac6a136af6b52f079b9d902b + subpackages: + - balancer + - balancer/roundrobin + - codes + - connectivity + - credentials + - grpclb/grpc_lb_v1/messages + - grpclog + - internal + - keepalive + - metadata + - naming + - peer + - reflection + - reflection/grpc_reflection_v1alpha + - resolver + - resolver/dns + - resolver/passthrough + - stats + - status + - tap + - transport +- name: gopkg.in/yaml.v2 + version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5 +testImports: [] diff --git a/vendor/github.com/kubernetes-csi/csi-test/glide.yaml b/vendor/github.com/kubernetes-csi/csi-test/glide.yaml new file mode 100644 index 00000000..b04e40ed --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/glide.yaml @@ -0,0 +1,16 @@ +package: github.com/kubernetes-csi/csi-test +import: +- package: github.com/container-storage-interface/spec + subpackages: + - lib/go/csi +- package: google.golang.org/grpc + subpackages: + - reflection +testImport: +- package: github.com/golang/mock + subpackages: + - gomock + - mockgen +- package: golang.org/x/net + subpackages: + - context diff --git a/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh b/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh index 777250eb..a3ae9162 100755 --- a/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh +++ b/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh @@ -1,50 +1,32 @@ #!/bin/bash -TESTARGS=$@ -UDS="/tmp/e2e-csi-sanity.sock" -CSI_ENDPOINTS="$CSI_ENDPOINTS ${UDS}" -CSI_MOCK_VERSION="master" - -# -# $1 - endpoint for mock. -# $2 - endpoint for csi-sanity in Grpc format. -# See https://github.com/grpc/grpc/blob/master/doc/naming.md -runTest() -{ - CSI_ENDPOINT=$1 ./bin/mock & - local pid=$! - - ./cmd/csi-sanity/csi-sanity $TESTARGS --csi.endpoint=$2; ret=$? - kill -9 $pid +CSI_ENDPOINTS="tcp://127.0.0.1:9998" +CSI_ENDPOINTS="$CSI_ENDPOINTS /tmp/e2e-csi-sanity.sock" +CSI_ENDPOINTS="$CSI_ENDPOINTS unix:///tmp/e2e-csi-sanity.sock" - if [ $ret -ne 0 ] ; then - exit $ret +go get -u github.com/thecodeteam/gocsi/mock +cd cmd/csi-sanity + make clean install || exit 1 +cd ../.. + +for endpoint in $CSI_ENDPOINTS ; do + if ! echo $endpoint | grep tcp > /dev/null 2>&1 ; then + rm -f $endpoint fi -} -runTestWithCreds() -{ - CSI_ENDPOINT=$1 CSI_ENABLE_CREDS=true ./bin/mock & - local pid=$! + CSI_ENDPOINT=$endpoint mock & + pid=$! - ./cmd/csi-sanity/csi-sanity $TESTARGS --csi.endpoint=$2 --csi.secrets=mock/mocksecret.yaml; ret=$? + csi-sanity $@ --ginkgo.skip=MOCKERRORS --csi.endpoint=$endpoint ; ret=$? kill -9 $pid + if ! 
echo $endpoint | grep tcp > /dev/null 2>&1 ; then + rm -f $endpoint + fi + if [ $ret -ne 0 ] ; then exit $ret fi -} - -go build -o bin/mock ./mock || exit 1 - -cd cmd/csi-sanity - make clean install || exit 1 -cd ../.. - -runTest "${UDS}" "${UDS}" -rm -f $UDS - -runTestWithCreds "${UDS}" "${UDS}" -rm -f $UDS +done exit 0 diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/AUTHORS b/vendor/github.com/kubernetes-csi/csi-test/mock/AUTHORS deleted file mode 100644 index 23eabcd2..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/AUTHORS +++ /dev/null @@ -1,2 +0,0 @@ -TheCodeTeam -Kubernetes Authors diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/README.md b/vendor/github.com/kubernetes-csi/csi-test/mock/README.md deleted file mode 100644 index d35e2d26..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# Mock CSI Driver -Extremely simple mock driver used to test `csi-sanity` based on `rexray/gocsi/mock` diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go b/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go deleted file mode 100644 index 14343d04..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go +++ /dev/null @@ -1,89 +0,0 @@ -package cache - -import ( - "strings" - "sync" - - "github.com/container-storage-interface/spec/lib/go/csi/v0" -) - -type SnapshotCache interface { - Add(snapshot Snapshot) - - Delete(i int) - - List(status csi.SnapshotStatus_Type) []csi.Snapshot - - FindSnapshot(k, v string) (int, Snapshot) -} - -type Snapshot struct { - Name string - Parameters map[string]string - SnapshotCSI csi.Snapshot -} - -type snapshotCache struct { - snapshotsRWL sync.RWMutex - snapshots []Snapshot -} - -func NewSnapshotCache() SnapshotCache { - return &snapshotCache{ - snapshots: make([]Snapshot, 0), - } -} - -func (snap *snapshotCache) Add(snapshot Snapshot) { - snap.snapshotsRWL.Lock() - defer snap.snapshotsRWL.Unlock() - - snap.snapshots = append(snap.snapshots, snapshot) -} - -func (snap *snapshotCache) Delete(i int) { - snap.snapshotsRWL.Lock() - defer snap.snapshotsRWL.Unlock() - - copy(snap.snapshots[i:], snap.snapshots[i+1:]) - snap.snapshots = snap.snapshots[:len(snap.snapshots)-1] -} - -func (snap *snapshotCache) List(status csi.SnapshotStatus_Type) []csi.Snapshot { - snap.snapshotsRWL.RLock() - defer snap.snapshotsRWL.RUnlock() - - snapshots := make([]csi.Snapshot, 0) - for _, v := range snap.snapshots { - if v.SnapshotCSI.GetStatus() != nil && v.SnapshotCSI.GetStatus().Type == status { - snapshots = append(snapshots, v.SnapshotCSI) - } - } - - return snapshots -} - -func (snap *snapshotCache) FindSnapshot(k, v string) (int, Snapshot) { - snap.snapshotsRWL.RLock() - defer snap.snapshotsRWL.RUnlock() - - snapshotIdx := -1 - for i, vi := range snap.snapshots { - switch k { - case "id": - if strings.EqualFold(v, vi.SnapshotCSI.Id) { - return i, vi - } - case "sourceVolumeId": - if strings.EqualFold(v, vi.SnapshotCSI.SourceVolumeId) { - return i, vi - } - case "name": - if vi.Name == v { - return i, vi - } - } - } - - return snapshotIdx, Snapshot{} -} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/main.go b/vendor/github.com/kubernetes-csi/csi-test/mock/main.go deleted file mode 100644 index d66d1881..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/main.go +++ /dev/null @@ -1,88 +0,0 @@ -/* 
-Copyright 2018 Kubernetes Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package main - -import ( - "fmt" - "net" - "os" - "os/signal" - "strings" - "syscall" - - "github.com/kubernetes-csi/csi-test/driver" - "github.com/kubernetes-csi/csi-test/mock/service" -) - -func main() { - endpoint := os.Getenv("CSI_ENDPOINT") - if len(endpoint) == 0 { - fmt.Println("CSI_ENDPOINT must be defined and must be a path") - os.Exit(1) - } - if strings.Contains(endpoint, ":") { - fmt.Println("CSI_ENDPOINT must be a unix path") - os.Exit(1) - } - - // Create mock driver - s := service.New() - servers := &driver.CSIDriverServers{ - Controller: s, - Identity: s, - Node: s, - } - d := driver.NewCSIDriver(servers) - - // If creds is enabled, set the default creds. - setCreds := os.Getenv("CSI_ENABLE_CREDS") - if len(setCreds) > 0 && setCreds == "true" { - d.SetDefaultCreds() - } - - // Listen - os.Remove(endpoint) - l, err := net.Listen("unix", endpoint) - if err != nil { - fmt.Printf("Error: Unable to listen on %s socket: %v\n", - endpoint, - err) - os.Exit(1) - } - defer os.Remove(endpoint) - - // Start server - if err := d.Start(l); err != nil { - fmt.Printf("Error: Unable to start mock CSI server: %v\n", - err) - os.Exit(1) - } - fmt.Println("mock driver started") - - // Wait for signal - sigc := make(chan os.Signal, 1) - sigs := []os.Signal{ - syscall.SIGTERM, - syscall.SIGHUP, - syscall.SIGINT, - syscall.SIGQUIT, - } - signal.Notify(sigc, sigs...) 
- - <-sigc - d.Stop() - fmt.Println("mock driver stopped") -} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml b/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml deleted file mode 100644 index e7c9f20d..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml +++ /dev/null @@ -1,16 +0,0 @@ -CreateVolumeSecret: - secretKey: secretval1 -DeleteVolumeSecret: - secretKey: secretval2 -ControllerPublishVolumeSecret: - secretKey: secretval3 -ControllerUnpublishVolumeSecret: - secretKey: secretval4 -NodeStageVolumeSecret: - secretKey: secretval5 -NodePublishVolumeSecret: - secretKey: secretval6 -CreateSnapshotSecret: - secretKey: secretval7 -DeleteSnapshotSecret: - secretKey: secretval8 diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go deleted file mode 100644 index 39176bdd..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go +++ /dev/null @@ -1,559 +0,0 @@ -package service - -import ( - "fmt" - "math" - "path" - "reflect" - "strconv" - - log "github.com/sirupsen/logrus" - "golang.org/x/net/context" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/container-storage-interface/spec/lib/go/csi/v0" -) - -const ( - MaxStorageCapacity = tib - ReadOnlyKey = "readonly" -) - -func (s *service) CreateVolume( - ctx context.Context, - req *csi.CreateVolumeRequest) ( - *csi.CreateVolumeResponse, error) { - - if len(req.Name) == 0 { - return nil, status.Error(codes.InvalidArgument, "Volume Name cannot be empty") - } - if req.VolumeCapabilities == nil { - return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty") - } - - // Check to see if the volume already exists. - if i, v := s.findVolByName(ctx, req.Name); i >= 0 { - // Requested volume name already exists, need to check if the existing volume's - // capacity is more or equal to new request's capacity. - if v.GetCapacityBytes() < req.GetCapacityRange().GetRequiredBytes() { - return nil, status.Error(codes.AlreadyExists, - fmt.Sprintf("Volume with name %s already exists", req.GetName())) - } - return &csi.CreateVolumeResponse{Volume: &v}, nil - } - - // If no capacity is specified then use 100GiB - capacity := gib100 - if cr := req.CapacityRange; cr != nil { - if rb := cr.RequiredBytes; rb > 0 { - capacity = rb - } - if lb := cr.LimitBytes; lb > 0 { - capacity = lb - } - } - // Check for maximum available capacity - if capacity >= MaxStorageCapacity { - return nil, status.Errorf(codes.OutOfRange, "Requested capacity %d exceeds maximum allowed %d", capacity, MaxStorageCapacity) - } - // Create the volume and add it to the service's in-mem volume slice. 
- v := s.newVolume(req.Name, capacity) - s.volsRWL.Lock() - defer s.volsRWL.Unlock() - s.vols = append(s.vols, v) - MockVolumes[v.Id] = Volume{ - VolumeCSI: v, - NodeID: "", - ISStaged: false, - ISPublished: false, - StageTargetPath: "", - TargetPath: "", - } - - return &csi.CreateVolumeResponse{Volume: &v}, nil -} - -func (s *service) DeleteVolume( - ctx context.Context, - req *csi.DeleteVolumeRequest) ( - *csi.DeleteVolumeResponse, error) { - - s.volsRWL.Lock() - defer s.volsRWL.Unlock() - - // If the volume is not specified, return error - if len(req.VolumeId) == 0 { - return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") - } - - // If the volume does not exist then return an idempotent response. - i, _ := s.findVolNoLock("id", req.VolumeId) - if i < 0 { - return &csi.DeleteVolumeResponse{}, nil - } - - // This delete logic preserves order and prevents potential memory - // leaks. The slice's elements may not be pointers, but the structs - // themselves have fields that are. - copy(s.vols[i:], s.vols[i+1:]) - s.vols[len(s.vols)-1] = csi.Volume{} - s.vols = s.vols[:len(s.vols)-1] - log.WithField("volumeID", req.VolumeId).Debug("mock delete volume") - return &csi.DeleteVolumeResponse{}, nil -} - -func (s *service) ControllerPublishVolume( - ctx context.Context, - req *csi.ControllerPublishVolumeRequest) ( - *csi.ControllerPublishVolumeResponse, error) { - - if len(req.VolumeId) == 0 { - return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") - } - if len(req.NodeId) == 0 { - return nil, status.Error(codes.InvalidArgument, "Node ID cannot be empty") - } - if req.VolumeCapability == nil { - return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty") - } - - if req.NodeId != s.nodeID { - return nil, status.Errorf(codes.NotFound, "Not matching Node ID %s to Mock Node ID %s", req.NodeId, s.nodeID) - } - - s.volsRWL.Lock() - defer s.volsRWL.Unlock() - - i, v := s.findVolNoLock("id", req.VolumeId) - if i < 0 { - return nil, status.Error(codes.NotFound, req.VolumeId) - } - - // devPathKey is the key in the volume's attributes that is set to a - // mock device path if the volume has been published by the controller - // to the specified node. - devPathKey := path.Join(req.NodeId, "dev") - - // Check to see if the volume is already published. - if device := v.Attributes[devPathKey]; device != "" { - var volRo bool - var roVal string - if ro, ok := v.Attributes[ReadOnlyKey]; ok { - roVal = ro - } - - if roVal == "true" { - volRo = true - } else { - volRo = false - } - - // Check if readonly flag is compatible with the publish request. - if req.GetReadonly() != volRo { - return nil, status.Error(codes.AlreadyExists, "Volume published but has incompatible readonly flag") - } - - return &csi.ControllerPublishVolumeResponse{ - PublishInfo: map[string]string{ - "device": device, - "readonly": roVal, - }, - }, nil - } - - var roVal string - if req.GetReadonly() { - roVal = "true" - } else { - roVal = "false" - } - - // Publish the volume. 
- device := "/dev/mock" - v.Attributes[devPathKey] = device - v.Attributes[ReadOnlyKey] = roVal - s.vols[i] = v - - return &csi.ControllerPublishVolumeResponse{ - PublishInfo: map[string]string{ - "device": device, - "readonly": roVal, - }, - }, nil -} - -func (s *service) ControllerUnpublishVolume( - ctx context.Context, - req *csi.ControllerUnpublishVolumeRequest) ( - *csi.ControllerUnpublishVolumeResponse, error) { - - if len(req.VolumeId) == 0 { - return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") - } - nodeID := req.NodeId - if len(nodeID) == 0 { - // If node id is empty, no failure as per Spec - nodeID = s.nodeID - } - - if req.NodeId != s.nodeID { - return nil, status.Errorf(codes.NotFound, "Node ID %s does not match to expected Node ID %s", req.NodeId, s.nodeID) - } - - s.volsRWL.Lock() - defer s.volsRWL.Unlock() - - i, v := s.findVolNoLock("id", req.VolumeId) - if i < 0 { - return nil, status.Error(codes.NotFound, req.VolumeId) - } - - // devPathKey is the key in the volume's attributes that is set to a - // mock device path if the volume has been published by the controller - // to the specified node. - devPathKey := path.Join(nodeID, "dev") - - // Check to see if the volume is already unpublished. - if v.Attributes[devPathKey] == "" { - return &csi.ControllerUnpublishVolumeResponse{}, nil - } - - // Unpublish the volume. - delete(v.Attributes, devPathKey) - delete(v.Attributes, ReadOnlyKey) - s.vols[i] = v - - return &csi.ControllerUnpublishVolumeResponse{}, nil -} - -func (s *service) ValidateVolumeCapabilities( - ctx context.Context, - req *csi.ValidateVolumeCapabilitiesRequest) ( - *csi.ValidateVolumeCapabilitiesResponse, error) { - - if len(req.GetVolumeId()) == 0 { - return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") - } - if len(req.VolumeCapabilities) == 0 { - return nil, status.Error(codes.InvalidArgument, req.VolumeId) - } - i, _ := s.findVolNoLock("id", req.VolumeId) - if i < 0 { - return nil, status.Error(codes.NotFound, req.VolumeId) - } - - return &csi.ValidateVolumeCapabilitiesResponse{ - Supported: true, - }, nil -} - -func (s *service) ListVolumes( - ctx context.Context, - req *csi.ListVolumesRequest) ( - *csi.ListVolumesResponse, error) { - - // Copy the mock volumes into a new slice in order to avoid - // locking the service's volume slice for the duration of the - // ListVolumes RPC. - var vols []csi.Volume - func() { - s.volsRWL.RLock() - defer s.volsRWL.RUnlock() - vols = make([]csi.Volume, len(s.vols)) - copy(vols, s.vols) - }() - - var ( - ulenVols = int32(len(vols)) - maxEntries = req.MaxEntries - startingToken int32 - ) - - if v := req.StartingToken; v != "" { - i, err := strconv.ParseUint(v, 10, 32) - if err != nil { - return nil, status.Errorf( - codes.InvalidArgument, - "startingToken=%d !< int32=%d", - startingToken, math.MaxUint32) - } - startingToken = int32(i) - } - - if startingToken > ulenVols { - return nil, status.Errorf( - codes.InvalidArgument, - "startingToken=%d > len(vols)=%d", - startingToken, ulenVols) - } - - // Discern the number of remaining entries. - rem := ulenVols - startingToken - - // If maxEntries is 0 or greater than the number of remaining entries then - // set maxEntries to the number of remaining entries. 
- if maxEntries == 0 || maxEntries > rem { - maxEntries = rem - } - - var ( - i int - j = startingToken - entries = make( - []*csi.ListVolumesResponse_Entry, - maxEntries) - ) - - for i = 0; i < len(entries); i++ { - entries[i] = &csi.ListVolumesResponse_Entry{ - Volume: &vols[j], - } - j++ - } - - var nextToken string - if n := startingToken + int32(i); n < ulenVols { - nextToken = fmt.Sprintf("%d", n) - } - - return &csi.ListVolumesResponse{ - Entries: entries, - NextToken: nextToken, - }, nil -} - -func (s *service) GetCapacity( - ctx context.Context, - req *csi.GetCapacityRequest) ( - *csi.GetCapacityResponse, error) { - - return &csi.GetCapacityResponse{ - AvailableCapacity: MaxStorageCapacity, - }, nil -} - -func (s *service) ControllerGetCapabilities( - ctx context.Context, - req *csi.ControllerGetCapabilitiesRequest) ( - *csi.ControllerGetCapabilitiesResponse, error) { - - return &csi.ControllerGetCapabilitiesResponse{ - Capabilities: []*csi.ControllerServiceCapability{ - { - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, - }, - }, - }, - { - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, - }, - }, - }, - { - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: csi.ControllerServiceCapability_RPC_LIST_VOLUMES, - }, - }, - }, - { - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: csi.ControllerServiceCapability_RPC_GET_CAPACITY, - }, - }, - }, - { - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, - }, - }, - }, - { - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, - }, - }, - }, - }, - }, nil -} - -func (s *service) CreateSnapshot(ctx context.Context, - req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { - // Check arguments - if len(req.GetName()) == 0 { - return nil, status.Error(codes.InvalidArgument, "Snapshot Name cannot be empty") - } - if len(req.GetSourceVolumeId()) == 0 { - return nil, status.Error(codes.InvalidArgument, "Snapshot SourceVolumeId cannot be empty") - } - - // Check to see if the snapshot already exists. - if i, v := s.snapshots.FindSnapshot("name", req.GetName()); i >= 0 { - // Requested snapshot name already exists - if v.SnapshotCSI.GetSourceVolumeId() != req.GetSourceVolumeId() || !reflect.DeepEqual(v.Parameters, req.GetParameters()) { - return nil, status.Error(codes.AlreadyExists, - fmt.Sprintf("Snapshot with name %s already exists", req.GetName())) - } - return &csi.CreateSnapshotResponse{Snapshot: &v.SnapshotCSI}, nil - } - - // Create the snapshot and add it to the service's in-mem snapshot slice. 
- snapshot := s.newSnapshot(req.GetName(), req.GetSourceVolumeId(), req.GetParameters()) - s.snapshots.Add(snapshot) - - return &csi.CreateSnapshotResponse{Snapshot: &snapshot.SnapshotCSI}, nil -} - -func (s *service) DeleteSnapshot(ctx context.Context, - req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { - - // If the snapshot is not specified, return error - if len(req.SnapshotId) == 0 { - return nil, status.Error(codes.InvalidArgument, "Snapshot ID cannot be empty") - } - - // If the snapshot does not exist then return an idempotent response. - i, _ := s.snapshots.FindSnapshot("id", req.SnapshotId) - if i < 0 { - return &csi.DeleteSnapshotResponse{}, nil - } - - // This delete logic preserves order and prevents potential memory - // leaks. The slice's elements may not be pointers, but the structs - // themselves have fields that are. - s.snapshots.Delete(i) - log.WithField("SnapshotId", req.SnapshotId).Debug("mock delete snapshot") - return &csi.DeleteSnapshotResponse{}, nil -} - -func (s *service) ListSnapshots(ctx context.Context, - req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { - - // case 1: SnapshotId is not empty, return snapshots that match the snapshot id. - if len(req.GetSnapshotId()) != 0 { - return getSnapshotById(s, req) - } - - // case 2: SourceVolumeId is not empty, return snapshots that match the source volume id. - if len(req.GetSourceVolumeId()) != 0 { - return getSnapshotByVolumeId(s, req) - } - - // case 3: no parameter is set, so we return all the snapshots. - return getAllSnapshots(s, req) -} - -func getSnapshotById(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { - if len(req.GetSnapshotId()) != 0 { - i, snapshot := s.snapshots.FindSnapshot("id", req.GetSnapshotId()) - if i < 0 { - return &csi.ListSnapshotsResponse{}, nil - } - - if len(req.GetSourceVolumeId()) != 0 { - if snapshot.SnapshotCSI.GetSourceVolumeId() != req.GetSourceVolumeId() { - return &csi.ListSnapshotsResponse{}, nil - } - } - - return &csi.ListSnapshotsResponse{ - Entries: []*csi.ListSnapshotsResponse_Entry{ - { - Snapshot: &snapshot.SnapshotCSI, - }, - }, - }, nil - } - return nil, nil -} - -func getSnapshotByVolumeId(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { - if len(req.GetSourceVolumeId()) != 0 { - i, snapshot := s.snapshots.FindSnapshot("sourceVolumeId", req.SourceVolumeId) - if i < 0 { - return &csi.ListSnapshotsResponse{}, nil - } - return &csi.ListSnapshotsResponse{ - Entries: []*csi.ListSnapshotsResponse_Entry{ - { - Snapshot: &snapshot.SnapshotCSI, - }, - }, - }, nil - } - return nil, nil -} - -func getAllSnapshots(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { - // Copy the mock snapshots into a new slice in order to avoid - // locking the service's snapshot slice for the duration of the - // ListSnapshots RPC. - snapshots := s.snapshots.List(csi.SnapshotStatus_READY) - - var ( - ulenSnapshots = int32(len(snapshots)) - maxEntries = req.MaxEntries - startingToken int32 - ) - - if v := req.StartingToken; v != "" { - i, err := strconv.ParseUint(v, 10, 32) - if err != nil { - return nil, status.Errorf( - codes.Aborted, - "startingToken=%d !< int32=%d", - startingToken, math.MaxUint32) - } - startingToken = int32(i) - } - - if startingToken > ulenSnapshots { - return nil, status.Errorf( - codes.Aborted, - "startingToken=%d > len(snapshots)=%d", - startingToken, ulenSnapshots) - } - - // Discern the number of remaining entries. 
- rem := ulenSnapshots - startingToken - - // If maxEntries is 0 or greater than the number of remaining entries then - // set maxEntries to the number of remaining entries. - if maxEntries == 0 || maxEntries > rem { - maxEntries = rem - } - - var ( - i int - j = startingToken - entries = make( - []*csi.ListSnapshotsResponse_Entry, - maxEntries) - ) - - for i = 0; i < len(entries); i++ { - entries[i] = &csi.ListSnapshotsResponse_Entry{ - Snapshot: &snapshots[j], - } - j++ - } - - var nextToken string - if n := startingToken + int32(i); n < ulenSnapshots { - nextToken = fmt.Sprintf("%d", n) - } - - return &csi.ListSnapshotsResponse{ - Entries: entries, - NextToken: nextToken, - }, nil -} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go deleted file mode 100644 index c66d3b62..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go +++ /dev/null @@ -1,48 +0,0 @@ -package service - -import ( - "golang.org/x/net/context" - - "github.com/container-storage-interface/spec/lib/go/csi/v0" - "github.com/golang/protobuf/ptypes/wrappers" -) - -func (s *service) GetPluginInfo( - ctx context.Context, - req *csi.GetPluginInfoRequest) ( - *csi.GetPluginInfoResponse, error) { - - return &csi.GetPluginInfoResponse{ - Name: Name, - VendorVersion: VendorVersion, - Manifest: Manifest, - }, nil -} - -func (s *service) Probe( - ctx context.Context, - req *csi.ProbeRequest) ( - *csi.ProbeResponse, error) { - - return &csi.ProbeResponse{ - Ready: &wrappers.BoolValue{Value: true}, - }, nil -} - -func (s *service) GetPluginCapabilities( - ctx context.Context, - req *csi.GetPluginCapabilitiesRequest) ( - *csi.GetPluginCapabilitiesResponse, error) { - - return &csi.GetPluginCapabilitiesResponse{ - Capabilities: []*csi.PluginCapability{ - { - Type: &csi.PluginCapability_Service_{ - Service: &csi.PluginCapability_Service{ - Type: csi.PluginCapability_Service_CONTROLLER_SERVICE, - }, - }, - }, - }, - }, nil -} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go deleted file mode 100644 index 0321c740..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go +++ /dev/null @@ -1,236 +0,0 @@ -package service - -import ( - "path" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "golang.org/x/net/context" - - "github.com/container-storage-interface/spec/lib/go/csi/v0" -) - -func (s *service) NodeStageVolume( - ctx context.Context, - req *csi.NodeStageVolumeRequest) ( - *csi.NodeStageVolumeResponse, error) { - - device, ok := req.PublishInfo["device"] - if !ok { - return nil, status.Error( - codes.InvalidArgument, - "stage volume info 'device' key required") - } - - if len(req.GetVolumeId()) == 0 { - return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") - } - - if len(req.GetStagingTargetPath()) == 0 { - return nil, status.Error(codes.InvalidArgument, "Staging Target Path cannot be empty") - } - - if req.GetVolumeCapability() == nil { - return nil, status.Error(codes.InvalidArgument, "Volume Capability cannot be empty") - } - - s.volsRWL.Lock() - defer s.volsRWL.Unlock() - - i, v := s.findVolNoLock("id", req.VolumeId) - if i < 0 { - return nil, status.Error(codes.NotFound, req.VolumeId) - } - - // nodeStgPathKey is the key in the volume's attributes that is set to a - // mock 
stage path if the volume has been published by the node - nodeStgPathKey := path.Join(s.nodeID, req.StagingTargetPath) - - // Check to see if the volume has already been staged. - if v.Attributes[nodeStgPathKey] != "" { - // TODO: Check for the capabilities to be equal. Return "ALREADY_EXISTS" - // if the capabilities don't match. - return &csi.NodeStageVolumeResponse{}, nil - } - - // Stage the volume. - v.Attributes[nodeStgPathKey] = device - s.vols[i] = v - - return &csi.NodeStageVolumeResponse{}, nil -} - -func (s *service) NodeUnstageVolume( - ctx context.Context, - req *csi.NodeUnstageVolumeRequest) ( - *csi.NodeUnstageVolumeResponse, error) { - - if len(req.GetVolumeId()) == 0 { - return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") - } - - if len(req.GetStagingTargetPath()) == 0 { - return nil, status.Error(codes.InvalidArgument, "Staging Target Path cannot be empty") - } - - s.volsRWL.Lock() - defer s.volsRWL.Unlock() - - i, v := s.findVolNoLock("id", req.VolumeId) - if i < 0 { - return nil, status.Error(codes.NotFound, req.VolumeId) - } - - // nodeStgPathKey is the key in the volume's attributes that is set to a - // mock stage path if the volume has been published by the node - nodeStgPathKey := path.Join(s.nodeID, req.StagingTargetPath) - - // Check to see if the volume has already been unstaged. - if v.Attributes[nodeStgPathKey] == "" { - return &csi.NodeUnstageVolumeResponse{}, nil - } - - // Unpublish the volume. - delete(v.Attributes, nodeStgPathKey) - s.vols[i] = v - - return &csi.NodeUnstageVolumeResponse{}, nil -} - -func (s *service) NodePublishVolume( - ctx context.Context, - req *csi.NodePublishVolumeRequest) ( - *csi.NodePublishVolumeResponse, error) { - - device, ok := req.PublishInfo["device"] - if !ok { - return nil, status.Error( - codes.InvalidArgument, - "publish volume info 'device' key required") - } - - if len(req.GetVolumeId()) == 0 { - return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") - } - - if len(req.GetTargetPath()) == 0 { - return nil, status.Error(codes.InvalidArgument, "Target Path cannot be empty") - } - - if req.GetVolumeCapability() == nil { - return nil, status.Error(codes.InvalidArgument, "Volume Capability cannot be empty") - } - - s.volsRWL.Lock() - defer s.volsRWL.Unlock() - - i, v := s.findVolNoLock("id", req.VolumeId) - if i < 0 { - return nil, status.Error(codes.NotFound, req.VolumeId) - } - - // nodeMntPathKey is the key in the volume's attributes that is set to a - // mock mount path if the volume has been published by the node - nodeMntPathKey := path.Join(s.nodeID, req.TargetPath) - - // Check to see if the volume has already been published. - if v.Attributes[nodeMntPathKey] != "" { - - // Requests marked Readonly fail due to volumes published by - // the Mock driver supporting only RW mode. - if req.Readonly { - return nil, status.Error(codes.AlreadyExists, req.VolumeId) - } - - return &csi.NodePublishVolumeResponse{}, nil - } - - // Publish the volume. 
- if req.GetStagingTargetPath() != "" { - v.Attributes[nodeMntPathKey] = req.GetStagingTargetPath() - } else { - v.Attributes[nodeMntPathKey] = device - } - s.vols[i] = v - - return &csi.NodePublishVolumeResponse{}, nil -} - -func (s *service) NodeUnpublishVolume( - ctx context.Context, - req *csi.NodeUnpublishVolumeRequest) ( - *csi.NodeUnpublishVolumeResponse, error) { - - if len(req.GetVolumeId()) == 0 { - return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") - } - if len(req.GetTargetPath()) == 0 { - return nil, status.Error(codes.InvalidArgument, "Target Path cannot be empty") - } - - s.volsRWL.Lock() - defer s.volsRWL.Unlock() - - i, v := s.findVolNoLock("id", req.VolumeId) - if i < 0 { - return nil, status.Error(codes.NotFound, req.VolumeId) - } - - // nodeMntPathKey is the key in the volume's attributes that is set to a - // mock mount path if the volume has been published by the node - nodeMntPathKey := path.Join(s.nodeID, req.TargetPath) - - // Check to see if the volume has already been unpublished. - if v.Attributes[nodeMntPathKey] == "" { - return &csi.NodeUnpublishVolumeResponse{}, nil - } - - // Unpublish the volume. - delete(v.Attributes, nodeMntPathKey) - s.vols[i] = v - - return &csi.NodeUnpublishVolumeResponse{}, nil -} - -func (s *service) NodeGetId( - ctx context.Context, - req *csi.NodeGetIdRequest) ( - *csi.NodeGetIdResponse, error) { - - return &csi.NodeGetIdResponse{ - NodeId: s.nodeID, - }, nil -} - -func (s *service) NodeGetCapabilities( - ctx context.Context, - req *csi.NodeGetCapabilitiesRequest) ( - *csi.NodeGetCapabilitiesResponse, error) { - - return &csi.NodeGetCapabilitiesResponse{ - Capabilities: []*csi.NodeServiceCapability{ - { - Type: &csi.NodeServiceCapability_Rpc{ - Rpc: &csi.NodeServiceCapability_RPC{ - Type: csi.NodeServiceCapability_RPC_UNKNOWN, - }, - }, - }, - { - Type: &csi.NodeServiceCapability_Rpc{ - Rpc: &csi.NodeServiceCapability_RPC{ - Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, - }, - }, - }, - }, - }, nil -} - -func (s *service) NodeGetInfo(ctx context.Context, - req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { - return &csi.NodeGetInfoResponse{ - NodeId: s.nodeID, - }, nil -} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go deleted file mode 100644 index c9f4f7b2..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go +++ /dev/null @@ -1,137 +0,0 @@ -package service - -import ( - "fmt" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/container-storage-interface/spec/lib/go/csi/v0" - "github.com/kubernetes-csi/csi-test/mock/cache" - "golang.org/x/net/context" -) - -const ( - // Name is the name of the CSI plug-in. - Name = "io.kubernetes.storage.mock" - - // VendorVersion is the version returned by GetPluginInfo. - VendorVersion = "0.3.0" -) - -// Manifest is the SP's manifest. -var Manifest = map[string]string{ - "url": "https://github.com/kubernetes-csi/csi-test/mock", -} - -// Service is the CSI Mock service provider. 
-type Service interface { - csi.ControllerServer - csi.IdentityServer - csi.NodeServer -} - -type service struct { - sync.Mutex - nodeID string - vols []csi.Volume - volsRWL sync.RWMutex - volsNID uint64 - snapshots cache.SnapshotCache - snapshotsNID uint64 -} - -type Volume struct { - sync.Mutex - VolumeCSI csi.Volume - NodeID string - ISStaged bool - ISPublished bool - StageTargetPath string - TargetPath string -} - -var MockVolumes map[string]Volume - -// New returns a new Service. -func New() Service { - s := &service{nodeID: Name} - s.snapshots = cache.NewSnapshotCache() - s.vols = []csi.Volume{ - s.newVolume("Mock Volume 1", gib100), - s.newVolume("Mock Volume 2", gib100), - s.newVolume("Mock Volume 3", gib100), - } - MockVolumes = map[string]Volume{} - - s.snapshots.Add(s.newSnapshot("Mock Snapshot 1", "1", map[string]string{"Description": "snapshot 1"})) - s.snapshots.Add(s.newSnapshot("Mock Snapshot 2", "2", map[string]string{"Description": "snapshot 2"})) - s.snapshots.Add(s.newSnapshot("Mock Snapshot 3", "3", map[string]string{"Description": "snapshot 3"})) - - return s -} - -const ( - kib int64 = 1024 - mib int64 = kib * 1024 - gib int64 = mib * 1024 - gib100 int64 = gib * 100 - tib int64 = gib * 1024 - tib100 int64 = tib * 100 -) - -func (s *service) newVolume(name string, capcity int64) csi.Volume { - return csi.Volume{ - Id: fmt.Sprintf("%d", atomic.AddUint64(&s.volsNID, 1)), - Attributes: map[string]string{"name": name}, - CapacityBytes: capcity, - } -} - -func (s *service) findVol(k, v string) (volIdx int, volInfo csi.Volume) { - s.volsRWL.RLock() - defer s.volsRWL.RUnlock() - return s.findVolNoLock(k, v) -} - -func (s *service) findVolNoLock(k, v string) (volIdx int, volInfo csi.Volume) { - volIdx = -1 - - for i, vi := range s.vols { - switch k { - case "id": - if strings.EqualFold(v, vi.Id) { - return i, vi - } - case "name": - if n, ok := vi.Attributes["name"]; ok && strings.EqualFold(v, n) { - return i, vi - } - } - } - - return -} - -func (s *service) findVolByName( - ctx context.Context, name string) (int, csi.Volume) { - - return s.findVol("name", name) -} - -func (s *service) newSnapshot(name, sourceVolumeId string, parameters map[string]string) cache.Snapshot { - return cache.Snapshot{ - Name: name, - Parameters: parameters, - SnapshotCSI: csi.Snapshot{ - Id: fmt.Sprintf("%d", atomic.AddUint64(&s.snapshotsNID, 1)), - CreatedAt: time.Now().UnixNano(), - SourceVolumeId: sourceVolumeId, - Status: &csi.SnapshotStatus{ - Type: csi.SnapshotStatus_READY, - Details: "snapshot ready", - }, - }, - } -} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md index de4ae501..f258382c 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md @@ -6,6 +6,15 @@ For CSI drivers written in Golang, the framework provides a simple API function to call to test the driver. Another way to run the test suite is to use the command line program [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity). +## Status +Although the project can be used immediately, it will not provide full +coverage since it is not yet finished. 
Below shows the percentage of +completion for each CSI service: + +* Identity Service: 95% +* Controller Service: 0% +* Node Service: 0% + ## For Golang CSI Drivers This framework leverages the Ginkgo BDD testing framework to deliver a descriptive test suite for your driver. To test your driver, simply call the API in one of your @@ -13,50 +22,13 @@ Golang `TestXXX` functions. For example: ```go func TestMyDriver(t *testing.T) { - // Setup the full driver and its environment - ... setup driver ... - config := &sanity.Config{ - TargetPath: ... - StagingPath: ... - Address: endpoint, - } - + // Setup the full driver and its environment + ... setup driver ... - // Now call the test suite - sanity.Test(t, config) + // Now call the test suite + sanity.Test(t, driverEndpointAddress) } ``` -Only one such test function is supported because under the hood a -Ginkgo test suite gets constructed and executed by the call. - -Alternatively, the tests can also be embedded inside a Ginkgo test -suite. In that case it is possible to define multiple tests with -different configurations: - -```go -var _ = Describe("MyCSIDriver", func () { - Context("Config A", func () { - var config &sanity.Config - - BeforeEach() { - ... setup driver and config... - } - - AfterEach() { - ...tear down driver... - } - - Describe("CSI sanity", func() { - sanity.GinkgoTest(config) - }) - }) - - Context("Config B", func () { - ... - }) -}) -``` - ## Command line program Please see [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go deleted file mode 100644 index 699efe7b..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright 2018 Intel Corporation - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sanity - -import ( - "context" - "log" - - "github.com/container-storage-interface/spec/lib/go/csi/v0" - - . "github.com/onsi/ginkgo" -) - -// VolumeInfo keeps track of the information needed to delete a volume. -type VolumeInfo struct { - // Node on which the volume was published, empty if none - // or publishing is not supported. - NodeID string - - // Volume ID assigned by CreateVolume. - VolumeID string -} - -// Cleanup keeps track of resources, in particular volumes, which need -// to be freed when testing is done. -type Cleanup struct { - Context *SanityContext - ControllerClient csi.ControllerClient - NodeClient csi.NodeClient - ControllerPublishSupported bool - NodeStageSupported bool - - // Maps from volume name to the node ID for which the volume - // is published and the volume ID. - volumes map[string]VolumeInfo -} - -// RegisterVolume adds or updates an entry for the volume with the -// given name. 
-func (cl *Cleanup) RegisterVolume(name string, info VolumeInfo) { - if cl.volumes == nil { - cl.volumes = make(map[string]VolumeInfo) - } - cl.volumes[name] = info -} - -// MaybeRegisterVolume adds or updates an entry for the volume with -// the given name if CreateVolume was successful. -func (cl *Cleanup) MaybeRegisterVolume(name string, vol *csi.CreateVolumeResponse, err error) { - if err == nil && vol.GetVolume().GetId() != "" { - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetId()}) - } -} - -// UnregisterVolume removes the entry for the volume with the -// given name, thus preventing all cleanup operations for it. -func (cl *Cleanup) UnregisterVolume(name string) { - if cl.volumes != nil { - delete(cl.volumes, name) - } -} - -// DeleteVolumes stops using the registered volumes and tries to delete all of them. -func (cl *Cleanup) DeleteVolumes() { - if cl.volumes == nil { - return - } - logger := log.New(GinkgoWriter, "cleanup: ", 0) - ctx := context.Background() - - for name, info := range cl.volumes { - logger.Printf("deleting %s = %s", name, info.VolumeID) - if _, err := cl.NodeClient.NodeUnpublishVolume( - ctx, - &csi.NodeUnpublishVolumeRequest{ - VolumeId: info.VolumeID, - TargetPath: cl.Context.Config.TargetPath, - }, - ); err != nil { - logger.Printf("warning: NodeUnpublishVolume: %s", err) - } - - if cl.NodeStageSupported { - if _, err := cl.NodeClient.NodeUnstageVolume( - ctx, - &csi.NodeUnstageVolumeRequest{ - VolumeId: info.VolumeID, - StagingTargetPath: cl.Context.Config.StagingPath, - }, - ); err != nil { - logger.Printf("warning: NodeUnstageVolume: %s", err) - } - } - - if cl.ControllerPublishSupported && info.NodeID != "" { - if _, err := cl.ControllerClient.ControllerUnpublishVolume( - ctx, - &csi.ControllerUnpublishVolumeRequest{ - VolumeId: info.VolumeID, - NodeId: info.NodeID, - ControllerUnpublishSecrets: cl.Context.Secrets.ControllerUnpublishVolumeSecret, - }, - ); err != nil { - logger.Printf("warning: ControllerUnpublishVolume: %s", err) - } - } - - if _, err := cl.ControllerClient.DeleteVolume( - ctx, - &csi.DeleteVolumeRequest{ - VolumeId: info.VolumeID, - ControllerDeleteSecrets: cl.Context.Secrets.DeleteVolumeSecret, - }, - ); err != nil { - logger.Printf("error: DeleteVolume: %s", err) - } - - cl.UnregisterVolume(name) - } -} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go index 294a1e0d..33e999b7 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go @@ -17,53 +17,33 @@ limitations under the License. package sanity import ( - "context" "fmt" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" + context "golang.org/x/net/context" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "strconv" ) -const ( - // DefTestVolumeSize defines the base size of dynamically - // provisioned volumes. 10GB by default, can be overridden by - // setting Config.TestVolumeSize. 
- DefTestVolumeSize int64 = 10 * 1024 * 1024 * 1024 -) - -func TestVolumeSize(sc *SanityContext) int64 { - if sc.Config.TestVolumeSize > 0 { - return sc.Config.TestVolumeSize - } - return DefTestVolumeSize -} - -func verifyVolumeInfo(v *csi.Volume) { +func verifyVolumeInfo(v *csi.VolumeInfo) { Expect(v).NotTo(BeNil()) Expect(v.GetId()).NotTo(BeEmpty()) } -func verifySnapshotInfo(snapshot *csi.Snapshot) { - Expect(snapshot).NotTo(BeNil()) - Expect(snapshot.GetId()).NotTo(BeEmpty()) - Expect(snapshot.GetSourceVolumeId()).NotTo(BeEmpty()) - Expect(snapshot.GetCreatedAt()).NotTo(BeZero()) -} - -func isControllerCapabilitySupported( +func isCapabilitySupported( c csi.ControllerClient, capType csi.ControllerServiceCapability_RPC_Type, ) bool { caps, err := c.ControllerGetCapabilities( context.Background(), - &csi.ControllerGetCapabilitiesRequest{}) + &csi.ControllerGetCapabilitiesRequest{ + Version: csiClientVersion, + }) Expect(err).NotTo(HaveOccurred()) Expect(caps).NotTo(BeNil()) Expect(caps.GetCapabilities()).NotTo(BeNil()) @@ -77,778 +57,204 @@ func isControllerCapabilitySupported( return false } -var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { +var _ = Describe("ControllerGetCapabilities [Controller Server]", func() { var ( c csi.ControllerClient - n csi.NodeClient - - cl *Cleanup ) BeforeEach(func() { - c = csi.NewControllerClient(sc.Conn) - n = csi.NewNodeClient(sc.Conn) - - cl = &Cleanup{ - NodeClient: n, - ControllerClient: c, - Context: sc, - } + c = csi.NewControllerClient(conn) }) - AfterEach(func() { - cl.DeleteVolumes() - }) + It("should fail when no version is provided", func() { + _, err := c.ControllerGetCapabilities( + context.Background(), + &csi.ControllerGetCapabilitiesRequest{}) + Expect(err).To(HaveOccurred()) - Describe("ControllerGetCapabilities", func() { - It("should return appropriate capabilities", func() { - caps, err := c.ControllerGetCapabilities( - context.Background(), - &csi.ControllerGetCapabilitiesRequest{}) - - By("checking successful response") - Expect(err).NotTo(HaveOccurred()) - Expect(caps).NotTo(BeNil()) - Expect(caps.GetCapabilities()).NotTo(BeNil()) - - for _, cap := range caps.GetCapabilities() { - Expect(cap.GetRpc()).NotTo(BeNil()) - - switch cap.GetRpc().GetType() { - case csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME: - case csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME: - case csi.ControllerServiceCapability_RPC_LIST_VOLUMES: - case csi.ControllerServiceCapability_RPC_GET_CAPACITY: - case csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT: - case csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS: - default: - Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) - } - } - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - Describe("GetCapacity", func() { - BeforeEach(func() { - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_GET_CAPACITY) { - Skip("GetCapacity not supported") - } - }) - - It("should return capacity (no optional values added)", func() { - _, err := c.GetCapacity( - context.Background(), - &csi.GetCapacityRequest{}) - Expect(err).NotTo(HaveOccurred()) - - // Since capacity is int64 we will not be checking it - // The value of zero is a possible value. 
- }) - }) + It("should return appropriate capabilities", func() { + caps, err := c.ControllerGetCapabilities( + context.Background(), + &csi.ControllerGetCapabilitiesRequest{ + Version: csiClientVersion, + }) - Describe("ListVolumes", func() { - BeforeEach(func() { - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_VOLUMES) { - Skip("ListVolumes not supported") - } - }) + By("checking successful response") + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + Expect(caps.GetCapabilities()).NotTo(BeNil()) - It("should return appropriate values (no optional values added)", func() { - vols, err := c.ListVolumes( - context.Background(), - &csi.ListVolumesRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(vols).NotTo(BeNil()) + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetRpc()).NotTo(BeNil()) - for _, vol := range vols.GetEntries() { - verifyVolumeInfo(vol.GetVolume()) + switch cap.GetRpc().GetType() { + case csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME: + case csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME: + case csi.ControllerServiceCapability_RPC_LIST_VOLUMES: + case csi.ControllerServiceCapability_RPC_GET_CAPACITY: + default: + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) } - }) - - // TODO: Add test to test for tokens - - // TODO: Add test which checks list of volume is there when created, - // and not there when deleted. + } }) +}) - Describe("CreateVolume", func() { - BeforeEach(func() { - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { - Skip("CreateVolume not supported") - } - }) - - It("should fail when no name is provided", func() { - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) - cl.MaybeRegisterVolume("", vol, err) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) - - It("should fail when no volume capabilities are provided", func() { - name := uniqueString("sanity-controller-create-no-volume-capabilities") - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Name: name, - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) - cl.MaybeRegisterVolume(name, vol, err) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) +var _ = Describe("GetCapacity [Controller Server]", func() { + var ( + c csi.ControllerClient + ) - It("should return appropriate values SingleNodeWriter NoCapacity Type:Mount", func() { - - By("creating a volume") - name := uniqueString("sanity-controller-create-single-no-capacity") - - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: 
vol.GetVolume().GetId()}) - - By("cleaning up deleting the volume") - - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - ControllerDeleteSecrets: sc.Secrets.DeleteVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - cl.UnregisterVolume(name) - }) + BeforeEach(func() { + c = csi.NewControllerClient(conn) - It("should return appropriate values SingleNodeWriter WithCapacity 1Gi Type:Mount", func() { - - By("creating a volume") - name := uniqueString("sanity-controller-create-single-with-capacity") - - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: TestVolumeSize(sc), - }, - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) - if serverError, ok := status.FromError(err); ok && - (serverError.Code() == codes.OutOfRange || serverError.Code() == codes.Unimplemented) { - Skip("Required bytes not supported") - } - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetId()}) - Expect(vol.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", TestVolumeSize(sc))) - - By("cleaning up deleting the volume") - - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - ControllerDeleteSecrets: sc.Secrets.DeleteVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - cl.UnregisterVolume(name) - }) - It("should not fail when requesting to create a volume with already exisiting name and same capacity.", func() { - - By("creating a volume") - name := uniqueString("sanity-controller-create-twice") - size := TestVolumeSize(sc) - - vol1, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size, - }, - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(vol1).NotTo(BeNil()) - Expect(vol1.GetVolume()).NotTo(BeNil()) - Expect(vol1.GetVolume().GetId()).NotTo(BeEmpty()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol1.GetVolume().GetId()}) - Expect(vol1.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) - - vol2, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size, - }, - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(vol2).NotTo(BeNil()) - Expect(vol2.GetVolume()).NotTo(BeNil()) - 
Expect(vol2.GetVolume().GetId()).NotTo(BeEmpty()) - Expect(vol2.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) - Expect(vol1.GetVolume().GetId()).To(Equal(vol2.GetVolume().GetId())) - - By("cleaning up deleting the volume") - - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - VolumeId: vol1.GetVolume().GetId(), - ControllerDeleteSecrets: sc.Secrets.DeleteVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - cl.UnregisterVolume(name) - }) - It("should fail when requesting to create a volume with already exisiting name and different capacity.", func() { - - By("creating a volume") - name := uniqueString("sanity-controller-create-twice-different") - size1 := TestVolumeSize(sc) - - vol1, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size1, - LimitBytes: size1, - }, - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) - Expect(err).ToNot(HaveOccurred()) - Expect(vol1).NotTo(BeNil()) - Expect(vol1.GetVolume()).NotTo(BeNil()) - Expect(vol1.GetVolume().GetId()).NotTo(BeEmpty()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol1.GetVolume().GetId()}) - size2 := 2 * TestVolumeSize(sc) - - _, err = c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size2, - LimitBytes: size2, - }, - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) - Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) - - By("cleaning up deleting the volume") - - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - VolumeId: vol1.GetVolume().GetId(), - ControllerDeleteSecrets: sc.Secrets.DeleteVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - cl.UnregisterVolume(name) - }) + if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_GET_CAPACITY) { + Skip("GetCapacity not supported") + } }) - Describe("DeleteVolume", func() { - BeforeEach(func() { - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { - Skip("DeleteVolume not supported") - } - }) - - It("should fail when no volume id is provided", func() { - - _, err := c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - ControllerDeleteSecrets: sc.Secrets.DeleteVolumeSecret, - }, - ) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) - - It("should succeed when an invalid volume id is used", func() { + It("should fail when no version is provided", func() { - _, err := c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - VolumeId: "reallyfakevolumeid", - ControllerDeleteSecrets: sc.Secrets.DeleteVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) 
- }) + By("failing when there is no version") + _, err := c.GetCapacity( + context.Background(), + &csi.GetCapacityRequest{}) + Expect(err).To(HaveOccurred()) - It("should return appropriate values (no optional values added)", func() { - - // Create Volume First - By("creating a volume") - name := uniqueString("sanity-controller-create-appropriate") - - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetId()}) - - // Delete Volume - By("deleting a volume") - - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - ControllerDeleteSecrets: sc.Secrets.DeleteVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - cl.UnregisterVolume(name) - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - Describe("ValidateVolumeCapabilities", func() { - It("should fail when no volume id is provided", func() { - - _, err := c.ValidateVolumeCapabilities( - context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{}) - Expect(err).To(HaveOccurred()) + It("should return capacity (no optional values added)", func() { + _, err := c.GetCapacity( + context.Background(), + &csi.GetCapacityRequest{ + Version: csiClientVersion, + }) + Expect(err).NotTo(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + // Since capacity is uint64 we will not be checking it + // The value of zero is a possible value. 
+ }) +}) - It("should fail when no volume capabilities are provided", func() { +var _ = Describe("ListVolumes [Controller Server]", func() { + var ( + c csi.ControllerClient + ) - _, err := c.ValidateVolumeCapabilities( - context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{ - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) + BeforeEach(func() { + c = csi.NewControllerClient(conn) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_VOLUMES) { + Skip("ListVolumes not supported") + } + }) - It("should return appropriate values (no optional values added)", func() { - - // Create Volume First - By("creating a single node writer volume") - name := uniqueString("sanity-controller-validate") - - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetId()}) - - // ValidateVolumeCapabilities - By("validating volume capabilities") - valivolcap, err := c.ValidateVolumeCapabilities( - context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{ - VolumeId: vol.GetVolume().GetId(), - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(valivolcap).NotTo(BeNil()) - Expect(valivolcap.GetSupported()).To(BeTrue()) - - By("cleaning up deleting the volume") - - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - ControllerDeleteSecrets: sc.Secrets.DeleteVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - cl.UnregisterVolume(name) - }) + It("should fail when no version is provided", func() { - It("should fail when the requested volume does not exist", func() { - - _, err := c.ValidateVolumeCapabilities( - context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{ - VolumeId: "some-vol-id", - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - }, - ) - Expect(err).To(HaveOccurred()) + By("failing when there is no version") + _, err := c.ListVolumes( + context.Background(), + &csi.ListVolumesRequest{}) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.NotFound)) - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - Describe("ControllerPublishVolume", func() { - BeforeEach(func() { - if !isControllerCapabilitySupported(c, 
csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { - Skip("ControllerPublishVolume not supported") - } - }) + It("should return appropriate values (no optional values added)", func() { + vols, err := c.ListVolumes( + context.Background(), + &csi.ListVolumesRequest{ + Version: csiClientVersion, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(vols).NotTo(BeNil()) + Expect(vols.GetEntries()).NotTo(BeNil()) - It("should fail when no volume id is provided", func() { + for _, vol := range vols.GetEntries() { + verifyVolumeInfo(vol.GetVolumeInfo()) + } + }) - _, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - ControllerPublishSecrets: sc.Secrets.ControllerPublishVolumeSecret, - }, - ) - Expect(err).To(HaveOccurred()) + // TODO: Add test to test for tokens - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + // TODO: Add test which checks list of volume is there when created, + // and not there when deleted. +}) - It("should fail when no node id is provided", func() { +var _ = Describe("CreateVolume [Controller Server]", func() { + var ( + c csi.ControllerClient + ) - _, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - VolumeId: "id", - ControllerPublishSecrets: sc.Secrets.ControllerPublishVolumeSecret, - }, - ) - Expect(err).To(HaveOccurred()) + BeforeEach(func() { + c = csi.NewControllerClient(conn) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { + Skip("CreateVolume not supported") + } + }) - It("should fail when no volume capability is provided", func() { + It("should fail when no version is provided", func() { - _, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - VolumeId: "id", - NodeId: "fakenode", - ControllerPublishSecrets: sc.Secrets.ControllerPublishVolumeSecret, - }, - ) - Expect(err).To(HaveOccurred()) + _, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{}) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - It("should return appropriate values (no optional values added)", func() { - - // Create Volume First - By("creating a single node writer volume") - name := uniqueString("sanity-controller-publish") - - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetId()}) - - By("getting a node id") - nid, err := n.NodeGetId( - context.Background(), - &csi.NodeGetIdRequest{}) - 
Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - - // ControllerPublishVolume - By("calling controllerpublish on that volume") - - conpubvol, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - NodeId: nid.GetNodeId(), - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - Readonly: false, - ControllerPublishSecrets: sc.Secrets.ControllerPublishVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetId(), NodeID: nid.GetNodeId()}) - Expect(conpubvol).NotTo(BeNil()) - - By("cleaning up unpublishing the volume") - - conunpubvol, err := c.ControllerUnpublishVolume( - context.Background(), - &csi.ControllerUnpublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - // NodeID is optional in ControllerUnpublishVolume - NodeId: nid.GetNodeId(), - ControllerUnpublishSecrets: sc.Secrets.ControllerUnpublishVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(conunpubvol).NotTo(BeNil()) + It("should fail when no name is provided", func() { - By("cleaning up deleting the volume") + _, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Version: csiClientVersion, + }) + Expect(err).To(HaveOccurred()) - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - ControllerDeleteSecrets: sc.Secrets.DeleteVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - cl.UnregisterVolume(name) - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - It("should fail when the volume does not exist", func() { + It("should fail when no volume capabilities are provided", func() { - By("calling controller publish on a non-existent volume") + _, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Version: csiClientVersion, + Name: "name", + }) + Expect(err).To(HaveOccurred()) - conpubvol, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - VolumeId: "some-vol-id", - NodeId: "some-node-id", - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - Readonly: false, - ControllerPublishSecrets: sc.Secrets.ControllerPublishVolumeSecret, - }, - ) - Expect(err).To(HaveOccurred()) - Expect(conpubvol).To(BeNil()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.NotFound)) - }) + It("should return appropriate values SingleNodeWriter NoCapacity Type:Mount", func() { - It("should fail when the node does not exist", func() { - - // Create Volume First - By("creating a single node writer volume") - name := uniqueString("sanity-controller-wrong-node") - - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: 
[]*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetId()}) - - // ControllerPublishVolume - By("calling controllerpublish on that volume") - - conpubvol, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - NodeId: "some-fake-node-id", - VolumeCapability: &csi.VolumeCapability{ + By("creating a volume") + name := "sanity" + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Version: csiClientVersion, + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, }, @@ -856,197 +262,36 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - Readonly: false, - ControllerPublishSecrets: sc.Secrets.ControllerPublishVolumeSecret, - }, - ) - Expect(err).To(HaveOccurred()) - Expect(conpubvol).To(BeNil()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.NotFound)) - - By("cleaning up deleting the volume") - - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - ControllerDeleteSecrets: sc.Secrets.DeleteVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - cl.UnregisterVolume(name) - }) - - It("should fail when the volume is already published but is incompatible", func() { - - // Create Volume First - By("creating a single node writer volume") - name := uniqueString("sanity-controller-published-incompatible") - - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetId()}) - - By("getting a node id") - nid, err := n.NodeGetId( - context.Background(), - &csi.NodeGetIdRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - - // ControllerPublishVolume - By("calling controllerpublish on that volume") - - pubReq := &csi.ControllerPublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - NodeId: nid.GetNodeId(), - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - Readonly: false, - ControllerPublishSecrets: 
sc.Secrets.ControllerPublishVolumeSecret, - } - - conpubvol, err := c.ControllerPublishVolume(context.Background(), pubReq) - Expect(err).NotTo(HaveOccurred()) - Expect(conpubvol).NotTo(BeNil()) - - // Publish again with different attributes. - pubReq.Readonly = true - - conpubvol, err = c.ControllerPublishVolume(context.Background(), pubReq) - Expect(err).To(HaveOccurred()) - Expect(conpubvol).To(BeNil()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) - - By("cleaning up unpublishing the volume") - - conunpubvol, err := c.ControllerUnpublishVolume( - context.Background(), - &csi.ControllerUnpublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - // NodeID is optional in ControllerUnpublishVolume - NodeId: nid.GetNodeId(), - ControllerUnpublishSecrets: sc.Secrets.ControllerUnpublishVolumeSecret, }, - ) - - Expect(err).NotTo(HaveOccurred()) - Expect(conunpubvol).NotTo(BeNil()) - - By("cleaning up deleting the volume") + }) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolumeInfo()).NotTo(BeNil()) + Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - ControllerDeleteSecrets: sc.Secrets.DeleteVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - cl.UnregisterVolume(name) - }) + By("cleaning up deleting the volume") + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + }) + Expect(err).NotTo(HaveOccurred()) }) - Describe("ControllerUnpublishVolume", func() { - BeforeEach(func() { - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { - Skip("ControllerUnpublishVolume not supported") - } - }) - - It("should fail when no volume id is provided", func() { - - _, err := c.ControllerUnpublishVolume( - context.Background(), - &csi.ControllerUnpublishVolumeRequest{ - ControllerUnpublishSecrets: sc.Secrets.ControllerUnpublishVolumeSecret, - }, - ) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + // Pending fix in mock file + It("[MOCKERRORS] should return appropriate values SingleNodeWriter WithCapacity 1Gi Type:Mount", func() { - It("should return appropriate values (no optional values added)", func() { - - // Create Volume First - By("creating a single node writer volume") - name := uniqueString("sanity-controller-unpublish") - - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetId()}) - - By("getting a node id") - nid, err := n.NodeGetId( - context.Background(), - &csi.NodeGetIdRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - - 
// ControllerPublishVolume - By("calling controllerpublish on that volume") - - conpubvol, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - NodeId: nid.GetNodeId(), - VolumeCapability: &csi.VolumeCapability{ + By("creating a volume") + name := "sanity" + size := uint64(1 * 1024 * 1024 * 1024) + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Version: csiClientVersion, + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, }, @@ -1054,360 +299,261 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - Readonly: false, - ControllerPublishSecrets: sc.Secrets.ControllerPublishVolumeSecret, }, - ) - Expect(err).NotTo(HaveOccurred()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetId(), NodeID: nid.GetNodeId()}) - Expect(conpubvol).NotTo(BeNil()) - - // ControllerUnpublishVolume - By("calling controllerunpublish on that volume") - - conunpubvol, err := c.ControllerUnpublishVolume( - context.Background(), - &csi.ControllerUnpublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - // NodeID is optional in ControllerUnpublishVolume - NodeId: nid.GetNodeId(), - ControllerUnpublishSecrets: sc.Secrets.ControllerUnpublishVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(conunpubvol).NotTo(BeNil()) - - By("cleaning up deleting the volume") - - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - ControllerDeleteSecrets: sc.Secrets.DeleteVolumeSecret, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size, }, - ) - Expect(err).NotTo(HaveOccurred()) - cl.UnregisterVolume(name) - }) - }) -}) - -var _ = DescribeSanity("ListSnapshots [Controller Server]", func(sc *SanityContext) { - var ( - c csi.ControllerClient - ) - - BeforeEach(func() { - c = csi.NewControllerClient(sc.Conn) - - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS) { - Skip("ListSnapshots not supported") - } - }) - - It("should return appropriate values (no optional values added)", func() { - snapshots, err := c.ListSnapshots( - context.Background(), - &csi.ListSnapshotsRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(snapshots).NotTo(BeNil()) - - for _, snapshot := range snapshots.GetEntries() { - verifySnapshotInfo(snapshot.GetSnapshot()) - } - }) - - It("should return snapshots that match the specify snapshot id", func() { - - By("creating a volume") - volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-1") - volume, err := c.CreateVolume(context.Background(), volReq) - Expect(err).NotTo(HaveOccurred()) - - By("creating a snapshot") - snapshotReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-1", volume.GetVolume().GetId(), nil) - snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) - Expect(err).NotTo(HaveOccurred()) - - snapshots, err := c.ListSnapshots( - context.Background(), - &csi.ListSnapshotsRequest{SnapshotId: snapshot.GetSnapshot().GetId()}) - Expect(err).NotTo(HaveOccurred()) - Expect(snapshots).NotTo(BeNil()) - Expect(len(snapshots.GetEntries())).To(BeNumerically("==", 1)) - verifySnapshotInfo(snapshots.GetEntries()[0].GetSnapshot()) - Expect(snapshots.GetEntries()[0].GetSnapshot().GetId()).To(Equal(snapshot.GetSnapshot().GetId())) - 
- By("cleaning up deleting the volume") - delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetId()) - _, err = c.DeleteVolume(context.Background(), delVolReq) - Expect(err).NotTo(HaveOccurred()) - - By("cleaning up deleting the snapshot") - delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetId()) - _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + }) Expect(err).NotTo(HaveOccurred()) - }) - - It("should return empty when the specify snapshot id is not exist", func() { + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolumeInfo()).NotTo(BeNil()) + Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) + Expect(vol.GetVolumeInfo().GetCapacityBytes()).To(Equal(size)) - snapshots, err := c.ListSnapshots( + By("cleaning up deleting the volume") + _, err = c.DeleteVolume( context.Background(), - &csi.ListSnapshotsRequest{SnapshotId: "none-exist-id"}) + &csi.DeleteVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + }) Expect(err).NotTo(HaveOccurred()) - Expect(snapshots).NotTo(BeNil()) - Expect(snapshots.GetEntries()).To(BeEmpty()) }) +}) - It("should return snapshots that match the specify source volume id)", func() { - - By("creating a volume") - volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-2") - volume, err := c.CreateVolume(context.Background(), volReq) - Expect(err).NotTo(HaveOccurred()) +var _ = Describe("DeleteVolume [Controller Server]", func() { + var ( + c csi.ControllerClient + ) - By("creating a snapshot") - snapshotReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-2", volume.GetVolume().GetId(), nil) - snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) - Expect(err).NotTo(HaveOccurred()) + BeforeEach(func() { + c = csi.NewControllerClient(conn) - snapshots, err := c.ListSnapshots( - context.Background(), - &csi.ListSnapshotsRequest{SourceVolumeId: snapshot.GetSnapshot().GetSourceVolumeId()}) - Expect(err).NotTo(HaveOccurred()) - Expect(snapshots).NotTo(BeNil()) - for _, snap := range snapshots.GetEntries() { - verifySnapshotInfo(snap.GetSnapshot()) - Expect(snap.GetSnapshot().GetSourceVolumeId()).To(Equal(snapshot.GetSnapshot().GetSourceVolumeId())) + if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { + Skip("DeleteVolume not supported") } - - By("cleaning up deleting the snapshot") - delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetId()) - _, err = c.DeleteSnapshot(context.Background(), delSnapReq) - Expect(err).NotTo(HaveOccurred()) - - By("cleaning up deleting the volume") - delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetId()) - _, err = c.DeleteVolume(context.Background(), delVolReq) - Expect(err).NotTo(HaveOccurred()) }) - It("should return empty when the specify source volume id is not exist", func() { - - snapshots, err := c.ListSnapshots( - context.Background(), - &csi.ListSnapshotsRequest{SourceVolumeId: "none-exist-volume-id"}) - Expect(err).NotTo(HaveOccurred()) - Expect(snapshots).NotTo(BeNil()) - Expect(snapshots.GetEntries()).To(BeEmpty()) - }) + It("should fail when no version is provided", func() { - It("should fail when an invalid starting_token is passed", func() { - vols, err := c.ListSnapshots( + _, err := c.DeleteVolume( context.Background(), - &csi.ListSnapshotsRequest{ - StartingToken: "invalid-token", - }, - ) + &csi.DeleteVolumeRequest{}) Expect(err).To(HaveOccurred()) - Expect(vols).To(BeNil()) serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) - 
Expect(serverError.Code()).To(Equal(codes.Aborted)) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should fail when the starting_token is greater than total number of snapshots", func() { - // Get total number of snapshots. - snapshots, err := c.ListSnapshots( - context.Background(), - &csi.ListSnapshotsRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(snapshots).NotTo(BeNil()) - - totalSnapshots := len(snapshots.GetEntries()) + It("should fail when no volume id is provided", func() { - // Send starting_token that is greater than the total number of snapshots. - snapshots, err = c.ListSnapshots( + _, err := c.DeleteVolume( context.Background(), - &csi.ListSnapshotsRequest{ - StartingToken: strconv.Itoa(totalSnapshots + 5), - }, - ) + &csi.DeleteVolumeRequest{ + Version: csiClientVersion, + }) Expect(err).To(HaveOccurred()) - Expect(snapshots).To(BeNil()) serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.Aborted)) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("check the presence of new snapshots in the snapshot list", func() { - // List Snapshots before creating new snapshots. - snapshots, err := c.ListSnapshots( + It("should succeed when an invalid volume id is used", func() { + + _, err := c.DeleteVolume( context.Background(), - &csi.ListSnapshotsRequest{}) + &csi.DeleteVolumeRequest{ + Version: csiClientVersion, + VolumeId: "reallyfakevolumeid", + }) Expect(err).NotTo(HaveOccurred()) - Expect(snapshots).NotTo(BeNil()) + }) - totalSnapshots := len(snapshots.GetEntries()) + It("should return appropriate values (no optional values added)", func() { + // Create Volume First By("creating a volume") - volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-3") - volume, err := c.CreateVolume(context.Background(), volReq) - Expect(err).NotTo(HaveOccurred()) - - By("creating a snapshot") - snapReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-3", volume.GetVolume().GetId(), nil) - snapshot, err := c.CreateSnapshot(context.Background(), snapReq) - Expect(err).NotTo(HaveOccurred()) - Expect(snapshot).NotTo(BeNil()) - verifySnapshotInfo(snapshot.GetSnapshot()) - - snapshots, err = c.ListSnapshots( + name := "sanity" + vol, err := c.CreateVolume( context.Background(), - &csi.ListSnapshotsRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(snapshots).NotTo(BeNil()) - Expect(len(snapshots.GetEntries())).To(Equal(totalSnapshots + 1)) - - By("cleaning up deleting the snapshot") - delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetId()) - _, err = c.DeleteSnapshot(context.Background(), delSnapReq) - Expect(err).NotTo(HaveOccurred()) + &csi.CreateVolumeRequest{ + Version: csiClientVersion, + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }) - By("cleaning up deleting the volume") - delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetId()) - _, err = c.DeleteVolume(context.Background(), delVolReq) Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolumeInfo()).NotTo(BeNil()) + Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) - // List snapshots and check if the deleted snapshot exists in the snapshot list. 
- snapshots, err = c.ListSnapshots( + // Delete Volume + By("deleting a volume") + _, err = c.DeleteVolume( context.Background(), - &csi.ListSnapshotsRequest{}) + &csi.DeleteVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + }) Expect(err).NotTo(HaveOccurred()) - Expect(snapshots).NotTo(BeNil()) - Expect(len(snapshots.GetEntries())).To(Equal(totalSnapshots)) }) +}) - It("should return next token when a limited number of entries are requested", func() { - // minSnapshotCount is the minimum number of snapshots expected to exist, - // based on which paginated snapshot listing is performed. - minSnapshotCount := 5 - // maxEntried is the maximum entries in list snapshot request. - maxEntries := 2 - // currentTotalVols is the total number of volumes at a given time. It - // is used to verify that all the snapshots have been listed. - currentTotalSnapshots := 0 - - // Get the number of existing volumes. - snapshots, err := c.ListSnapshots( - context.Background(), - &csi.ListSnapshotsRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(snapshots).NotTo(BeNil()) - - initialTotalSnapshots := len(snapshots.GetEntries()) - currentTotalSnapshots = initialTotalSnapshots - - createVols := make([]*csi.Volume, 0) - createSnapshots := make([]*csi.Snapshot, 0) +var _ = Describe("ValidateVolumeCapabilities [Controller Server]", func() { + var ( + c csi.ControllerClient + ) - // Ensure minimum minVolCount volumes exist. - if initialTotalSnapshots < minSnapshotCount { + BeforeEach(func() { + c = csi.NewControllerClient(conn) + }) - By("creating required new volumes") - requiredSnapshots := minSnapshotCount - initialTotalSnapshots + It("should fail when no version is provided", func() { - for i := 1; i <= requiredSnapshots; i++ { - volReq := MakeCreateVolumeReq(sc, "volume"+strconv.Itoa(i)) - volume, err := c.CreateVolume(context.Background(), volReq) - Expect(err).NotTo(HaveOccurred()) - Expect(volume).NotTo(BeNil()) - createVols = append(createVols, volume.GetVolume()) + _, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{}) + Expect(err).To(HaveOccurred()) - snapReq := MakeCreateSnapshotReq(sc, "snapshot"+strconv.Itoa(i), volume.GetVolume().GetId(), nil) - snapshot, err := c.CreateSnapshot(context.Background(), snapReq) - Expect(err).NotTo(HaveOccurred()) - Expect(snapshot).NotTo(BeNil()) - verifySnapshotInfo(snapshot.GetSnapshot()) - createSnapshots = append(createSnapshots, snapshot.GetSnapshot()) - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - // Update the current total snapshots count. - currentTotalSnapshots += requiredSnapshots - } + It("should fail when no volume id is provided", func() { - // Request list snapshots with max entries maxEntries. 
- snapshots, err = c.ListSnapshots( + _, err := c.ValidateVolumeCapabilities( context.Background(), - &csi.ListSnapshotsRequest{ - MaxEntries: int32(maxEntries), + &csi.ValidateVolumeCapabilitiesRequest{ + Version: csiClientVersion, }) - Expect(err).NotTo(HaveOccurred()) - Expect(snapshots).NotTo(BeNil()) + Expect(err).To(HaveOccurred()) - nextToken := snapshots.GetNextToken() + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - Expect(nextToken).To(Equal(strconv.Itoa(maxEntries))) - Expect(len(snapshots.GetEntries())).To(Equal(maxEntries)) + It("should fail when no volume capabilities are provided", func() { - // Request list snapshots with starting_token and no max entries. - snapshots, err = c.ListSnapshots( + _, err := c.ValidateVolumeCapabilities( context.Background(), - &csi.ListSnapshotsRequest{ - StartingToken: nextToken, + &csi.ValidateVolumeCapabilitiesRequest{ + Version: csiClientVersion, + VolumeId: "id", }) - Expect(err).NotTo(HaveOccurred()) - Expect(snapshots).NotTo(BeNil()) + Expect(err).To(HaveOccurred()) - // Ensure that all the remaining entries are returned at once. - Expect(len(snapshots.GetEntries())).To(Equal(currentTotalSnapshots - maxEntries)) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - if initialTotalSnapshots < minSnapshotCount { + It("should return appropriate values (no optional values added)", func() { - By("cleaning up deleting the snapshots") + // Create Volume First + By("creating a single node writer volume") + name := "sanity" + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Version: csiClientVersion, + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }) - for _, snap := range createSnapshots { - delSnapReq := MakeDeleteSnapshotReq(sc, snap.GetId()) - _, err = c.DeleteSnapshot(context.Background(), delSnapReq) - Expect(err).NotTo(HaveOccurred()) - } + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolumeInfo()).NotTo(BeNil()) + Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) - By("cleaning up deleting the volumes") + // ValidateVolumeCapabilities + By("validating volume capabilities") + valivolcap, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + VolumeCapabilities: []*csi.VolumeCapability{ + &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(valivolcap).NotTo(BeNil()) + Expect(valivolcap.GetSupported()).To(BeTrue()) - for _, vol := range createVols { - delVolReq := MakeDeleteVolumeReq(sc, vol.GetId()) - _, err = c.DeleteVolume(context.Background(), delVolReq) - Expect(err).NotTo(HaveOccurred()) - } - } + By("cleaning up deleting the volume") + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + }) + 
Expect(err).NotTo(HaveOccurred()) }) - }) -var _ = DescribeSanity("DeleteSnapshot [Controller Server]", func(sc *SanityContext) { +var _ = Describe("ControllerPublishVolume [Controller Server]", func() { var ( c csi.ControllerClient + n csi.NodeClient ) BeforeEach(func() { - c = csi.NewControllerClient(sc.Conn) + c = csi.NewControllerClient(conn) + n = csi.NewNodeClient(conn) - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { - Skip("DeleteSnapshot not supported") + if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { + Skip("ControllerPublishVolume not supported") } }) - It("should fail when no snapshot id is provided", func() { + It("should fail when no version is provided", func() { - req := &csi.DeleteSnapshotRequest{} + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{}) + Expect(err).To(HaveOccurred()) - if sc.Secrets != nil { - req.DeleteSnapshotSecrets = sc.Secrets.DeleteSnapshotSecret - } + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no volume id is provided", func() { - _, err := c.DeleteSnapshot(context.Background(), req) + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + Version: csiClientVersion, + }) Expect(err).To(HaveOccurred()) serverError, ok := status.FromError(err) @@ -1415,212 +561,235 @@ var _ = DescribeSanity("DeleteSnapshot [Controller Server]", func(sc *SanityCont Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should succeed when an invalid snapshot id is used", func() { + It("should fail when no node id is provided", func() { - req := MakeDeleteSnapshotReq(sc, "reallyfakesnapshotid") - _, err := c.DeleteSnapshot(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + Version: csiClientVersion, + VolumeId: "id", + }) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no volume capability is provided", func() { + + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + Version: csiClientVersion, + VolumeId: "id", + NodeId: "fakenode", + }) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) It("should return appropriate values (no optional values added)", func() { - By("creating a volume") - volReq := MakeCreateVolumeReq(sc, "DeleteSnapshot-volume-1") - volume, err := c.CreateVolume(context.Background(), volReq) + // Create Volume First + By("creating a single node writer volume") + name := "sanity" + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Version: csiClientVersion, + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolumeInfo()).NotTo(BeNil()) + 
Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) + + By("getting a node id") + nid, err := n.GetNodeID( + context.Background(), + &csi.GetNodeIDRequest{ + Version: csiClientVersion, + }) Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) - // Create Snapshot First - By("creating a snapshot") - snapshotReq := MakeCreateSnapshotReq(sc, "DeleteSnapshot-snapshot-1", volume.GetVolume().GetId(), nil) - snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) + // ControllerPublishVolume + By("calling controllerpublish on that volume") + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + }) Expect(err).NotTo(HaveOccurred()) - Expect(snapshot).NotTo(BeNil()) - verifySnapshotInfo(snapshot.GetSnapshot()) + Expect(conpubvol).NotTo(BeNil()) - By("cleaning up deleting the snapshot") - delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetId()) - _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + By("cleaning up unpublishing the volume") + conunpubvol, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + }) Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) By("cleaning up deleting the volume") - delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetId()) - _, err = c.DeleteVolume(context.Background(), delVolReq) + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + }) Expect(err).NotTo(HaveOccurred()) }) }) -var _ = DescribeSanity("CreateSnapshot [Controller Server]", func(sc *SanityContext) { +var _ = Describe("ControllerUnpublishVolume [Controller Server]", func() { var ( c csi.ControllerClient + n csi.NodeClient ) BeforeEach(func() { - c = csi.NewControllerClient(sc.Conn) + c = csi.NewControllerClient(conn) + n = csi.NewNodeClient(conn) - if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { - Skip("CreateSnapshot not supported") + if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { + Skip("ControllerUnpublishVolume not supported") } }) - It("should fail when no name is provided", func() { - - req := &csi.CreateSnapshotRequest{ - SourceVolumeId: "testId", - } - - if sc.Secrets != nil { - req.CreateSnapshotSecrets = sc.Secrets.CreateSnapshotSecret - } + It("should fail when no version is provided", func() { - _, err := c.CreateSnapshot(context.Background(), req) + _, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{}) Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should fail when no source volume id is provided", func() { - - req := &csi.CreateSnapshotRequest{ - Name: "name", - } - - if sc.Secrets != nil { - 
req.CreateSnapshotSecrets = sc.Secrets.CreateSnapshotSecret - } + It("should fail when no volume id is provided", func() { - _, err := c.CreateSnapshot(context.Background(), req) + _, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + Version: csiClientVersion, + }) Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should not fail when requesting to create a snapshot with already existing name and same SourceVolumeId.", func() { - - By("creating a volume") - volReq := MakeCreateVolumeReq(sc, "CreateSnapshot-volume-1") - volume, err := c.CreateVolume(context.Background(), volReq) - Expect(err).NotTo(HaveOccurred()) - - By("creating a snapshot") - snapReq1 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-1", volume.GetVolume().GetId(), nil) - snap1, err := c.CreateSnapshot(context.Background(), snapReq1) - Expect(err).NotTo(HaveOccurred()) - Expect(snap1).NotTo(BeNil()) - verifySnapshotInfo(snap1.GetSnapshot()) - - snap2, err := c.CreateSnapshot(context.Background(), snapReq1) - Expect(err).NotTo(HaveOccurred()) - Expect(snap2).NotTo(BeNil()) - verifySnapshotInfo(snap2.GetSnapshot()) + It("should return appropriate values (no optional values added)", func() { - By("cleaning up deleting the snapshot") - delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetId()) - _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + // Create Volume First + By("creating a single node writer volume") + name := "sanity" + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Version: csiClientVersion, + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }) Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolumeInfo()).NotTo(BeNil()) + Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) - By("cleaning up deleting the volume") - delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetId()) - _, err = c.DeleteVolume(context.Background(), delVolReq) + By("getting a node id") + nid, err := n.GetNodeID( + context.Background(), + &csi.GetNodeIDRequest{ + Version: csiClientVersion, + }) Expect(err).NotTo(HaveOccurred()) - }) - - It("should fail when requesting to create a snapshot with already existing name and different SourceVolumeId.", func() { + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) - By("creating a volume") - volume, err := c.CreateVolume(context.Background(), MakeCreateVolumeReq(sc, "CreateSnapshot-volume-2")) - Expect(err).ToNot(HaveOccurred()) - - By("creating a snapshot with the created volume source id") - req1 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-2", volume.GetVolume().GetId(), nil) - snap1, err := c.CreateSnapshot(context.Background(), req1) + // ControllerPublishVolume + By("calling controllerpublish on that volume") + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + 
AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + }) Expect(err).NotTo(HaveOccurred()) - Expect(snap1).NotTo(BeNil()) - verifySnapshotInfo(snap1.GetSnapshot()) - - By("creating a snapshot with the same name but different volume source id") - req2 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-2", "test001", nil) - _, err = c.CreateSnapshot(context.Background(), req2) - Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) + Expect(conpubvol).NotTo(BeNil()) - By("cleaning up deleting the snapshot") - delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetId()) - _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + // ControllerUnpublishVolume + By("calling controllerunpublish on that volume") + conunpubvol, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + }) Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) By("cleaning up deleting the volume") - delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetId()) - _, err = c.DeleteVolume(context.Background(), delVolReq) + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + }) Expect(err).NotTo(HaveOccurred()) }) }) - -func MakeCreateVolumeReq(sc *SanityContext, name string) *csi.CreateVolumeRequest { - size1 := TestVolumeSize(sc) - - req := &csi.CreateVolumeRequest{ - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size1, - LimitBytes: size1, - }, - } - - if sc.Secrets != nil { - req.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret - } - - return req -} - -func MakeCreateSnapshotReq(sc *SanityContext, name, sourceVolumeId string, parameters map[string]string) *csi.CreateSnapshotRequest { - req := &csi.CreateSnapshotRequest{ - Name: name, - SourceVolumeId: sourceVolumeId, - Parameters: parameters, - } - - if sc.Secrets != nil { - req.CreateSnapshotSecrets = sc.Secrets.CreateSnapshotSecret - } - - return req -} - -func MakeDeleteSnapshotReq(sc *SanityContext, id string) *csi.DeleteSnapshotRequest { - delSnapReq := &csi.DeleteSnapshotRequest{ - SnapshotId: id, - } - - if sc.Secrets != nil { - delSnapReq.DeleteSnapshotSecrets = sc.Secrets.DeleteSnapshotSecret - } - - return delSnapReq -} - -func MakeDeleteVolumeReq(sc *SanityContext, id string) *csi.DeleteVolumeRequest { - delVolReq := &csi.DeleteVolumeRequest{ - VolumeId: id, - } - - if sc.Secrets != nil { - delVolReq.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret - } - - return delVolReq -} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go index e60439b3..facdf39d 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go @@ -17,83 +17,86 @@ limitations 
under the License. package sanity import ( - "context" - "fmt" "regexp" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" + context "golang.org/x/net/context" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -var _ = DescribeSanity("Identity Service", func(sc *SanityContext) { +var ( + csiClientVersion = &csi.Version{ + Major: 0, + Minor: 1, + Patch: 0, + } +) + +var _ = Describe("GetSupportedVersions [Identity Server]", func() { var ( c csi.IdentityClient ) BeforeEach(func() { - c = csi.NewIdentityClient(sc.Conn) + c = csi.NewIdentityClient(conn) }) - Describe("GetPluginCapabilities", func() { - It("should return appropriate capabilities", func() { - req := &csi.GetPluginCapabilitiesRequest{} - res, err := c.GetPluginCapabilities(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(res).NotTo(BeNil()) - - By("checking successful response") - Expect(res.GetCapabilities()).NotTo(BeNil()) - for _, cap := range res.GetCapabilities() { - switch cap.GetService().GetType() { - case csi.PluginCapability_Service_CONTROLLER_SERVICE: - case csi.PluginCapability_Service_ACCESSIBILITY_CONSTRAINTS: - default: - Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetService().GetType())) - } - } - - }) + It("should return an array of supported versions", func() { + res, err := c.GetSupportedVersions( + context.Background(), + &csi.GetSupportedVersionsRequest{}) + + By("checking response to have supported versions list") + Expect(err).NotTo(HaveOccurred()) + Expect(res.GetSupportedVersions()).NotTo(BeNil()) + Expect(len(res.GetSupportedVersions()) >= 1).To(BeTrue()) + + By("checking each version") + for _, version := range res.GetSupportedVersions() { + Expect(version).NotTo(BeNil()) + Expect(version.GetMajor()).To(BeNumerically("<", 100)) + Expect(version.GetMinor()).To(BeNumerically("<", 100)) + Expect(version.GetPatch()).To(BeNumerically("<", 100)) + } + }) +}) + +var _ = Describe("GetPluginInfo [Identity Server]", func() { + var ( + c csi.IdentityClient + ) + BeforeEach(func() { + c = csi.NewIdentityClient(conn) }) - Describe("Probe", func() { - It("should return appropriate information", func() { - req := &csi.ProbeRequest{} - res, err := c.Probe(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(res).NotTo(BeNil()) - - By("verifying return status") - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code() == codes.FailedPrecondition || - serverError.Code() == codes.OK).To(BeTrue()) - - if res.GetReady() != nil { - Expect(res.GetReady().GetValue() == true || - res.GetReady().GetValue() == false).To(BeTrue()) - } - }) + It("should fail when no version is provided", func() { + _, err := c.GetPluginInfo(context.Background(), &csi.GetPluginInfoRequest{}) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - Describe("GetPluginInfo", func() { - It("should return appropriate information", func() { - req := &csi.GetPluginInfoRequest{} - res, err := c.GetPluginInfo(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(res).NotTo(BeNil()) - - By("verifying name size and characters") - Expect(res.GetName()).ToNot(HaveLen(0)) - Expect(len(res.GetName())).To(BeNumerically("<=", 63)) - Expect(regexp. 
- MustCompile("^[a-zA-Z][A-Za-z0-9-\\.\\_]{0,61}[a-zA-Z]$"). - MatchString(res.GetName())).To(BeTrue()) - }) + It("should return appropriate information", func() { + req := &csi.GetPluginInfoRequest{ + Version: csiClientVersion, + } + res, err := c.GetPluginInfo(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + By("verifying name size and characters") + Expect(res.GetName()).ToNot(HaveLen(0)) + Expect(len(res.GetName())).To(BeNumerically("<=", 63)) + Expect(regexp. + MustCompile("^[a-zA-Z][A-Za-z0-9-\\.\\_]{0,61}[a-zA-Z]$"). + MatchString(res.GetName())).To(BeTrue()) }) }) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go index a98f5151..2d4734df 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go @@ -17,248 +17,215 @@ limitations under the License. package sanity import ( - "context" "fmt" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" + context "golang.org/x/net/context" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -func isNodeCapabilitySupported(c csi.NodeClient, - capType csi.NodeServiceCapability_RPC_Type, -) bool { - - caps, err := c.NodeGetCapabilities( - context.Background(), - &csi.NodeGetCapabilitiesRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(caps).NotTo(BeNil()) - - for _, cap := range caps.GetCapabilities() { - Expect(cap.GetRpc()).NotTo(BeNil()) - if cap.GetRpc().GetType() == capType { - return true - } - } - return false -} - -func isPluginCapabilitySupported(c csi.IdentityClient, - capType csi.PluginCapability_Service_Type, -) bool { - - caps, err := c.GetPluginCapabilities( - context.Background(), - &csi.GetPluginCapabilitiesRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(caps).NotTo(BeNil()) - Expect(caps.GetCapabilities()).NotTo(BeNil()) - - for _, cap := range caps.GetCapabilities() { - Expect(cap.GetService()).NotTo(BeNil()) - if cap.GetService().GetType() == capType { - return true - } - } - return false -} +var ( + csiTargetPath = "/mnt/csi" +) -var _ = DescribeSanity("Node Service", func(sc *SanityContext) { +var _ = Describe("NodeGetCapabilities [Node Server]", func() { var ( - cl *Cleanup - c csi.NodeClient - s csi.ControllerClient - - controllerPublishSupported bool - nodeStageSupported bool + c csi.NodeClient ) BeforeEach(func() { - c = csi.NewNodeClient(sc.Conn) - s = csi.NewControllerClient(sc.Conn) - - controllerPublishSupported = isControllerCapabilitySupported( - s, - csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) - nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) - if nodeStageSupported { - err := createMountTargetLocation(sc.Config.StagingPath) - Expect(err).NotTo(HaveOccurred()) - } - cl = &Cleanup{ - Context: sc, - NodeClient: c, - ControllerClient: s, - ControllerPublishSupported: controllerPublishSupported, - NodeStageSupported: nodeStageSupported, - } + c = csi.NewNodeClient(conn) }) - AfterEach(func() { - cl.DeleteVolumes() + It("should fail when no version is provided", func() { + _, err := c.NodeGetCapabilities( + context.Background(), + &csi.NodeGetCapabilitiesRequest{}) + Expect(err).To(HaveOccurred()) + + serverError, ok := 
status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - Describe("NodeGetCapabilities", func() { - It("should return appropriate capabilities", func() { - caps, err := c.NodeGetCapabilities( - context.Background(), - &csi.NodeGetCapabilitiesRequest{}) + It("should return appropriate capabilities", func() { + caps, err := c.NodeGetCapabilities( + context.Background(), + &csi.NodeGetCapabilitiesRequest{ + Version: csiClientVersion, + }) - By("checking successful response") - Expect(err).NotTo(HaveOccurred()) - Expect(caps).NotTo(BeNil()) + By("checking successful response") + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + Expect(caps.GetCapabilities()).NotTo(BeNil()) - for _, cap := range caps.GetCapabilities() { - Expect(cap.GetRpc()).NotTo(BeNil()) + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetRpc()).NotTo(BeNil()) - switch cap.GetRpc().GetType() { - case csi.NodeServiceCapability_RPC_UNKNOWN: - case csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME: - default: - Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) - } + switch cap.GetRpc().GetType() { + case csi.NodeServiceCapability_RPC_UNKNOWN: + default: + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) } - }) + } }) +}) - Describe("NodeGetId", func() { - It("should return appropriate values", func() { - nid, err := c.NodeGetId( - context.Background(), - &csi.NodeGetIdRequest{}) +var _ = Describe("NodeProbe [Node Server]", func() { + var ( + c csi.NodeClient + ) - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - }) + BeforeEach(func() { + c = csi.NewNodeClient(conn) }) - Describe("NodeGetInfo", func() { - var ( - i csi.IdentityClient - accessibilityConstraintSupported bool - ) + It("should fail when no version is provided", func() { + _, err := c.NodeProbe( + context.Background(), + &csi.NodeProbeRequest{}) + Expect(err).To(HaveOccurred()) - BeforeEach(func() { - i = csi.NewIdentityClient(sc.Conn) - accessibilityConstraintSupported = isPluginCapabilitySupported(i, csi.PluginCapability_Service_ACCESSIBILITY_CONSTRAINTS) - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - It("should return approproate values", func() { - ninfo, err := c.NodeGetInfo( - context.Background(), - &csi.NodeGetInfoRequest{}) + It("should return appropriate values", func() { + pro, err := c.NodeProbe( + context.Background(), + &csi.NodeProbeRequest{ + Version: csiClientVersion, + }) - Expect(err).NotTo(HaveOccurred()) - Expect(ninfo).NotTo(BeNil()) - Expect(ninfo.GetNodeId()).NotTo(BeEmpty()) - Expect(ninfo.GetMaxVolumesPerNode()).NotTo(BeNumerically("<", 0)) + Expect(err).NotTo(HaveOccurred()) + Expect(pro).NotTo(BeNil()) + }) +}) - if accessibilityConstraintSupported { - Expect(ninfo.GetAccessibleTopology()).NotTo(BeNil()) - } - }) +var _ = Describe("GetNodeID [Node Server]", func() { + var ( + c csi.NodeClient + ) + + BeforeEach(func() { + c = csi.NewNodeClient(conn) }) - Describe("NodePublishVolume", func() { - It("should fail when no volume id is provided", func() { - _, err := c.NodePublishVolume( - context.Background(), - &csi.NodePublishVolumeRequest{ - NodePublishSecrets: sc.Secrets.NodePublishVolumeSecret, - }, - ) - Expect(err).To(HaveOccurred()) + It("should fail when no version is provided", func() { + _, err := c.GetNodeID( + context.Background(), + 
&csi.GetNodeIDRequest{}) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - It("should fail when no target path is provided", func() { - _, err := c.NodePublishVolume( - context.Background(), - &csi.NodePublishVolumeRequest{ - VolumeId: "id", - NodePublishSecrets: sc.Secrets.NodePublishVolumeSecret, - }, - ) - Expect(err).To(HaveOccurred()) + It("should return appropriate values", func() { + nid, err := c.GetNodeID( + context.Background(), + &csi.GetNodeIDRequest{ + Version: csiClientVersion, + }) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) + }) +}) - It("should fail when no volume capability is provided", func() { - _, err := c.NodePublishVolume( - context.Background(), - &csi.NodePublishVolumeRequest{ - VolumeId: "id", - TargetPath: sc.Config.TargetPath, - NodePublishSecrets: sc.Secrets.NodePublishVolumeSecret, - }, - ) - Expect(err).To(HaveOccurred()) +var _ = Describe("NodePublishVolume [Node Server]", func() { + var ( + s csi.ControllerClient + c csi.NodeClient + controllerPublishSupported bool + ) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + BeforeEach(func() { + s = csi.NewControllerClient(conn) + c = csi.NewNodeClient(conn) + controllerPublishSupported = isCapabilitySupported( + s, + csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) }) - Describe("NodeUnpublishVolume", func() { - It("should fail when no volume id is provided", func() { + It("should fail when no version is provided", func() { - _, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{}) - Expect(err).To(HaveOccurred()) + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{}) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - It("should fail when no target path is provided", func() { + It("should fail when no volume id is provided", func() { - _, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{ - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + Version: csiClientVersion, + }) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - Describe("NodeStageVolume", func() { - var ( - device string - ) + It("should fail when no target path is provided", func() { - BeforeEach(func() { - if !nodeStageSupported { - Skip("NodeStageVolume not supported") - } + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + Version: csiClientVersion, + 
VolumeId: "id", + }) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - device = "/dev/mock" - }) + It("should fail when no volume capability is provided", func() { - It("should fail when no volume id is provided", func() { - _, err := c.NodeStageVolume( - context.Background(), - &csi.NodeStageVolumeRequest{ - StagingTargetPath: sc.Config.StagingPath, - VolumeCapability: &csi.VolumeCapability{ + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + Version: csiClientVersion, + VolumeId: "id", + TargetPath: csiTargetPath, + }) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a single node writer volume") + name := "sanity" + vol, err := s.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Version: csiClientVersion, + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, }, @@ -266,24 +233,32 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - PublishInfo: map[string]string{ - "device": device, - }, - NodeStageSecrets: sc.Secrets.NodeStageVolumeSecret, }, - ) - Expect(err).To(HaveOccurred()) + }) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolumeInfo()).NotTo(BeNil()) + Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + By("getting a node id") + nid, err := c.GetNodeID( + context.Background(), + &csi.GetNodeIDRequest{ + Version: csiClientVersion, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) - It("should fail when no staging target path is provided", func() { - _, err := c.NodeStageVolume( + var conpubvol *csi.ControllerPublishVolumeResponse + if controllerPublishSupported { + By("controller publishing volume") + conpubvol, err = s.ControllerPublishVolume( context.Background(), - &csi.NodeStageVolumeRequest{ - VolumeId: "id", + &csi.ControllerPublishVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + NodeId: nid.GetNodeId(), VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, @@ -292,86 +267,138 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - PublishInfo: map[string]string{ - "device": device, - }, - NodeStageSecrets: sc.Secrets.NodeStageVolumeSecret, + Readonly: false, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(conpubvol).NotTo(BeNil()) + } + + // NodePublishVolume + By("publishing the volume on a node") + nodepubvolRequest := &csi.NodePublishVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + TargetPath: csiTargetPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: 
csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, - ) - Expect(err).To(HaveOccurred()) + }, + } + if controllerPublishSupported { + nodepubvolRequest.PublishVolumeInfo = conpubvol.GetPublishVolumeInfo() + } + nodepubvol, err := c.NodePublishVolume(context.Background(), nodepubvolRequest) + Expect(err).NotTo(HaveOccurred()) + Expect(nodepubvol).NotTo(BeNil()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + // NodeUnpublishVolume + By("cleaning up calling nodeunpublish") + nodeunpubvol, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + TargetPath: csiTargetPath, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(nodeunpubvol).NotTo(BeNil()) - It("should fail when no volume capability is provided", func() { - _, err := c.NodeStageVolume( + if controllerPublishSupported { + By("cleaning up calling controllerunpublishing the volume") + nodeunpubvol, err := c.NodeUnpublishVolume( context.Background(), - &csi.NodeStageVolumeRequest{ - VolumeId: "id", - StagingTargetPath: sc.Config.StagingPath, - PublishInfo: map[string]string{ - "device": device, - }, - NodeStageSecrets: sc.Secrets.NodeStageVolumeSecret, - }, - ) - Expect(err).To(HaveOccurred()) + &csi.NodeUnpublishVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + TargetPath: csiTargetPath, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(nodeunpubvol).NotTo(BeNil()) + } - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + By("cleaning up deleting the volume") + _, err = s.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + }) + Expect(err).NotTo(HaveOccurred()) }) +}) - Describe("NodeUnstageVolume", func() { - BeforeEach(func() { - if !nodeStageSupported { - Skip("NodeUnstageVolume not supported") - } - }) +var _ = Describe("NodeUnpublishVolume [Node Server]", func() { + var ( + s csi.ControllerClient + c csi.NodeClient + controllerPublishSupported bool + ) - It("should fail when no volume id is provided", func() { + BeforeEach(func() { + s = csi.NewControllerClient(conn) + c = csi.NewNodeClient(conn) + controllerPublishSupported = isCapabilitySupported( + s, + csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) + }) - _, err := c.NodeUnstageVolume( - context.Background(), - &csi.NodeUnstageVolumeRequest{ - StagingTargetPath: sc.Config.StagingPath, - }) - Expect(err).To(HaveOccurred()) + It("should fail when no version is provided", func() { - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + _, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{}) + Expect(err).To(HaveOccurred()) - It("should fail when no staging target path is provided", func() { + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - _, err := c.NodeUnstageVolume( - context.Background(), - &csi.NodeUnstageVolumeRequest{ - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) + It("should fail when no volume id is provided", func() { - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - 
Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + _, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{ + Version: csiClientVersion, + }) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should work", func() { - name := uniqueString("sanity-node-full") + It("should fail when no target path is provided", func() { + + _, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{ + Version: csiClientVersion, + VolumeId: "id", + }) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values (no optional values added)", func() { // Create Volume First By("creating a single node writer volume") + name := "sanity" vol, err := s.CreateVolume( context.Background(), &csi.CreateVolumeRequest{ - Name: name, + Version: csiClientVersion, + Name: name, VolumeCapabilities: []*csi.VolumeCapability{ - { + &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, }, @@ -380,32 +407,22 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { }, }, }, - ControllerCreateSecrets: sc.Secrets.CreateVolumeSecret, - }, - ) + }) Expect(err).NotTo(HaveOccurred()) Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolume()).NotTo(BeNil()) - Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetId()}) - - By("getting a node id") - nid, err := c.NodeGetId( - context.Background(), - &csi.NodeGetIdRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) + Expect(vol.GetVolumeInfo()).NotTo(BeNil()) + Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) + // ControllerPublishVolume var conpubvol *csi.ControllerPublishVolumeResponse if controllerPublishSupported { - By("controller publishing volume") - + By("calling controllerpublish on the volume") conpubvol, err = s.ControllerPublishVolume( context.Background(), &csi.ControllerPublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - NodeId: nid.GetNodeId(), + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + NodeId: "foobar", VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, @@ -414,115 +431,65 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - VolumeAttributes: vol.GetVolume().GetAttributes(), - Readonly: false, - ControllerPublishSecrets: sc.Secrets.ControllerPublishVolumeSecret, - }, - ) + Readonly: false, + }) Expect(err).NotTo(HaveOccurred()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetId(), NodeID: nid.GetNodeId()}) Expect(conpubvol).NotTo(BeNil()) } - // NodeStageVolume - if nodeStageSupported { - By("node staging volume") - nodestagevol, err := c.NodeStageVolume( - context.Background(), - &csi.NodeStageVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - StagingTargetPath: 
sc.Config.StagingPath, - VolumeAttributes: vol.GetVolume().GetAttributes(), - PublishInfo: conpubvol.GetPublishInfo(), - NodeStageSecrets: sc.Secrets.NodeStageVolumeSecret, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(nodestagevol).NotTo(BeNil()) - } + // NodePublishVolume By("publishing the volume on a node") - var stagingPath string - if nodeStageSupported { - stagingPath = sc.Config.StagingPath - } - nodepubvol, err := c.NodePublishVolume( - context.Background(), - &csi.NodePublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - TargetPath: sc.Config.TargetPath, - StagingTargetPath: stagingPath, - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, + nodepubvolRequest := &csi.NodePublishVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + TargetPath: csiTargetPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, - VolumeAttributes: vol.GetVolume().GetAttributes(), - PublishInfo: conpubvol.GetPublishInfo(), - NodePublishSecrets: sc.Secrets.NodePublishVolumeSecret, }, - ) + } + if controllerPublishSupported { + nodepubvolRequest.PublishVolumeInfo = conpubvol.GetPublishVolumeInfo() + } + nodepubvol, err := c.NodePublishVolume(context.Background(), nodepubvolRequest) Expect(err).NotTo(HaveOccurred()) Expect(nodepubvol).NotTo(BeNil()) // NodeUnpublishVolume - By("cleaning up calling nodeunpublish") nodeunpubvol, err := c.NodeUnpublishVolume( context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - TargetPath: sc.Config.TargetPath, + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + TargetPath: csiTargetPath, }) Expect(err).NotTo(HaveOccurred()) Expect(nodeunpubvol).NotTo(BeNil()) - if nodeStageSupported { - By("cleaning up calling nodeunstage") - nodeunstagevol, err := c.NodeUnstageVolume( - context.Background(), - &csi.NodeUnstageVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - StagingTargetPath: sc.Config.StagingPath, - }, - ) - Expect(err).NotTo(HaveOccurred()) - Expect(nodeunstagevol).NotTo(BeNil()) - } - if controllerPublishSupported { - By("cleaning up calling controllerunpublishing") - - controllerunpubvol, err := s.ControllerUnpublishVolume( + By("cleaning up unpublishing the volume") + nodeunpubvol, err := c.NodeUnpublishVolume( context.Background(), - &csi.ControllerUnpublishVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - NodeId: nid.GetNodeId(), - ControllerUnpublishSecrets: sc.Secrets.ControllerUnpublishVolumeSecret, - }, - ) + &csi.NodeUnpublishVolumeRequest{ + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + TargetPath: csiTargetPath, + }) Expect(err).NotTo(HaveOccurred()) - Expect(controllerunpubvol).NotTo(BeNil()) + Expect(nodeunpubvol).NotTo(BeNil()) } By("cleaning up deleting the volume") - _, err = s.DeleteVolume( context.Background(), &csi.DeleteVolumeRequest{ - VolumeId: vol.GetVolume().GetId(), - ControllerDeleteSecrets: sc.Secrets.DeleteVolumeSecret, - }, - ) + Version: csiClientVersion, + VolumeId: vol.GetVolumeInfo().GetId(), + }) Expect(err).NotTo(HaveOccurred()) }) }) diff --git 
a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go index 9a4de8be..ecf88b19 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go @@ -17,14 +17,10 @@ limitations under the License. package sanity import ( - "crypto/rand" - "fmt" - "io/ioutil" - "os" + "sync" "testing" "github.com/kubernetes-csi/csi-test/utils" - yaml "gopkg.in/yaml.v2" "google.golang.org/grpc" @@ -32,132 +28,28 @@ import ( . "github.com/onsi/gomega" ) -// CSISecrets consists of secrets used in CSI credentials. -type CSISecrets struct { - CreateVolumeSecret map[string]string `yaml:"CreateVolumeSecret"` - DeleteVolumeSecret map[string]string `yaml:"DeleteVolumeSecret"` - ControllerPublishVolumeSecret map[string]string `yaml:"ControllerPublishVolumeSecret"` - ControllerUnpublishVolumeSecret map[string]string `yaml:"ControllerUnpublishVolumeSecret"` - NodeStageVolumeSecret map[string]string `yaml:"NodeStageVolumeSecret"` - NodePublishVolumeSecret map[string]string `yaml:"NodePublishVolumeSecret"` - CreateSnapshotSecret map[string]string `yaml:"CreateSnapshotSecret"` - DeleteSnapshotSecret map[string]string `yaml:"DeleteSnapshotSecret"` -} - -// Config provides the configuration for the sanity tests. It -// needs to be initialized by the user of the sanity package. -type Config struct { - TargetPath string - StagingPath string - Address string - SecretsFile string - TestVolumeSize int64 -} - -// SanityContext holds the variables that each test can depend on. It -// gets initialized before each test block runs. -type SanityContext struct { - Config *Config - Conn *grpc.ClientConn - Secrets *CSISecrets -} +var ( + driverAddress string + conn *grpc.ClientConn + lock sync.Mutex +) -// Test will test the CSI driver at the specified address by -// setting up a Ginkgo suite and running it. 
-func Test(t *testing.T, reqConfig *Config) { - sc := &SanityContext{ - Config: reqConfig, - } +// Test will test the CSI driver at the specified address +func Test(t *testing.T, address string) { + lock.Lock() + defer lock.Unlock() - registerTestsInGinkgo(sc) + driverAddress = address RegisterFailHandler(Fail) RunSpecs(t, "CSI Driver Test Suite") } -func GinkgoTest(reqConfig *Config) { - sc := &SanityContext{ - Config: reqConfig, - } - - registerTestsInGinkgo(sc) -} - -func (sc *SanityContext) setup() { +var _ = BeforeSuite(func() { var err error - - if len(sc.Config.SecretsFile) > 0 { - sc.Secrets, err = loadSecrets(sc.Config.SecretsFile) - Expect(err).NotTo(HaveOccurred()) - } else { - sc.Secrets = &CSISecrets{} - } - - By("connecting to CSI driver") - sc.Conn, err = utils.Connect(sc.Config.Address) + conn, err = utils.Connect(driverAddress) Expect(err).NotTo(HaveOccurred()) +}) - By("creating mount and staging directories") - err = createMountTargetLocation(sc.Config.TargetPath) - Expect(err).NotTo(HaveOccurred()) - if len(sc.Config.StagingPath) > 0 { - err = createMountTargetLocation(sc.Config.StagingPath) - Expect(err).NotTo(HaveOccurred()) - } -} - -func (sc *SanityContext) teardown() { - if sc.Conn != nil { - sc.Conn.Close() - sc.Conn = nil - } -} - -func createMountTargetLocation(targetPath string) error { - fileInfo, err := os.Stat(targetPath) - if err != nil && os.IsNotExist(err) { - return os.MkdirAll(targetPath, 0755) - } else if err != nil { - return err - } - if !fileInfo.IsDir() { - return fmt.Errorf("Target location %s is not a directory", targetPath) - } - - return nil -} - -func loadSecrets(path string) (*CSISecrets, error) { - var creds CSISecrets - - yamlFile, err := ioutil.ReadFile(path) - if err != nil { - return &creds, fmt.Errorf("failed to read file %q: #%v", path, err) - } - - err = yaml.Unmarshal(yamlFile, &creds) - if err != nil { - return &creds, fmt.Errorf("error unmarshaling yaml: #%v", err) - } - - return &creds, nil -} - -var uniqueSuffix = "-" + pseudoUUID() - -// pseudoUUID returns a unique string generated from random -// bytes, empty string in case of error. -func pseudoUUID() string { - b := make([]byte, 8) - if _, err := rand.Read(b); err != nil { - // Shouldn't happen?! - return "" - } - return fmt.Sprintf("%08X-%08X", b[0:4], b[4:8]) -} - -// uniqueString returns a unique string by appending a random -// number. In case of an error, just the prefix is returned, so it -// alone should already be fairly unique. -func uniqueString(prefix string) string { - return prefix + uniqueSuffix -} +var _ = AfterSuite(func() { + conn.Close() +}) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go deleted file mode 100644 index 47763b75..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2018 Intel Corporation - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sanity - -import ( - . 
"github.com/onsi/ginkgo" -) - -type test struct { - text string - body func(*SanityContext) -} - -var tests []test - -// DescribeSanity must be used instead of the usual Ginkgo Describe to -// register a test block. The difference is that the body function -// will be called multiple times with the right context (when -// setting up a Ginkgo suite or a testing.T test, with the right -// configuration). -func DescribeSanity(text string, body func(*SanityContext)) bool { - tests = append(tests, test{text, body}) - return true -} - -// registerTestsInGinkgo invokes the actual Gingko Describe -// for the tests registered earlier with DescribeSanity. -func registerTestsInGinkgo(sc *SanityContext) { - for _, test := range tests { - Describe(test.text, func() { - BeforeEach(func() { - sc.setup() - }) - - test.body(sc) - - AfterEach(func() { - sc.teardown() - }) - }) - } -} diff --git a/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go b/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go index 5a2bbe27..49ad8283 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go @@ -16,16 +16,13 @@ limitations under the License. package test import ( - "context" - "fmt" - "reflect" "testing" - "github.com/container-storage-interface/spec/lib/go/csi/v0" - "github.com/golang/mock/gomock" - "github.com/golang/protobuf/proto" + "github.com/container-storage-interface/spec/lib/go/csi" + gomock "github.com/golang/mock/gomock" mock_driver "github.com/kubernetes-csi/csi-test/driver" mock_utils "github.com/kubernetes-csi/csi-test/utils" + "golang.org/x/net/context" ) func TestPluginInfoResponse(t *testing.T) { @@ -36,7 +33,13 @@ func TestPluginInfoResponse(t *testing.T) { driver := mock_driver.NewMockIdentityServer(m) // Setup input - in := &csi.GetPluginInfoRequest{} + in := &csi.GetPluginInfoRequest{ + Version: &csi.Version{ + Major: 0, + Minor: 1, + Patch: 0, + }, + } // Setup mock outout out := &csi.GetPluginInfoResponse{ @@ -61,24 +64,6 @@ func TestPluginInfoResponse(t *testing.T) { } } -type pbMatcher struct { - x proto.Message -} - -func (p pbMatcher) Matches(x interface{}) bool { - y := x.(proto.Message) - return proto.Equal(p.x, y) -} - -func (p pbMatcher) String() string { - return fmt.Sprintf("pb equal to %v", p.x) -} - -func pbMatch(x interface{}) gomock.Matcher { - v := x.(proto.Message) - return &pbMatcher{v} -} - func TestGRPCGetPluginInfoReponse(t *testing.T) { // Setup mock @@ -87,7 +72,13 @@ func TestGRPCGetPluginInfoReponse(t *testing.T) { driver := mock_driver.NewMockIdentityServer(m) // Setup input - in := &csi.GetPluginInfoRequest{} + in := &csi.GetPluginInfoRequest{ + Version: &csi.Version{ + Major: 0, + Minor: 1, + Patch: 0, + }, + } // Setup mock outout out := &csi.GetPluginInfoResponse{ @@ -100,7 +91,7 @@ func TestGRPCGetPluginInfoReponse(t *testing.T) { // Setup expectation // !IMPORTANT!: Must set context expected value to gomock.Any() to match any value - driver.EXPECT().GetPluginInfo(gomock.Any(), pbMatch(in)).Return(out, nil).Times(1) + driver.EXPECT().GetPluginInfo(gomock.Any(), in).Return(out, nil).Times(1) // Create a new RPC server := mock_driver.NewMockCSIDriver(&mock_driver.MockCSIDriverServers{ @@ -124,65 +115,3 @@ func TestGRPCGetPluginInfoReponse(t *testing.T) { t.Errorf("Unknown name: %s\n", name) } } - -func TestGRPCAttach(t *testing.T) { - - // Setup mock - m := 
gomock.NewController(&mock_utils.SafeGoroutineTester{}) - defer m.Finish() - driver := mock_driver.NewMockControllerServer(m) - - // Setup input - defaultVolumeID := "myname" - defaultNodeID := "MyNodeID" - defaultCaps := &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, - }, - } - publishVolumeInfo := map[string]string{ - "first": "foo", - "second": "bar", - "third": "baz", - } - defaultRequest := &csi.ControllerPublishVolumeRequest{ - VolumeId: defaultVolumeID, - NodeId: defaultNodeID, - VolumeCapability: defaultCaps, - Readonly: false, - } - - // Setup mock outout - out := &csi.ControllerPublishVolumeResponse{ - PublishInfo: publishVolumeInfo, - } - - // Setup expectation - // !IMPORTANT!: Must set context expected value to gomock.Any() to match any value - driver.EXPECT().ControllerPublishVolume(gomock.Any(), pbMatch(defaultRequest)).Return(out, nil).Times(1) - - // Create a new RPC - server := mock_driver.NewMockCSIDriver(&mock_driver.MockCSIDriverServers{ - Controller: driver, - }) - conn, err := server.Nexus() - if err != nil { - t.Errorf("Error: %s", err.Error()) - } - defer server.Close() - - // Make call - c := csi.NewControllerClient(conn) - r, err := c.ControllerPublishVolume(context.Background(), defaultRequest) - if err != nil { - t.Errorf("Error: %s", err.Error()) - } - - info := r.GetPublishInfo() - if !reflect.DeepEqual(info, publishVolumeInfo) { - t.Errorf("Invalid publish info: %v", info) - } -} diff --git a/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go b/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go index 82080eb3..a0cf555a 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go @@ -21,7 +21,7 @@ import ( "sync" "testing" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" "github.com/kubernetes-csi/csi-test/utils" "google.golang.org/grpc" "google.golang.org/grpc/reflection" @@ -36,13 +36,17 @@ type simpleDriver struct { wg sync.WaitGroup } -func (s *simpleDriver) GetPluginCapabilities(context.Context, *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { - // TODO: Return some simple Plugin Capabilities - return &csi.GetPluginCapabilitiesResponse{}, nil -} - -func (s *simpleDriver) Probe(context.Context, *csi.ProbeRequest) (*csi.ProbeResponse, error) { - return &csi.ProbeResponse{}, nil +func (s *simpleDriver) GetSupportedVersions( + context.Context, *csi.GetSupportedVersionsRequest) (*csi.GetSupportedVersionsResponse, error) { + return &csi.GetSupportedVersionsResponse{ + SupportedVersions: []*csi.Version{ + &csi.Version{ + Major: 0, + Minor: 1, + Patch: 0, + }, + }, + }, nil } func (s *simpleDriver) GetPluginInfo( From 24cb3657584669f663e2cb840ef28b82c23590f3 Mon Sep 17 00:00:00 2001 From: Serguei Bezverkhi Date: Wed, 14 Nov 2018 18:23:49 -0500 Subject: [PATCH 2/3] Code and dependency changes Signed-off-by: Serguei Bezverkhi --- Gopkg.lock | 49 ++++++++++++++++++++++++++++-------- Gopkg.toml | 2 +- cmd/livenessprobe_test.go | 2 +- pkg/connection/connection.go | 2 +- 4 files changed, 41 insertions(+), 14 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index de9a2c79..d2150de1 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ 
-2,24 +2,31 @@ [[projects]] + digest = "1:44982bd5390cb16b8a4dc65976503f907a9ffd07879fbddd3c06a9ef7e5b05c3" name = "github.com/container-storage-interface/spec" - packages = ["lib/go/csi/v0"] - revision = "2178fdeea87f1150a17a63252eee28d4d8141f72" - version = "v0.3.0" + packages = ["lib/go/csi"] + pruneopts = "" + revision = "8efcc85c45550571fba8134182013ed7dc34038a" + version = "v1.0.0-rc2" [[projects]] branch = "master" + digest = "1:107b233e45174dbab5b1324201d092ea9448e58243ab9f039e4c0f332e121e3a" name = "github.com/golang/glog" packages = ["."] + pruneopts = "" revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" [[projects]] + digest = "1:73a7106c799f98af4f3da7552906efc6a2570329f4cd2d2f5fb8f9d6c053ff2f" name = "github.com/golang/mock" packages = ["gomock"] + pruneopts = "" revision = "c34cdb4725f4c3844d095133c6e40e448b86589b" version = "v1.1.1" [[projects]] + digest = "1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b" name = "github.com/golang/protobuf" packages = [ "proto", @@ -28,22 +35,26 @@ "ptypes/any", "ptypes/duration", "ptypes/timestamp", - "ptypes/wrappers" + "ptypes/wrappers", ] + pruneopts = "" revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" version = "v1.1.0" [[projects]] - branch = "master" + digest = "1:d17a296973591f13eed8399016d5ec748cfb9c92086a84e00b33139b92ba4858" name = "github.com/kubernetes-csi/csi-test" packages = [ "driver", - "utils" + "utils", ] - revision = "e11d328ecca7fe91939284a8e878ebe77df8756d" + pruneopts = "" + revision = "31b2baed861ae5c166922808be5d1982346adf7d" + version = "v0.1.0-2" [[projects]] branch = "master" + digest = "1:98219b20d296a0031fdb434d30ca6e109623a09530a76cf57e41c94bd1e391a0" name = "golang.org/x/net" packages = [ "context", @@ -52,17 +63,21 @@ "http2/hpack", "idna", "internal/timeseries", - "trace" + "trace", ] + pruneopts = "" revision = "c39426892332e1bb5ec0a434a079bf82f5d30c54" [[projects]] branch = "master" + digest = "1:b779cc85de245422bf70d8a21e6afcf3c0591eca64dc507feb9f054f64b21ab9" name = "golang.org/x/sys" packages = ["unix"] + pruneopts = "" revision = "4e1fef5609515ec7a2cee7b5de30ba6d9b438cbf" [[projects]] + digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4" name = "golang.org/x/text" packages = [ "collate", @@ -78,18 +93,22 @@ "unicode/bidi", "unicode/cldr", "unicode/norm", - "unicode/rangetable" + "unicode/rangetable", ] + pruneopts = "" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" [[projects]] branch = "master" + digest = "1:3970150423e3f47a6d354b2868f7db632b665463755194aac1a30c4fb341be57" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] + pruneopts = "" revision = "383e8b2c3b9e36c4076b235b32537292176bae20" [[projects]] + digest = "1:ca75b3775a5d4e5d1fb48f57ef0865b4aaa8b3f00e6b52be68db991c4594e0a7" name = "google.golang.org/grpc" packages = [ ".", @@ -119,14 +138,22 @@ "resolver/passthrough", "stats", "status", - "tap" + "tap", ] + pruneopts = "" revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455" version = "v1.14.0" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "2f6e76998bb9d9f92c42332508edd9511082dc4b2a5bac1f3eba207120dd1b25" + input-imports = [ + "github.com/container-storage-interface/spec/lib/go/csi", + "github.com/golang/glog", + "github.com/golang/mock/gomock", + "github.com/kubernetes-csi/csi-test/driver", + "google.golang.org/grpc", + "google.golang.org/grpc/connectivity", + ] solver-name = "gps-cdcl" 
solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 182d704c..0544e7d6 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -23,7 +23,7 @@ [[constraint]] name = "github.com/container-storage-interface/spec" - version = "~0.3.0" + version = "1.0.0-rc2" [[constraint]] branch = "master" diff --git a/cmd/livenessprobe_test.go b/cmd/livenessprobe_test.go index bc17e898..12e2e190 100644 --- a/cmd/livenessprobe_test.go +++ b/cmd/livenessprobe_test.go @@ -21,7 +21,7 @@ import ( "testing" "time" - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" + csi "github.com/container-storage-interface/spec/lib/go/csi" "github.com/golang/mock/gomock" "github.com/kubernetes-csi/csi-test/driver" "github.com/kubernetes-csi/livenessprobe/pkg/connection" diff --git a/pkg/connection/connection.go b/pkg/connection/connection.go index 3f2b5216..d1512ecc 100644 --- a/pkg/connection/connection.go +++ b/pkg/connection/connection.go @@ -23,7 +23,7 @@ import ( "strings" "time" - "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/container-storage-interface/spec/lib/go/csi" "github.com/golang/glog" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" From dce91cc62a043a34a53bef9bf63e7b1b771e0314 Mon Sep 17 00:00:00 2001 From: Serguei Bezverkhi Date: Wed, 14 Nov 2018 21:32:26 -0500 Subject: [PATCH 3/3] second part of vendor files Signed-off-by: Serguei Bezverkhi --- Gopkg.lock | 6 +- .../kubernetes-csi/csi-test/.gitignore | 11 +- .../kubernetes-csi/csi-test/.travis.yml | 19 +- .../kubernetes-csi/csi-test/CONTRIBUTING.md | 22 + .../kubernetes-csi/csi-test/Dockerfile.mock | 6 + .../kubernetes-csi/csi-test/Gopkg.lock | 237 ++ .../kubernetes-csi/csi-test/Gopkg.toml | 62 + .../kubernetes-csi/csi-test/Makefile | 52 + .../github.com/kubernetes-csi/csi-test/OWNERS | 4 + .../kubernetes-csi/csi-test/README.md | 33 +- .../kubernetes-csi/csi-test/SECURITY_CONTACTS | 14 + .../csi-test/cmd/csi-sanity/Makefile | 14 +- .../csi-test/cmd/csi-sanity/README.md | 30 + .../csi-test/cmd/csi-sanity/sanity_test.go | 18 +- .../kubernetes-csi/csi-test/driver/driver.go | 250 ++- .../csi-test/driver/driver.mock.go | 145 +- .../kubernetes-csi/csi-test/driver/mock.go | 83 + .../kubernetes-csi/csi-test/glide.lock | 135 -- .../kubernetes-csi/csi-test/glide.yaml | 16 - .../csi-test/hack/_apitest/api_test.go | 18 + .../csi-test/hack/_embedded/embedded_test.go | 42 + .../kubernetes-csi/csi-test/hack/e2e.sh | 72 +- .../kubernetes-csi/csi-test/mock/AUTHORS | 2 + .../kubernetes-csi/csi-test/mock/README.md | 22 + .../csi-test/mock/cache/SnapshotCache.go | 89 + .../kubernetes-csi/csi-test/mock/main.go | 95 + .../csi-test/mock/mocksecret.yaml | 16 + .../csi-test/mock/service/controller.go | 577 +++++ .../csi-test/mock/service/identity.go | 48 + .../csi-test/mock/service/node.go | 244 ++ .../csi-test/mock/service/service.go | 147 ++ .../csi-test/pkg/sanity/README.md | 54 +- .../csi-test/pkg/sanity/cleanup.go | 134 ++ .../csi-test/pkg/sanity/controller.go | 1994 ++++++++++++----- .../csi-test/pkg/sanity/identity.go | 113 +- .../csi-test/pkg/sanity/node.go | 688 +++--- .../csi-test/pkg/sanity/sanity.go | 173 +- .../csi-test/pkg/sanity/tests.go | 56 + .../kubernetes-csi/csi-test/test/co_test.go | 105 +- .../csi-test/test/driver_test.go | 18 +- .../csi-test/utils/safegoroutinetester.go | 2 +- 41 files changed, 4576 insertions(+), 1290 deletions(-) create mode 100644 vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md create mode 
100644 vendor/github.com/kubernetes-csi/csi-test/Dockerfile.mock create mode 100644 vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock create mode 100644 vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml create mode 100644 vendor/github.com/kubernetes-csi/csi-test/Makefile create mode 100644 vendor/github.com/kubernetes-csi/csi-test/OWNERS create mode 100644 vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS create mode 100644 vendor/github.com/kubernetes-csi/csi-test/driver/mock.go delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/glide.lock delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/glide.yaml create mode 100644 vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/AUTHORS create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/README.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/main.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go diff --git a/Gopkg.lock b/Gopkg.lock index d2150de1..cbd68e0f 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -42,15 +42,15 @@ version = "v1.1.0" [[projects]] - digest = "1:d17a296973591f13eed8399016d5ec748cfb9c92086a84e00b33139b92ba4858" + branch = "master" + digest = "1:79848f850d0d15b2413a7285441a7fd13ed8a901ebe57d59f1e49759c103b930" name = "github.com/kubernetes-csi/csi-test" packages = [ "driver", "utils", ] pruneopts = "" - revision = "31b2baed861ae5c166922808be5d1982346adf7d" - version = "v0.1.0-2" + revision = "619da6853e10bef67ddcc8f1c2b68b73154bf11d" [[projects]] branch = "master" diff --git a/vendor/github.com/kubernetes-csi/csi-test/.gitignore b/vendor/github.com/kubernetes-csi/csi-test/.gitignore index fc211aec..81c985c4 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/.gitignore +++ b/vendor/github.com/kubernetes-csi/csi-test/.gitignore @@ -9,8 +9,11 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ -vendor/ +bin/mock cmd/csi-sanity/csi-sanity + +# JetBrains GoLand +.idea + +# Vim +*.swp diff --git a/vendor/github.com/kubernetes-csi/csi-test/.travis.yml b/vendor/github.com/kubernetes-csi/csi-test/.travis.yml index 349982d2..7a817191 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/.travis.yml +++ b/vendor/github.com/kubernetes-csi/csi-test/.travis.yml @@ -1,12 +1,15 @@ language: go -install: - - curl https://glide.sh/get | sh - - glide install -v +sudo: required +services: + - docker matrix: include: - - 
go: 1.9.2 + - go: 1.10.3 script: -- go fmt $(go list ./... | grep -v vendor) | wc -l | grep 0 -- go vet $(go list ./... | grep -v vendor) -- go test $(go list ./... | grep -v vendor | grep -v "cmd/csi-sanity") -- ./hack/e2e.sh +- make test +after_success: + - if [ "${TRAVIS_BRANCH}" == "master" ] && [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then + make container + docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" quay.io; + make push; + fi diff --git a/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md b/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md new file mode 100644 index 00000000..41b73b76 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + +## Contact Information + +- [Slack channel](https://kubernetes.slack.com/messages/sig-storage) +- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-storage) diff --git a/vendor/github.com/kubernetes-csi/csi-test/Dockerfile.mock b/vendor/github.com/kubernetes-csi/csi-test/Dockerfile.mock new file mode 100644 index 00000000..72697712 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/Dockerfile.mock @@ -0,0 +1,6 @@ +FROM alpine +LABEL maintainers="Kubernetes Authors" +LABEL description="CSI Mock Driver" + +COPY ./bin/mock mock +ENTRYPOINT ["/mock"] diff --git a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock new file mode 100644 index 00000000..443ad970 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock @@ -0,0 +1,237 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:26ee2356254e58b9872ba736f66aff1c54a26f08c7d16afbf49695131a87d454" + name = "github.com/container-storage-interface/spec" + packages = ["lib/go/csi"] + pruneopts = "UT" + revision = "8efcc85c45550571fba8134182013ed7dc34038a" + version = "v1.0.0-rc2" + +[[projects]] + digest = "1:bc38c7c481812e178d85160472e231c5e1c9a7f5845d67e23ee4e706933c10d8" + name = "github.com/golang/mock" + packages = ["gomock"] + pruneopts = "UT" + revision = "c34cdb4725f4c3844d095133c6e40e448b86589b" + version = "v1.1.1" + +[[projects]] + digest = "1:588beb9f80d2b0afddf05663b32d01c867da419458b560471d81cca0286e76b8" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "UT" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" + +[[projects]] + digest = "1:72f35d3e412bc67b121e15ea4c88a3b3da8bcbc2264339e7ffa4a1865799840c" + name = "github.com/onsi/ginkgo" + packages = [ + ".", + "config", + "internal/codelocation", + "internal/containernode", + "internal/failer", + "internal/leafnodes", + "internal/remote", + "internal/spec", + "internal/spec_iterator", + "internal/specrunner", + "internal/suite", + "internal/testingtproxy", + "internal/writer", + "reporters", + "reporters/stenographer", + "reporters/stenographer/support/go-colorable", + "reporters/stenographer/support/go-isatty", + "types", + ] + pruneopts = "UT" + revision = "fa5fabab2a1bfbd924faf4c067d07ae414e2aedf" + version = "v1.5.0" + +[[projects]] + digest = "1:d0c2c4e2d0006cd28c220a549cda1de8e67abc65ed4c572421492bbf0492ceaf" + name = "github.com/onsi/gomega" + packages = [ + ".", + "format", + "internal/assertion", + "internal/asyncassertion", + "internal/oraclematcher", + "internal/testingtsupport", + "matchers", + "matchers/support/goraph/bipartitegraph", + "matchers/support/goraph/edge", + "matchers/support/goraph/node", + "matchers/support/goraph/util", + "types", + ] + pruneopts = "UT" + revision = "62bff4df71bdbc266561a0caee19f0594b17c240" + version = "v1.4.0" + +[[projects]] + digest = "1:9e9193aa51197513b3abcb108970d831fbcf40ef96aa845c4f03276e1fa316d2" + name = "github.com/sirupsen/logrus" + packages = ["."] + pruneopts = "UT" + revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" + version = "v1.0.5" + +[[projects]] + branch = "master" + digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + pruneopts = "UT" + revision = "8ac0e0d97ce45cd83d1d7243c060cb8461dda5e9" + +[[projects]] + branch = "master" + digest = "1:0bb2e6ef036484991ed446a6c698698b8901766981d4d22cc8e53fedb09709ac" + name = "golang.org/x/net" + packages = [ + "context", + "html", + "html/atom", + "html/charset", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "1e491301e022f8f977054da4c2d852decd59571f" + +[[projects]] + branch = "master" + digest = "1:8fbfc6ea1a8a078697633be97f07dd83a83d32a96959d42195464c13c25be374" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows", + ] + pruneopts = "UT" + revision = "9527bec2660bd847c050fda93a0f0c6dee0800bb" + +[[projects]] + digest = "1:436b24586f8fee329e0dd65fd67c817681420cda1d7f934345c13fe78c212a73" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "encoding", + "encoding/charmap", + 
"encoding/htmlindex", + "encoding/internal", + "encoding/internal/identifier", + "encoding/japanese", + "encoding/korean", + "encoding/simplifiedchinese", + "encoding/traditionalchinese", + "encoding/unicode", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "internal/utf8internal", + "language", + "runes", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + digest = "1:601e63e7d4577f907118bec825902505291918859d223bce015539e79f1160e3" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + pruneopts = "UT" + revision = "32ee49c4dd805befd833990acba36cb75042378c" + +[[projects]] + digest = "1:7a977fdcd5abff03e94f92e7b374ef37e91c7c389581e5c4348fa98616e6c6be" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "channelz", + "codes", + "connectivity", + "credentials", + "encoding", + "encoding/proto", + "grpclb/grpc_lb_v1/messages", + "grpclog", + "internal", + "keepalive", + "metadata", + "naming", + "peer", + "reflection", + "reflection/grpc_reflection_v1alpha", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + "transport", + ] + pruneopts = "UT" + revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b" + version = "v1.12.2" + +[[projects]] + digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" + version = "v2.2.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/container-storage-interface/spec/lib/go/csi", + "github.com/golang/mock/gomock", + "github.com/golang/protobuf/proto", + "github.com/golang/protobuf/ptypes", + "github.com/golang/protobuf/ptypes/wrappers", + "github.com/onsi/ginkgo", + "github.com/onsi/gomega", + "github.com/sirupsen/logrus", + "golang.org/x/net/context", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/connectivity", + "google.golang.org/grpc/reflection", + "google.golang.org/grpc/status", + "gopkg.in/yaml.v2", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml new file mode 100644 index 00000000..4e0836d0 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml @@ -0,0 +1,62 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + name = "github.com/container-storage-interface/spec" + version = "v1.0.0-rc2" + +[[constraint]] + name = "github.com/golang/mock" + version = "1.0.0" + +[[constraint]] + name = "github.com/golang/protobuf" + version = "v1.2.0" + +[[constraint]] + name = "github.com/onsi/ginkgo" + version = "1.4.0" + +[[constraint]] + name = "github.com/onsi/gomega" + version = "1.3.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/net" + +[[constraint]] + name = "google.golang.org/grpc" + version = "1.9.2" + +[[constraint]] + name = "gopkg.in/yaml.v2" + version = "v2.1.1" + +[prune] + go-tests = true + unused-packages = true diff --git a/vendor/github.com/kubernetes-csi/csi-test/Makefile b/vendor/github.com/kubernetes-csi/csi-test/Makefile new file mode 100644 index 00000000..7fb42c87 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/Makefile @@ -0,0 +1,52 @@ +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +IMAGE_NAME = quay.io/k8scsi/mock-driver +IMAGE_VERSION = canary +APP := ./bin/mock + + +ifdef V +TESTARGS = -v -args -alsologtostderr -v 5 +else +TESTARGS = +endif + +all: $(APP) + +$(APP): + mkdir -p bin + CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o $(APP) ./mock/main.go + +clean: + rm -rf bin + +container: $(APP) + docker build -f Dockerfile.mock -t $(IMAGE_NAME):$(IMAGE_VERSION) . + +push: container + docker push $(IMAGE_NAME):$(IMAGE_VERSION) + +test: $(APP) + files=$$(find ./ -name '*.go' | grep -v '^./vendor' ); \ + if [ $$(gofmt -d $$files | wc -l) -ne 0 ]; then \ + echo "formatting errors:"; \ + gofmt -d $$files; \ + false; \ + fi + go vet $$(go list ./... | grep -v vendor) + go test $$(go list ./... 
| grep -v vendor | grep -v "cmd/csi-sanity") + ./hack/e2e.sh + +.PHONY: all clean container push test diff --git a/vendor/github.com/kubernetes-csi/csi-test/OWNERS b/vendor/github.com/kubernetes-csi/csi-test/OWNERS new file mode 100644 index 00000000..a780cce6 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/OWNERS @@ -0,0 +1,4 @@ +approvers: +- saad-ali +- lpabon +- pohly diff --git a/vendor/github.com/kubernetes-csi/csi-test/README.md b/vendor/github.com/kubernetes-csi/csi-test/README.md index e21d8399..36dce60b 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/README.md @@ -1,15 +1,42 @@ [![Build Status](https://travis-ci.org/kubernetes-csi/csi-test.svg?branch=master)](https://travis-ci.org/kubernetes-csi/csi-test) +[![Docker Repository on Quay](https://quay.io/repository/k8scsi/mock-driver/status "Docker Repository on +Quay")](https://quay.io/repository/k8scsi/mock-driver) + # csi-test csi-test houses packages and libraries to help test CSI client and plugins. -## For Container Orchestration Unit Tests +## For Container Orchestration Tests CO developers can use this framework to create drivers based on the [Golang mock](https://github.com/golang/mock) framework. Please see [co_test.go](test/co_test.go) for an example. -## For CSI Driver Unit Tests -To test drivers please take a look at [pkg/sanity](https://github.com/kubernetes-csi/csi-test/tree/master/pkg/sanity) +### Mock driver for testing +We also provide a container called `quay.io/k8scsi/mock-driver:canary` which can be used as an in-memory mock driver. +It follows the same release cycle as other containers, so the latest release is `quay.io/k8scsi/mock-driver:v0.3.0`. + +You will need to setup the environment variable `CSI_ENDPOINT` for the mock driver to know where to create the unix +domain socket. + +## For CSI Driver Tests +To test drivers please take a look at [pkg/sanity](https://github.com/kubernetes-csi/csi-test/tree/master/pkg/sanity). +This package and [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity) are meant to test +the CSI API capability of a driver. They are meant to be an additional test to the unit, functional, and e2e tests of a +CSI driver. ### Note +* Master is for CSI v0.4.0. Please see the branches for other CSI releases. * Only Golang 1.9+ supported. See [gRPC issue](https://github.com/grpc/grpc-go/issues/711#issuecomment-326626790) + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). + +You can reach the maintainers of this project at: + +- [Slack channel](https://kubernetes.slack.com/messages/sig-storage) +- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-storage) + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). diff --git a/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS b/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS new file mode 100644 index 00000000..00e28e4e --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS @@ -0,0 +1,14 @@ +# Defined below are the security contacts for this repo. 
+# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +saad-ali +lpabon diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile index b0ecbeac..520c2153 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile @@ -1,5 +1,6 @@ APP_NAME := csi-sanity VER :=$(shell git describe) +RELEASEVER := $(shell git describe --abbrev=0) BRANCH := $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) SHA := $(shell git rev-parse --short HEAD) ARCH := $(shell go env GOARCH) @@ -17,7 +18,7 @@ endif endif LDFLAGS :=-ldflags "-w -X github.com/kubernetes-csi/csi-test/cmd/csi-sanity.VERSION=$(VERSION) -extldflags '-z relro -z now'" -PACKAGE :=$(DIR)/dist/$(APP_NAME)-$(VERSION).$(GOOS).$(ARCH).tar.gz +PACKAGE :=$(DIR)/dist/$(APP_NAME)-$(RELEASEVER).$(GOOS).$(ARCH).tar.gz all: $(APP_NAME) @@ -29,9 +30,11 @@ install: $(APP_NAME) clean: rm -f csi-sanity + +dist-clean: rm -rf $(DIR)/dist -dist: $(PACKAGE) +dist: clean $(PACKAGE) $(PACKAGE): $(APP_NAME) @echo Packaging Binaries... @@ -46,16 +49,13 @@ $(PACKAGE): $(APP_NAME) linux_amd64_dist: GOOS=linux GOARCH=amd64 $(MAKE) dist -linux_arm_dist: - GOOS=linux GOARCH=arm $(MAKE) dist - linux_arm64_dist: GOOS=linux GOARCH=arm64 $(MAKE) dist darwin_amd64_dist: GOOS=darwin GOARCH=amd64 $(MAKE) dist -release: darwin_amd64_dist linux_arm_dist linux_amd64_dist linux_arm64_dist +release: dist-clean darwin_amd64_dist linux_amd64_dist linux_arm64_dist .PHONY: release darwin_amd64_dist linux_arm64_dist linux_amd64_dist \ - linux_arm_dist linux_amd64_dist clean + linux_arm_dist linux_amd64_dist clean dist-clean diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md index 36c282ad..dade1018 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md @@ -7,12 +7,42 @@ Example: $ csi-sanity --csi.endpoint= ``` +If you want to specify a mount point: + +``` +$ csi-sanity --csi.endpoint= --csi.mountpoint=/mnt +``` + For verbose type: ``` $ csi-sanity --ginkgo.v --csi.endpoint= ``` +For csi-credentials, create a secrets file with all the secrets in it: +```yaml +CreateVolumeSecret: + secretKey: secretval1 +DeleteVolumeSecret: + secretKey: secretval2 +ControllerPublishVolumeSecret: + secretKey: secretval3 +ControllerUnpublishVolumeSecret: + secretKey: secretval4 +NodeStageVolumeSecret: + secretKey: secretval5 +NodePublishVolumeSecret: + secretKey: secretval6 +``` + +Pass the file path to csi-sanity as: +``` +$ csi-sanity --csi.endpoint= --csi.secrets= +``` + +Replace the keys and values of the credentials appropriately. Since the whole +secret is passed in the request, multiple key-val pairs can be used. + ### Help The full Ginkgo and golang unit test parameters are available. 
Type diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go index 88793f96..4b2d352c 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go @@ -18,6 +18,7 @@ package sanity import ( "flag" "fmt" + "os" "testing" "github.com/kubernetes-csi/csi-test/pkg/sanity" @@ -28,14 +29,19 @@ const ( ) var ( - VERSION = "(dev)" - endpoint string - version bool + VERSION = "(dev)" + version bool + config sanity.Config ) func init() { - flag.StringVar(&endpoint, prefix+"endpoint", "", "CSI endpoint") + flag.StringVar(&config.Address, prefix+"endpoint", "", "CSI endpoint") flag.BoolVar(&version, prefix+"version", false, "Version of this program") + flag.StringVar(&config.TargetPath, prefix+"mountdir", os.TempDir()+"/csi", "Mount point for NodePublish") + flag.StringVar(&config.StagingPath, prefix+"stagingdir", os.TempDir()+"/csi", "Mount point for NodeStage if staging is supported") + flag.StringVar(&config.SecretsFile, prefix+"secrets", "", "CSI secrets file") + flag.Int64Var(&config.TestVolumeSize, prefix+"testvolumesize", sanity.DefTestVolumeSize, "Base volume size used for provisioned volumes") + flag.StringVar(&config.TestVolumeParametersFile, prefix+"testvolumeparameters", "", "YAML file of volume parameters for provisioned volumes") flag.Parse() } @@ -44,8 +50,8 @@ func TestSanity(t *testing.T) { fmt.Printf("Version = %s\n", VERSION) return } - if len(endpoint) == 0 { + if len(config.Address) == 0 { t.Fatalf("--%sendpoint must be provided with an CSI endpoint", prefix) } - sanity.Test(t, endpoint) + sanity.Test(t, &config) } diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go index 94145df2..01224a3a 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go @@ -19,122 +19,258 @@ limitations under the License. package driver import ( + "context" + "encoding/json" + "errors" + "fmt" "net" "sync" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/kubernetes-csi/csi-test/utils" "google.golang.org/grpc" "google.golang.org/grpc/reflection" ) -type MockCSIDriverServers struct { - Controller *MockControllerServer - Identity *MockIdentityServer - Node *MockNodeServer +var ( + // ErrNoCredentials is the error when a secret is enabled but not passed in the request. + ErrNoCredentials = errors.New("secret must be provided") + // ErrAuthFailed is the error when the secret is incorrect. + ErrAuthFailed = errors.New("authentication failed") +) + +type CSIDriverServers struct { + Controller csi.ControllerServer + Identity csi.IdentityServer + Node csi.NodeServer +} + +// This is the key name in all the CSI secret objects. +const secretField = "secretKey" + +// CSICreds is a driver specific secret type. Drivers can have a key-val pair of +// secrets. This mock driver has a single string secret with secretField as the +// key. 
+type CSICreds struct { + CreateVolumeSecret string + DeleteVolumeSecret string + ControllerPublishVolumeSecret string + ControllerUnpublishVolumeSecret string + NodeStageVolumeSecret string + NodePublishVolumeSecret string + CreateSnapshotSecret string + DeleteSnapshotSecret string } -type MockCSIDriver struct { +type CSIDriver struct { listener net.Listener server *grpc.Server - conn *grpc.ClientConn - servers *MockCSIDriverServers + servers *CSIDriverServers wg sync.WaitGroup running bool lock sync.Mutex + creds *CSICreds } -func NewMockCSIDriver(servers *MockCSIDriverServers) *MockCSIDriver { - return &MockCSIDriver{ +func NewCSIDriver(servers *CSIDriverServers) *CSIDriver { + return &CSIDriver{ servers: servers, } } -func (m *MockCSIDriver) goServe(started chan<- bool) { - m.wg.Add(1) +func (c *CSIDriver) goServe(started chan<- bool) { + c.wg.Add(1) go func() { - defer m.wg.Done() + defer c.wg.Done() started <- true - err := m.server.Serve(m.listener) + err := c.server.Serve(c.listener) if err != nil { panic(err.Error()) } }() } -func (m *MockCSIDriver) Address() string { - return m.listener.Addr().String() +func (c *CSIDriver) Address() string { + return c.listener.Addr().String() } -func (m *MockCSIDriver) Start() error { - m.lock.Lock() - defer m.lock.Unlock() +func (c *CSIDriver) Start(l net.Listener) error { + c.lock.Lock() + defer c.lock.Unlock() - // Listen on a port assigned by the net package - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return err - } - m.listener = l + // Set listener + c.listener = l // Create a new grpc server - m.server = grpc.NewServer() + c.server = grpc.NewServer( + grpc.UnaryInterceptor(c.callInterceptor), + ) // Register Mock servers - if m.servers.Controller != nil { - csi.RegisterControllerServer(m.server, m.servers.Controller) + if c.servers.Controller != nil { + csi.RegisterControllerServer(c.server, c.servers.Controller) } - if m.servers.Identity != nil { - csi.RegisterIdentityServer(m.server, m.servers.Identity) + if c.servers.Identity != nil { + csi.RegisterIdentityServer(c.server, c.servers.Identity) } - if m.servers.Node != nil { - csi.RegisterNodeServer(m.server, m.servers.Node) + if c.servers.Node != nil { + csi.RegisterNodeServer(c.server, c.servers.Node) } - reflection.Register(m.server) + reflection.Register(c.server) // Start listening for requests waitForServer := make(chan bool) - m.goServe(waitForServer) + c.goServe(waitForServer) <-waitForServer - m.running = true + c.running = true return nil } -func (m *MockCSIDriver) Nexus() (*grpc.ClientConn, error) { - // Start server - err := m.Start() - if err != nil { - return nil, err +func (c *CSIDriver) Stop() { + c.lock.Lock() + defer c.lock.Unlock() + + if !c.running { + return } - // Create a client connection - m.conn, err = utils.Connect(m.Address()) + c.server.Stop() + c.wg.Wait() +} + +func (c *CSIDriver) Close() { + c.server.Stop() +} + +func (c *CSIDriver) IsRunning() bool { + c.lock.Lock() + defer c.lock.Unlock() + + return c.running +} + +// SetDefaultCreds sets the default secrets for CSI creds. 
+func (c *CSIDriver) SetDefaultCreds() { + c.creds = &CSICreds{ + CreateVolumeSecret: "secretval1", + DeleteVolumeSecret: "secretval2", + ControllerPublishVolumeSecret: "secretval3", + ControllerUnpublishVolumeSecret: "secretval4", + NodeStageVolumeSecret: "secretval5", + NodePublishVolumeSecret: "secretval6", + CreateSnapshotSecret: "secretval7", + DeleteSnapshotSecret: "secretval8", + } +} + +func (c *CSIDriver) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + err := c.authInterceptor(req) if err != nil { + logGRPC(info.FullMethod, req, nil, err) return nil, err } + rsp, err := handler(ctx, req) + logGRPC(info.FullMethod, req, rsp, err) + return rsp, err +} - return m.conn, nil +func (c *CSIDriver) authInterceptor(req interface{}) error { + if c.creds != nil { + authenticated, authErr := isAuthenticated(req, c.creds) + if !authenticated { + if authErr == ErrNoCredentials { + return status.Error(codes.InvalidArgument, authErr.Error()) + } + if authErr == ErrAuthFailed { + return status.Error(codes.Unauthenticated, authErr.Error()) + } + } + } + return nil } -func (m *MockCSIDriver) Stop() { - m.lock.Lock() - defer m.lock.Unlock() +func logGRPC(method string, request, reply interface{}, err error) { + // Log JSON with the request and response for easier parsing + logMessage := struct { + Method string + Request interface{} + Response interface{} + Error string + }{ + Method: method, + Request: request, + Response: reply, + } + if err != nil { + logMessage.Error = err.Error() + } + msg, _ := json.Marshal(logMessage) + fmt.Printf("gRPCCall: %s\n", msg) +} - if !m.running { - return +func isAuthenticated(req interface{}, creds *CSICreds) (bool, error) { + switch r := req.(type) { + case *csi.CreateVolumeRequest: + return authenticateCreateVolume(r, creds) + case *csi.DeleteVolumeRequest: + return authenticateDeleteVolume(r, creds) + case *csi.ControllerPublishVolumeRequest: + return authenticateControllerPublishVolume(r, creds) + case *csi.ControllerUnpublishVolumeRequest: + return authenticateControllerUnpublishVolume(r, creds) + case *csi.NodeStageVolumeRequest: + return authenticateNodeStageVolume(r, creds) + case *csi.NodePublishVolumeRequest: + return authenticateNodePublishVolume(r, creds) + case *csi.CreateSnapshotRequest: + return authenticateCreateSnapshot(r, creds) + case *csi.DeleteSnapshotRequest: + return authenticateDeleteSnapshot(r, creds) + default: + return true, nil } +} - m.server.Stop() - m.wg.Wait() +func authenticateCreateVolume(req *csi.CreateVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.CreateVolumeSecret) } -func (m *MockCSIDriver) Close() { - m.conn.Close() - m.server.Stop() +func authenticateDeleteVolume(req *csi.DeleteVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.DeleteVolumeSecret) } -func (m *MockCSIDriver) IsRunning() bool { - m.lock.Lock() - defer m.lock.Unlock() +func authenticateControllerPublishVolume(req *csi.ControllerPublishVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.ControllerPublishVolumeSecret) +} + +func authenticateControllerUnpublishVolume(req *csi.ControllerUnpublishVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.ControllerUnpublishVolumeSecret) +} - return m.running +func authenticateNodeStageVolume(req *csi.NodeStageVolumeRequest, creds *CSICreds) (bool, error) { + return 
credsCheck(req.GetSecrets(), creds.NodeStageVolumeSecret) +} + +func authenticateNodePublishVolume(req *csi.NodePublishVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.NodePublishVolumeSecret) +} + +func authenticateCreateSnapshot(req *csi.CreateSnapshotRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.CreateSnapshotSecret) +} + +func authenticateDeleteSnapshot(req *csi.DeleteSnapshotRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.DeleteSnapshotSecret) +} + +func credsCheck(secrets map[string]string, secretVal string) (bool, error) { + if len(secrets) == 0 { + return false, ErrNoCredentials + } + + if secrets[secretField] != secretVal { + return false, ErrAuthFailed + } + return true, nil } diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go index ed14e019..c54acaad 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go @@ -34,6 +34,19 @@ func (m *MockIdentityServer) EXPECT() *MockIdentityServerMockRecorder { return m.recorder } +// GetPluginCapabilities mocks base method +func (m *MockIdentityServer) GetPluginCapabilities(arg0 context.Context, arg1 *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "GetPluginCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.GetPluginCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPluginCapabilities indicates an expected call of GetPluginCapabilities +func (mr *MockIdentityServerMockRecorder) GetPluginCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginCapabilities", reflect.TypeOf((*MockIdentityServer)(nil).GetPluginCapabilities), arg0, arg1) +} + // GetPluginInfo mocks base method func (m *MockIdentityServer) GetPluginInfo(arg0 context.Context, arg1 *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { ret := m.ctrl.Call(m, "GetPluginInfo", arg0, arg1) @@ -47,17 +60,17 @@ func (mr *MockIdentityServerMockRecorder) GetPluginInfo(arg0, arg1 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginInfo", reflect.TypeOf((*MockIdentityServer)(nil).GetPluginInfo), arg0, arg1) } -// GetSupportedVersions mocks base method -func (m *MockIdentityServer) GetSupportedVersions(arg0 context.Context, arg1 *csi.GetSupportedVersionsRequest) (*csi.GetSupportedVersionsResponse, error) { - ret := m.ctrl.Call(m, "GetSupportedVersions", arg0, arg1) - ret0, _ := ret[0].(*csi.GetSupportedVersionsResponse) +// Probe mocks base method +func (m *MockIdentityServer) Probe(arg0 context.Context, arg1 *csi.ProbeRequest) (*csi.ProbeResponse, error) { + ret := m.ctrl.Call(m, "Probe", arg0, arg1) + ret0, _ := ret[0].(*csi.ProbeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetSupportedVersions indicates an expected call of GetSupportedVersions -func (mr *MockIdentityServerMockRecorder) GetSupportedVersions(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSupportedVersions", reflect.TypeOf((*MockIdentityServer)(nil).GetSupportedVersions), arg0, arg1) +// Probe indicates an expected call of Probe +func (mr *MockIdentityServerMockRecorder) Probe(arg0, arg1 interface{}) *gomock.Call { + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Probe", reflect.TypeOf((*MockIdentityServer)(nil).Probe), arg0, arg1) } // MockControllerServer is a mock of ControllerServer interface @@ -96,19 +109,6 @@ func (mr *MockControllerServerMockRecorder) ControllerGetCapabilities(arg0, arg1 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerGetCapabilities", reflect.TypeOf((*MockControllerServer)(nil).ControllerGetCapabilities), arg0, arg1) } -// ControllerProbe mocks base method -func (m *MockControllerServer) ControllerProbe(arg0 context.Context, arg1 *csi.ControllerProbeRequest) (*csi.ControllerProbeResponse, error) { - ret := m.ctrl.Call(m, "ControllerProbe", arg0, arg1) - ret0, _ := ret[0].(*csi.ControllerProbeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ControllerProbe indicates an expected call of ControllerProbe -func (mr *MockControllerServerMockRecorder) ControllerProbe(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerProbe", reflect.TypeOf((*MockControllerServer)(nil).ControllerProbe), arg0, arg1) -} - // ControllerPublishVolume mocks base method func (m *MockControllerServer) ControllerPublishVolume(arg0 context.Context, arg1 *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { ret := m.ctrl.Call(m, "ControllerPublishVolume", arg0, arg1) @@ -135,6 +135,19 @@ func (mr *MockControllerServerMockRecorder) ControllerUnpublishVolume(arg0, arg1 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerUnpublishVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerUnpublishVolume), arg0, arg1) } +// CreateSnapshot mocks base method +func (m *MockControllerServer) CreateSnapshot(arg0 context.Context, arg1 *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { + ret := m.ctrl.Call(m, "CreateSnapshot", arg0, arg1) + ret0, _ := ret[0].(*csi.CreateSnapshotResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateSnapshot indicates an expected call of CreateSnapshot +func (mr *MockControllerServerMockRecorder) CreateSnapshot(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSnapshot", reflect.TypeOf((*MockControllerServer)(nil).CreateSnapshot), arg0, arg1) +} + // CreateVolume mocks base method func (m *MockControllerServer) CreateVolume(arg0 context.Context, arg1 *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { ret := m.ctrl.Call(m, "CreateVolume", arg0, arg1) @@ -148,6 +161,19 @@ func (mr *MockControllerServerMockRecorder) CreateVolume(arg0, arg1 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolume", reflect.TypeOf((*MockControllerServer)(nil).CreateVolume), arg0, arg1) } +// DeleteSnapshot mocks base method +func (m *MockControllerServer) DeleteSnapshot(arg0 context.Context, arg1 *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { + ret := m.ctrl.Call(m, "DeleteSnapshot", arg0, arg1) + ret0, _ := ret[0].(*csi.DeleteSnapshotResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteSnapshot indicates an expected call of DeleteSnapshot +func (mr *MockControllerServerMockRecorder) DeleteSnapshot(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSnapshot", reflect.TypeOf((*MockControllerServer)(nil).DeleteSnapshot), arg0, arg1) +} + // DeleteVolume mocks base method func (m *MockControllerServer) DeleteVolume(arg0 context.Context, arg1 *csi.DeleteVolumeRequest) 
(*csi.DeleteVolumeResponse, error) { ret := m.ctrl.Call(m, "DeleteVolume", arg0, arg1) @@ -174,6 +200,19 @@ func (mr *MockControllerServerMockRecorder) GetCapacity(arg0, arg1 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCapacity", reflect.TypeOf((*MockControllerServer)(nil).GetCapacity), arg0, arg1) } +// ListSnapshots mocks base method +func (m *MockControllerServer) ListSnapshots(arg0 context.Context, arg1 *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + ret := m.ctrl.Call(m, "ListSnapshots", arg0, arg1) + ret0, _ := ret[0].(*csi.ListSnapshotsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSnapshots indicates an expected call of ListSnapshots +func (mr *MockControllerServerMockRecorder) ListSnapshots(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSnapshots", reflect.TypeOf((*MockControllerServer)(nil).ListSnapshots), arg0, arg1) +} + // ListVolumes mocks base method func (m *MockControllerServer) ListVolumes(arg0 context.Context, arg1 *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { ret := m.ctrl.Call(m, "ListVolumes", arg0, arg1) @@ -223,19 +262,6 @@ func (m *MockNodeServer) EXPECT() *MockNodeServerMockRecorder { return m.recorder } -// GetNodeID mocks base method -func (m *MockNodeServer) GetNodeID(arg0 context.Context, arg1 *csi.GetNodeIDRequest) (*csi.GetNodeIDResponse, error) { - ret := m.ctrl.Call(m, "GetNodeID", arg0, arg1) - ret0, _ := ret[0].(*csi.GetNodeIDResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetNodeID indicates an expected call of GetNodeID -func (mr *MockNodeServerMockRecorder) GetNodeID(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeID", reflect.TypeOf((*MockNodeServer)(nil).GetNodeID), arg0, arg1) -} - // NodeGetCapabilities mocks base method func (m *MockNodeServer) NodeGetCapabilities(arg0 context.Context, arg1 *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { ret := m.ctrl.Call(m, "NodeGetCapabilities", arg0, arg1) @@ -249,17 +275,30 @@ func (mr *MockNodeServerMockRecorder) NodeGetCapabilities(arg0, arg1 interface{} return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetCapabilities", reflect.TypeOf((*MockNodeServer)(nil).NodeGetCapabilities), arg0, arg1) } -// NodeProbe mocks base method -func (m *MockNodeServer) NodeProbe(arg0 context.Context, arg1 *csi.NodeProbeRequest) (*csi.NodeProbeResponse, error) { - ret := m.ctrl.Call(m, "NodeProbe", arg0, arg1) - ret0, _ := ret[0].(*csi.NodeProbeResponse) +// NodeGetInfo mocks base method +func (m *MockNodeServer) NodeGetInfo(arg0 context.Context, arg1 *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { + ret := m.ctrl.Call(m, "NodeGetInfo", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetInfoResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// NodeProbe indicates an expected call of NodeProbe -func (mr *MockNodeServerMockRecorder) NodeProbe(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeProbe", reflect.TypeOf((*MockNodeServer)(nil).NodeProbe), arg0, arg1) +// NodeGetInfo indicates an expected call of NodeGetInfo +func (mr *MockNodeServerMockRecorder) NodeGetInfo(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetInfo", reflect.TypeOf((*MockNodeServer)(nil).NodeGetInfo), arg0, arg1) +} + +// NodeGetVolumeStats mocks base method +func (m *MockNodeServer) 
NodeGetVolumeStats(arg0 context.Context, arg1 *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { + ret := m.ctrl.Call(m, "NodeGetVolumeStats", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetVolumeStatsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeGetVolumeStats indicates an expected call of NodeGetVolumeStats +func (mr *MockNodeServerMockRecorder) NodeGetVolumeStats(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetVolumeStats", reflect.TypeOf((*MockNodeServer)(nil).NodeGetVolumeStats), arg0, arg1) } // NodePublishVolume mocks base method @@ -275,6 +314,19 @@ func (mr *MockNodeServerMockRecorder) NodePublishVolume(arg0, arg1 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodePublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodePublishVolume), arg0, arg1) } +// NodeStageVolume mocks base method +func (m *MockNodeServer) NodeStageVolume(arg0 context.Context, arg1 *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeStageVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeStageVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeStageVolume indicates an expected call of NodeStageVolume +func (mr *MockNodeServerMockRecorder) NodeStageVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStageVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeStageVolume), arg0, arg1) +} + // NodeUnpublishVolume mocks base method func (m *MockNodeServer) NodeUnpublishVolume(arg0 context.Context, arg1 *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { ret := m.ctrl.Call(m, "NodeUnpublishVolume", arg0, arg1) @@ -287,3 +339,16 @@ func (m *MockNodeServer) NodeUnpublishVolume(arg0 context.Context, arg1 *csi.Nod func (mr *MockNodeServerMockRecorder) NodeUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnpublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeUnpublishVolume), arg0, arg1) } + +// NodeUnstageVolume mocks base method +func (m *MockNodeServer) NodeUnstageVolume(arg0 context.Context, arg1 *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeUnstageVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeUnstageVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeUnstageVolume indicates an expected call of NodeUnstageVolume +func (mr *MockNodeServerMockRecorder) NodeUnstageVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnstageVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeUnstageVolume), arg0, arg1) +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/mock.go b/vendor/github.com/kubernetes-csi/csi-test/driver/mock.go new file mode 100644 index 00000000..9b051eee --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/mock.go @@ -0,0 +1,83 @@ +/* +Copyright 2017 Luis Pabón luis@portworx.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver + +import ( + "net" + + "github.com/kubernetes-csi/csi-test/utils" + "google.golang.org/grpc" +) + +type MockCSIDriverServers struct { + Controller *MockControllerServer + Identity *MockIdentityServer + Node *MockNodeServer +} + +type MockCSIDriver struct { + CSIDriver + conn *grpc.ClientConn +} + +func NewMockCSIDriver(servers *MockCSIDriverServers) *MockCSIDriver { + return &MockCSIDriver{ + CSIDriver: CSIDriver{ + servers: &CSIDriverServers{ + Controller: servers.Controller, + Node: servers.Node, + Identity: servers.Identity, + }, + }, + } +} + +func (m *MockCSIDriver) Start() error { + // Listen on a port assigned by the net package + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return err + } + + if err := m.CSIDriver.Start(l); err != nil { + l.Close() + return err + } + + return nil +} + +func (m *MockCSIDriver) Nexus() (*grpc.ClientConn, error) { + // Start server + err := m.Start() + if err != nil { + return nil, err + } + + // Create a client connection + m.conn, err = utils.Connect(m.Address()) + if err != nil { + return nil, err + } + + return m.conn, nil +} + +func (m *MockCSIDriver) Close() { + m.conn.Close() + m.server.Stop() +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/glide.lock b/vendor/github.com/kubernetes-csi/csi-test/glide.lock deleted file mode 100644 index 58bd54a4..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/glide.lock +++ /dev/null @@ -1,135 +0,0 @@ -hash: f8f39aef239d83f930c5be2717e5bee5b2169902a3fd4a30a441a4e97ec60a07 -updated: 2017-12-13T08:17:19.928367307-05:00 -imports: -- name: github.com/container-storage-interface/spec - version: 4ac2d13f89360f2da40d188473d77f2ec56b9d0d - subpackages: - - lib/go/csi -- name: github.com/davecgh/go-spew - version: ecdeabc65495df2dec95d7c4a4c3e021903035e5 - subpackages: - - spew -- name: github.com/golang/mock - version: f67f7081ddcd0f92a20c1d58e7cd8b23253d15c7 - subpackages: - - gomock -- name: github.com/golang/protobuf - version: 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 - subpackages: - - proto - - protoc-gen-go/descriptor - - ptypes - - ptypes/any - - ptypes/duration - - ptypes/timestamp -- name: github.com/onsi/ginkgo - version: bc14b6691e7a788e12a21121abdaff1ccdcef9e9 - subpackages: - - config - - internal/codelocation - - internal/containernode - - internal/failer - - internal/leafnodes - - internal/remote - - internal/spec - - internal/spec_iterator - - internal/specrunner - - internal/suite - - internal/testingtproxy - - internal/writer - - reporters - - reporters/stenographer - - reporters/stenographer/support/go-colorable - - reporters/stenographer/support/go-isatty - - types -- name: github.com/onsi/gomega - version: c1fb6682134d162f37c13f42e7157653a7de7d2b - subpackages: - - format - - internal/assertion - - internal/asyncassertion - - internal/oraclematcher - - internal/testingtsupport - - matchers - - matchers/support/goraph/bipartitegraph - - matchers/support/goraph/edge - - matchers/support/goraph/node - - matchers/support/goraph/util - - types -- name: 
github.com/pmezard/go-difflib - version: 792786c7400a136282c1664665ae0a8db921c6c2 - subpackages: - - difflib -- name: github.com/stretchr/testify - version: 2aa2c176b9dab406a6970f6a55f513e8a8c8b18f - subpackages: - - assert -- name: golang.org/x/net - version: 5561cd9b4330353950f399814f427425c0a26fd2 - subpackages: - - context - - html - - html/atom - - html/charset - - http2 - - http2/hpack - - idna - - internal/timeseries - - lex/httplex - - trace -- name: golang.org/x/sys - version: d5840adf789d732bc8b00f37b26ca956a7cc8e79 - subpackages: - - unix -- name: golang.org/x/text - version: c01e4764d870b77f8abe5096ee19ad20d80e8075 - subpackages: - - encoding - - encoding/charmap - - encoding/htmlindex - - encoding/internal - - encoding/internal/identifier - - encoding/japanese - - encoding/korean - - encoding/simplifiedchinese - - encoding/traditionalchinese - - encoding/unicode - - internal/tag - - internal/utf8internal - - language - - runes - - secure/bidirule - - transform - - unicode/bidi - - unicode/norm -- name: google.golang.org/genproto - version: f676e0f3ac6395ff1a529ae59a6670878a8371a6 - subpackages: - - googleapis/rpc/status -- name: google.golang.org/grpc - version: 1687ce5770e998bcac6a136af6b52f079b9d902b - subpackages: - - balancer - - balancer/roundrobin - - codes - - connectivity - - credentials - - grpclb/grpc_lb_v1/messages - - grpclog - - internal - - keepalive - - metadata - - naming - - peer - - reflection - - reflection/grpc_reflection_v1alpha - - resolver - - resolver/dns - - resolver/passthrough - - stats - - status - - tap - - transport -- name: gopkg.in/yaml.v2 - version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5 -testImports: [] diff --git a/vendor/github.com/kubernetes-csi/csi-test/glide.yaml b/vendor/github.com/kubernetes-csi/csi-test/glide.yaml deleted file mode 100644 index b04e40ed..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/glide.yaml +++ /dev/null @@ -1,16 +0,0 @@ -package: github.com/kubernetes-csi/csi-test -import: -- package: github.com/container-storage-interface/spec - subpackages: - - lib/go/csi -- package: google.golang.org/grpc - subpackages: - - reflection -testImport: -- package: github.com/golang/mock - subpackages: - - gomock - - mockgen -- package: golang.org/x/net - subpackages: - - context diff --git a/vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go b/vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go new file mode 100644 index 00000000..10ea5f35 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go @@ -0,0 +1,18 @@ +package apitest + +import ( + "os" + "testing" + + "github.com/kubernetes-csi/csi-test/pkg/sanity" +) + +func TestMyDriver(t *testing.T) { + config := &sanity.Config{ + TargetPath: os.TempDir() + "/csi", + StagingPath: os.TempDir() + "/csi", + Address: "/tmp/e2e-csi-sanity.sock", + } + + sanity.Test(t, config) +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go b/vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go new file mode 100644 index 00000000..bca267cb --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go @@ -0,0 +1,42 @@ +package embedded + +import ( + "os" + "testing" + + "github.com/kubernetes-csi/csi-test/pkg/sanity" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestMyDriverGinkgo(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "CSI Sanity Test Suite") +} + +// The test suite into which the sanity tests get embedded may already +// have before/after suite functions. There can only be one such +// function. Here we define empty ones because then Ginkgo +// will start complaining at runtime when invoking the embedded case +// in hack/e2e.sh if a PR adds back such functions in the sanity test +// code. +var _ = BeforeSuite(func() {}) +var _ = AfterSuite(func() {}) + +var _ = Describe("MyCSIDriver", func() { + Context("Config A", func() { + config := &sanity.Config{ + TargetPath: os.TempDir() + "/csi", + StagingPath: os.TempDir() + "/csi", + Address: "/tmp/e2e-csi-sanity.sock", + } + + BeforeEach(func() {}) + + AfterEach(func() {}) + + Describe("CSI Driver Test Suite", func() { + sanity.GinkgoTest(config) + }) + }) +}) diff --git a/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh b/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh index a3ae9162..baf4c304 100755 --- a/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh +++ b/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh @@ -1,32 +1,72 @@ #!/bin/bash -CSI_ENDPOINTS="tcp://127.0.0.1:9998" -CSI_ENDPOINTS="$CSI_ENDPOINTS /tmp/e2e-csi-sanity.sock" -CSI_ENDPOINTS="$CSI_ENDPOINTS unix:///tmp/e2e-csi-sanity.sock" +TESTARGS=$@ +UDS="/tmp/e2e-csi-sanity.sock" +CSI_ENDPOINTS="$CSI_ENDPOINTS ${UDS}" +CSI_MOCK_VERSION="master" -go get -u github.com/thecodeteam/gocsi/mock -cd cmd/csi-sanity - make clean install || exit 1 -cd ../.. +# +# $1 - endpoint for mock. +# $2 - endpoint for csi-sanity in Grpc format. +# See https://github.com/grpc/grpc/blob/master/doc/naming.md +runTest() +{ + CSI_ENDPOINT=$1 ./bin/mock & + local pid=$! -for endpoint in $CSI_ENDPOINTS ; do - if ! echo $endpoint | grep tcp > /dev/null 2>&1 ; then - rm -f $endpoint + ./cmd/csi-sanity/csi-sanity $TESTARGS --csi.endpoint=$2; ret=$? + kill -9 $pid + + if [ $ret -ne 0 ] ; then + exit $ret fi +} - CSI_ENDPOINT=$endpoint mock & - pid=$! +runTestWithCreds() +{ + CSI_ENDPOINT=$1 CSI_ENABLE_CREDS=true ./bin/mock & + local pid=$! - csi-sanity $@ --ginkgo.skip=MOCKERRORS --csi.endpoint=$endpoint ; ret=$? + ./cmd/csi-sanity/csi-sanity $TESTARGS --csi.endpoint=$2 --csi.secrets=mock/mocksecret.yaml; ret=$? kill -9 $pid - if ! echo $endpoint | grep tcp > /dev/null 2>&1 ; then - rm -f $endpoint + if [ $ret -ne 0 ] ; then + exit $ret fi +} + +runTestAPI() +{ + CSI_ENDPOINT=$1 ./bin/mock & + local pid=$! + + GOCACHE=off go test -v ./hack/_apitest/api_test.go; ret=$? if [ $ret -ne 0 ] ; then exit $ret fi -done + + GOCACHE=off go test -v ./hack/_embedded/embedded_test.go; ret=$? + kill -9 $pid + + if [ $ret -ne 0 ] ; then + exit $ret + fi +} + +make + +cd cmd/csi-sanity + make clean install || exit 1 +cd ../.. 
+ +runTest "${UDS}" "${UDS}" +rm -f $UDS + +runTestWithCreds "${UDS}" "${UDS}" +rm -f $UDS + +runTestAPI "${UDS}" +rm -f $UDS exit 0 diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/AUTHORS b/vendor/github.com/kubernetes-csi/csi-test/mock/AUTHORS new file mode 100644 index 00000000..23eabcd2 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/AUTHORS @@ -0,0 +1,2 @@ +TheCodeTeam +Kubernetes Authors diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/README.md b/vendor/github.com/kubernetes-csi/csi-test/mock/README.md new file mode 100644 index 00000000..8274aa2c --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/README.md @@ -0,0 +1,22 @@ +# Mock CSI Driver +Extremely simple mock driver used to test `csi-sanity` based on `rexray/gocsi/mock`. +It can be used for testing of Container Orchestrators that implement client side +of CSI interface. + +``` +Usage of mock: + -disable-attach + Disables RPC_PUBLISH_UNPUBLISH_VOLUME capability. + -name string + CSI driver name. (default "io.kubernetes.storage.mock") +``` + +It prints all received CSI messages to stdout encoded as json, so a test can check that +CO sent the right CSI message. + +Example of such output: + +``` +gRPCCall: {"Method":"/csi.v0.Controller/ControllerGetCapabilities","Request":{},"Response":{"capabilities":[{"Type":{"Rpc":{"type":1}}},{"Type":{"Rpc":{"type":3}}},{"Type":{"Rpc":{"type":4}}},{"Type":{"Rpc":{"type":6}}},{"Type":{"Rpc":{"type":5}}},{"Type":{"Rpc":{"type":2}}}]},"Error":""} +gRPCCall: {"Method":"/csi.v0.Controller/ControllerPublishVolume","Request":{"volume_id":"12","node_id":"some-fake-node-id","volume_capability":{"AccessType":{"Mount":{}},"access_mode":{"mode":1}}},"Response":null,"Error":"rpc error: code = NotFound desc = Not matching Node ID some-fake-node-id to Mock Node ID io.kubernetes.storage.mock"} +``` diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go b/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go new file mode 100644 index 00000000..89835e11 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go @@ -0,0 +1,89 @@ +package cache + +import ( + "strings" + "sync" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +type SnapshotCache interface { + Add(snapshot Snapshot) + + Delete(i int) + + List(ready bool) []csi.Snapshot + + FindSnapshot(k, v string) (int, Snapshot) +} + +type Snapshot struct { + Name string + Parameters map[string]string + SnapshotCSI csi.Snapshot +} + +type snapshotCache struct { + snapshotsRWL sync.RWMutex + snapshots []Snapshot +} + +func NewSnapshotCache() SnapshotCache { + return &snapshotCache{ + snapshots: make([]Snapshot, 0), + } +} + +func (snap *snapshotCache) Add(snapshot Snapshot) { + snap.snapshotsRWL.Lock() + defer snap.snapshotsRWL.Unlock() + + snap.snapshots = append(snap.snapshots, snapshot) +} + +func (snap *snapshotCache) Delete(i int) { + snap.snapshotsRWL.Lock() + defer snap.snapshotsRWL.Unlock() + + copy(snap.snapshots[i:], snap.snapshots[i+1:]) + snap.snapshots = snap.snapshots[:len(snap.snapshots)-1] +} + +func (snap *snapshotCache) List(ready bool) []csi.Snapshot { + snap.snapshotsRWL.RLock() + defer snap.snapshotsRWL.RUnlock() + + snapshots := make([]csi.Snapshot, 0) + for _, v := range snap.snapshots { + if v.SnapshotCSI.GetReadyToUse() { + snapshots = append(snapshots, v.SnapshotCSI) + } + } + + return snapshots +} + +func (snap 
*snapshotCache) FindSnapshot(k, v string) (int, Snapshot) { + snap.snapshotsRWL.RLock() + defer snap.snapshotsRWL.RUnlock() + + snapshotIdx := -1 + for i, vi := range snap.snapshots { + switch k { + case "id": + if strings.EqualFold(v, vi.SnapshotCSI.GetSnapshotId()) { + return i, vi + } + case "sourceVolumeId": + if strings.EqualFold(v, vi.SnapshotCSI.SourceVolumeId) { + return i, vi + } + case "name": + if vi.Name == v { + return i, vi + } + } + } + + return snapshotIdx, Snapshot{} +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/main.go b/vendor/github.com/kubernetes-csi/csi-test/mock/main.go new file mode 100644 index 00000000..486d383b --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/main.go @@ -0,0 +1,95 @@ +/* +Copyright 2018 Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package main + +import ( + "flag" + "fmt" + "net" + "os" + "os/signal" + "strings" + "syscall" + + "github.com/kubernetes-csi/csi-test/driver" + "github.com/kubernetes-csi/csi-test/mock/service" +) + +func main() { + var config service.Config + flag.BoolVar(&config.DisableAttach, "disable-attach", false, "Disables RPC_PUBLISH_UNPUBLISH_VOLUME capability.") + flag.StringVar(&config.DriverName, "name", service.Name, "CSI driver name.") + flag.Int64Var(&config.AttachLimit, "attach-limit", 0, "number of attachable volumes on a node") + flag.Parse() + + endpoint := os.Getenv("CSI_ENDPOINT") + if len(endpoint) == 0 { + fmt.Println("CSI_ENDPOINT must be defined and must be a path") + os.Exit(1) + } + if strings.Contains(endpoint, ":") { + fmt.Println("CSI_ENDPOINT must be a unix path") + os.Exit(1) + } + + // Create mock driver + s := service.New(config) + servers := &driver.CSIDriverServers{ + Controller: s, + Identity: s, + Node: s, + } + d := driver.NewCSIDriver(servers) + + // If creds is enabled, set the default creds. + setCreds := os.Getenv("CSI_ENABLE_CREDS") + if len(setCreds) > 0 && setCreds == "true" { + d.SetDefaultCreds() + } + + // Listen + os.Remove(endpoint) + l, err := net.Listen("unix", endpoint) + if err != nil { + fmt.Printf("Error: Unable to listen on %s socket: %v\n", + endpoint, + err) + os.Exit(1) + } + defer os.Remove(endpoint) + + // Start server + if err := d.Start(l); err != nil { + fmt.Printf("Error: Unable to start mock CSI server: %v\n", + err) + os.Exit(1) + } + fmt.Println("mock driver started") + + // Wait for signal + sigc := make(chan os.Signal, 1) + sigs := []os.Signal{ + syscall.SIGTERM, + syscall.SIGHUP, + syscall.SIGINT, + syscall.SIGQUIT, + } + signal.Notify(sigc, sigs...) 
+ + <-sigc + d.Stop() + fmt.Println("mock driver stopped") +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml b/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml new file mode 100644 index 00000000..e7c9f20d --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml @@ -0,0 +1,16 @@ +CreateVolumeSecret: + secretKey: secretval1 +DeleteVolumeSecret: + secretKey: secretval2 +ControllerPublishVolumeSecret: + secretKey: secretval3 +ControllerUnpublishVolumeSecret: + secretKey: secretval4 +NodeStageVolumeSecret: + secretKey: secretval5 +NodePublishVolumeSecret: + secretKey: secretval6 +CreateSnapshotSecret: + secretKey: secretval7 +DeleteSnapshotSecret: + secretKey: secretval8 diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go new file mode 100644 index 00000000..eace79f8 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go @@ -0,0 +1,577 @@ +package service + +import ( + "fmt" + "math" + "path" + "reflect" + "strconv" + + log "github.com/sirupsen/logrus" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +const ( + MaxStorageCapacity = tib + ReadOnlyKey = "readonly" +) + +func (s *service) CreateVolume( + ctx context.Context, + req *csi.CreateVolumeRequest) ( + *csi.CreateVolumeResponse, error) { + + if len(req.Name) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume Name cannot be empty") + } + if req.VolumeCapabilities == nil { + return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty") + } + + // Check to see if the volume already exists. + if i, v := s.findVolByName(ctx, req.Name); i >= 0 { + // Requested volume name already exists, need to check if the existing volume's + // capacity is more or equal to new request's capacity. + if v.GetCapacityBytes() < req.GetCapacityRange().GetRequiredBytes() { + return nil, status.Error(codes.AlreadyExists, + fmt.Sprintf("Volume with name %s already exists", req.GetName())) + } + return &csi.CreateVolumeResponse{Volume: &v}, nil + } + + // If no capacity is specified then use 100GiB + capacity := gib100 + if cr := req.CapacityRange; cr != nil { + if rb := cr.RequiredBytes; rb > 0 { + capacity = rb + } + if lb := cr.LimitBytes; lb > 0 { + capacity = lb + } + } + // Check for maximum available capacity + if capacity >= MaxStorageCapacity { + return nil, status.Errorf(codes.OutOfRange, "Requested capacity %d exceeds maximum allowed %d", capacity, MaxStorageCapacity) + } + // Create the volume and add it to the service's in-mem volume slice. 
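+	// The volume ID is generated from an atomic counter (see newVolume) and
+	// the volume is also recorded in the package-level MockVolumes map.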
+ v := s.newVolume(req.Name, capacity) + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + s.vols = append(s.vols, v) + MockVolumes[v.GetVolumeId()] = Volume{ + VolumeCSI: v, + NodeID: "", + ISStaged: false, + ISPublished: false, + StageTargetPath: "", + TargetPath: "", + } + + return &csi.CreateVolumeResponse{Volume: &v}, nil +} + +func (s *service) DeleteVolume( + ctx context.Context, + req *csi.DeleteVolumeRequest) ( + *csi.DeleteVolumeResponse, error) { + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + // If the volume is not specified, return error + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + // If the volume does not exist then return an idempotent response. + i, _ := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return &csi.DeleteVolumeResponse{}, nil + } + + // This delete logic preserves order and prevents potential memory + // leaks. The slice's elements may not be pointers, but the structs + // themselves have fields that are. + copy(s.vols[i:], s.vols[i+1:]) + s.vols[len(s.vols)-1] = csi.Volume{} + s.vols = s.vols[:len(s.vols)-1] + log.WithField("volumeID", req.VolumeId).Debug("mock delete volume") + return &csi.DeleteVolumeResponse{}, nil +} + +func (s *service) ControllerPublishVolume( + ctx context.Context, + req *csi.ControllerPublishVolumeRequest) ( + *csi.ControllerPublishVolumeResponse, error) { + + if s.config.DisableAttach { + return nil, status.Error(codes.Unimplemented, "ControllerPublish is not supported") + } + + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + if len(req.NodeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Node ID cannot be empty") + } + if req.VolumeCapability == nil { + return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty") + } + + if req.NodeId != s.nodeID { + return nil, status.Errorf(codes.NotFound, "Not matching Node ID %s to Mock Node ID %s", req.NodeId, s.nodeID) + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // devPathKey is the key in the volume's attributes that is set to a + // mock device path if the volume has been published by the controller + // to the specified node. + devPathKey := path.Join(req.NodeId, "dev") + + // Check to see if the volume is already published. + if device := v.VolumeContext[devPathKey]; device != "" { + var volRo bool + var roVal string + if ro, ok := v.VolumeContext[ReadOnlyKey]; ok { + roVal = ro + } + + if roVal == "true" { + volRo = true + } else { + volRo = false + } + + // Check if readonly flag is compatible with the publish request. + if req.GetReadonly() != volRo { + return nil, status.Error(codes.AlreadyExists, "Volume published but has incompatible readonly flag") + } + + return &csi.ControllerPublishVolumeResponse{ + PublishContext: map[string]string{ + "device": device, + "readonly": roVal, + }, + }, nil + } + + var roVal string + if req.GetReadonly() { + roVal = "true" + } else { + roVal = "false" + } + + // Publish the volume. 
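+	// Publishing is simulated by storing a fake device path and the readonly
+	// flag in the volume context under a per-node key; ControllerUnpublishVolume
+	// removes the same keys again.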
+ device := "/dev/mock" + v.VolumeContext[devPathKey] = device + v.VolumeContext[ReadOnlyKey] = roVal + s.vols[i] = v + + return &csi.ControllerPublishVolumeResponse{ + PublishContext: map[string]string{ + "device": device, + "readonly": roVal, + }, + }, nil +} + +func (s *service) ControllerUnpublishVolume( + ctx context.Context, + req *csi.ControllerUnpublishVolumeRequest) ( + *csi.ControllerUnpublishVolumeResponse, error) { + + if s.config.DisableAttach { + return nil, status.Error(codes.Unimplemented, "ControllerPublish is not supported") + } + + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + nodeID := req.NodeId + if len(nodeID) == 0 { + // If node id is empty, no failure as per Spec + nodeID = s.nodeID + } + + if req.NodeId != s.nodeID { + return nil, status.Errorf(codes.NotFound, "Node ID %s does not match to expected Node ID %s", req.NodeId, s.nodeID) + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // devPathKey is the key in the volume's attributes that is set to a + // mock device path if the volume has been published by the controller + // to the specified node. + devPathKey := path.Join(nodeID, "dev") + + // Check to see if the volume is already unpublished. + if v.VolumeContext[devPathKey] == "" { + return &csi.ControllerUnpublishVolumeResponse{}, nil + } + + // Unpublish the volume. + delete(v.VolumeContext, devPathKey) + delete(v.VolumeContext, ReadOnlyKey) + s.vols[i] = v + + return &csi.ControllerUnpublishVolumeResponse{}, nil +} + +func (s *service) ValidateVolumeCapabilities( + ctx context.Context, + req *csi.ValidateVolumeCapabilitiesRequest) ( + *csi.ValidateVolumeCapabilitiesResponse, error) { + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + if len(req.VolumeCapabilities) == 0 { + return nil, status.Error(codes.InvalidArgument, req.VolumeId) + } + i, _ := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + return &csi.ValidateVolumeCapabilitiesResponse{ + Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{ + VolumeContext: req.GetVolumeContext(), + VolumeCapabilities: req.GetVolumeCapabilities(), + Parameters: req.GetParameters(), + }, + }, nil +} + +func (s *service) ListVolumes( + ctx context.Context, + req *csi.ListVolumesRequest) ( + *csi.ListVolumesResponse, error) { + + // Copy the mock volumes into a new slice in order to avoid + // locking the service's volume slice for the duration of the + // ListVolumes RPC. + var vols []csi.Volume + func() { + s.volsRWL.RLock() + defer s.volsRWL.RUnlock() + vols = make([]csi.Volume, len(s.vols)) + copy(vols, s.vols) + }() + + var ( + ulenVols = int32(len(vols)) + maxEntries = req.MaxEntries + startingToken int32 + ) + + if v := req.StartingToken; v != "" { + i, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return nil, status.Errorf( + codes.InvalidArgument, + "startingToken=%d !< int32=%d", + startingToken, math.MaxUint32) + } + startingToken = int32(i) + } + + if startingToken > ulenVols { + return nil, status.Errorf( + codes.InvalidArgument, + "startingToken=%d > len(vols)=%d", + startingToken, ulenVols) + } + + // Discern the number of remaining entries. 
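+	// For example: with 5 volumes, StartingToken "2" and MaxEntries 0,
+	// rem = 5-2 = 3, all three remaining volumes are returned in one page
+	// and NextToken stays empty.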
+ rem := ulenVols - startingToken + + // If maxEntries is 0 or greater than the number of remaining entries then + // set maxEntries to the number of remaining entries. + if maxEntries == 0 || maxEntries > rem { + maxEntries = rem + } + + var ( + i int + j = startingToken + entries = make( + []*csi.ListVolumesResponse_Entry, + maxEntries) + ) + + for i = 0; i < len(entries); i++ { + entries[i] = &csi.ListVolumesResponse_Entry{ + Volume: &vols[j], + } + j++ + } + + var nextToken string + if n := startingToken + int32(i); n < ulenVols { + nextToken = fmt.Sprintf("%d", n) + } + + return &csi.ListVolumesResponse{ + Entries: entries, + NextToken: nextToken, + }, nil +} + +func (s *service) GetCapacity( + ctx context.Context, + req *csi.GetCapacityRequest) ( + *csi.GetCapacityResponse, error) { + + return &csi.GetCapacityResponse{ + AvailableCapacity: MaxStorageCapacity, + }, nil +} + +func (s *service) ControllerGetCapabilities( + ctx context.Context, + req *csi.ControllerGetCapabilitiesRequest) ( + *csi.ControllerGetCapabilitiesResponse, error) { + + caps := []*csi.ControllerServiceCapability{ + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_LIST_VOLUMES, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_GET_CAPACITY, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, + }, + }, + }, + } + + if !s.config.DisableAttach { + caps = append(caps, &csi.ControllerServiceCapability{ + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, + }, + }, + }) + } + + return &csi.ControllerGetCapabilitiesResponse{ + Capabilities: caps, + }, nil +} + +func (s *service) CreateSnapshot(ctx context.Context, + req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { + // Check arguments + if len(req.GetName()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot Name cannot be empty") + } + if len(req.GetSourceVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot SourceVolumeId cannot be empty") + } + + // Check to see if the snapshot already exists. + if i, v := s.snapshots.FindSnapshot("name", req.GetName()); i >= 0 { + // Requested snapshot name already exists + if v.SnapshotCSI.GetSourceVolumeId() != req.GetSourceVolumeId() || !reflect.DeepEqual(v.Parameters, req.GetParameters()) { + return nil, status.Error(codes.AlreadyExists, + fmt.Sprintf("Snapshot with name %s already exists", req.GetName())) + } + return &csi.CreateSnapshotResponse{Snapshot: &v.SnapshotCSI}, nil + } + + // Create the snapshot and add it to the service's in-mem snapshot slice. 
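+	// Snapshot IDs come from their own atomic counter and mock snapshots are
+	// always created with ReadyToUse set to true (see newSnapshot).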
+ snapshot := s.newSnapshot(req.GetName(), req.GetSourceVolumeId(), req.GetParameters()) + s.snapshots.Add(snapshot) + + return &csi.CreateSnapshotResponse{Snapshot: &snapshot.SnapshotCSI}, nil +} + +func (s *service) DeleteSnapshot(ctx context.Context, + req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { + + // If the snapshot is not specified, return error + if len(req.SnapshotId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot ID cannot be empty") + } + + // If the snapshot does not exist then return an idempotent response. + i, _ := s.snapshots.FindSnapshot("id", req.SnapshotId) + if i < 0 { + return &csi.DeleteSnapshotResponse{}, nil + } + + // This delete logic preserves order and prevents potential memory + // leaks. The slice's elements may not be pointers, but the structs + // themselves have fields that are. + s.snapshots.Delete(i) + log.WithField("SnapshotId", req.SnapshotId).Debug("mock delete snapshot") + return &csi.DeleteSnapshotResponse{}, nil +} + +func (s *service) ListSnapshots(ctx context.Context, + req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + + // case 1: SnapshotId is not empty, return snapshots that match the snapshot id. + if len(req.GetSnapshotId()) != 0 { + return getSnapshotById(s, req) + } + + // case 2: SourceVolumeId is not empty, return snapshots that match the source volume id. + if len(req.GetSourceVolumeId()) != 0 { + return getSnapshotByVolumeId(s, req) + } + + // case 3: no parameter is set, so we return all the snapshots. + return getAllSnapshots(s, req) +} + +func getSnapshotById(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + if len(req.GetSnapshotId()) != 0 { + i, snapshot := s.snapshots.FindSnapshot("id", req.GetSnapshotId()) + if i < 0 { + return &csi.ListSnapshotsResponse{}, nil + } + + if len(req.GetSourceVolumeId()) != 0 { + if snapshot.SnapshotCSI.GetSourceVolumeId() != req.GetSourceVolumeId() { + return &csi.ListSnapshotsResponse{}, nil + } + } + + return &csi.ListSnapshotsResponse{ + Entries: []*csi.ListSnapshotsResponse_Entry{ + { + Snapshot: &snapshot.SnapshotCSI, + }, + }, + }, nil + } + return nil, nil +} + +func getSnapshotByVolumeId(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + if len(req.GetSourceVolumeId()) != 0 { + i, snapshot := s.snapshots.FindSnapshot("sourceVolumeId", req.SourceVolumeId) + if i < 0 { + return &csi.ListSnapshotsResponse{}, nil + } + return &csi.ListSnapshotsResponse{ + Entries: []*csi.ListSnapshotsResponse_Entry{ + { + Snapshot: &snapshot.SnapshotCSI, + }, + }, + }, nil + } + return nil, nil +} + +func getAllSnapshots(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + // Copy the mock snapshots into a new slice in order to avoid + // locking the service's snapshot slice for the duration of the + // ListSnapshots RPC. + readyToUse := true + snapshots := s.snapshots.List(readyToUse) + + var ( + ulenSnapshots = int32(len(snapshots)) + maxEntries = req.MaxEntries + startingToken int32 + ) + + if v := req.StartingToken; v != "" { + i, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return nil, status.Errorf( + codes.Aborted, + "startingToken=%d !< int32=%d", + startingToken, math.MaxUint32) + } + startingToken = int32(i) + } + + if startingToken > ulenSnapshots { + return nil, status.Errorf( + codes.Aborted, + "startingToken=%d > len(snapshots)=%d", + startingToken, ulenSnapshots) + } + + // Discern the number of remaining entries. 
+ rem := ulenSnapshots - startingToken + + // If maxEntries is 0 or greater than the number of remaining entries then + // set maxEntries to the number of remaining entries. + if maxEntries == 0 || maxEntries > rem { + maxEntries = rem + } + + var ( + i int + j = startingToken + entries = make( + []*csi.ListSnapshotsResponse_Entry, + maxEntries) + ) + + for i = 0; i < len(entries); i++ { + entries[i] = &csi.ListSnapshotsResponse_Entry{ + Snapshot: &snapshots[j], + } + j++ + } + + var nextToken string + if n := startingToken + int32(i); n < ulenSnapshots { + nextToken = fmt.Sprintf("%d", n) + } + + return &csi.ListSnapshotsResponse{ + Entries: entries, + NextToken: nextToken, + }, nil +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go new file mode 100644 index 00000000..7e8735a9 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go @@ -0,0 +1,48 @@ +package service + +import ( + "golang.org/x/net/context" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/protobuf/ptypes/wrappers" +) + +func (s *service) GetPluginInfo( + ctx context.Context, + req *csi.GetPluginInfoRequest) ( + *csi.GetPluginInfoResponse, error) { + + return &csi.GetPluginInfoResponse{ + Name: s.config.DriverName, + VendorVersion: VendorVersion, + Manifest: Manifest, + }, nil +} + +func (s *service) Probe( + ctx context.Context, + req *csi.ProbeRequest) ( + *csi.ProbeResponse, error) { + + return &csi.ProbeResponse{ + Ready: &wrappers.BoolValue{Value: true}, + }, nil +} + +func (s *service) GetPluginCapabilities( + ctx context.Context, + req *csi.GetPluginCapabilitiesRequest) ( + *csi.GetPluginCapabilitiesResponse, error) { + + return &csi.GetPluginCapabilitiesResponse{ + Capabilities: []*csi.PluginCapability{ + { + Type: &csi.PluginCapability_Service_{ + Service: &csi.PluginCapability_Service{ + Type: csi.PluginCapability_Service_CONTROLLER_SERVICE, + }, + }, + }, + }, + }, nil +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go new file mode 100644 index 00000000..886a219a --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go @@ -0,0 +1,244 @@ +package service + +import ( + "path" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "golang.org/x/net/context" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +func (s *service) NodeStageVolume( + ctx context.Context, + req *csi.NodeStageVolumeRequest) ( + *csi.NodeStageVolumeResponse, error) { + + device, ok := req.PublishContext["device"] + if !ok { + if s.config.DisableAttach { + device = "mock device" + } else { + return nil, status.Error( + codes.InvalidArgument, + "stage volume info 'device' key required") + } + } + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + if len(req.GetStagingTargetPath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Staging Target Path cannot be empty") + } + + if req.GetVolumeCapability() == nil { + return nil, status.Error(codes.InvalidArgument, "Volume Capability cannot be empty") + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // 
nodeStgPathKey is the key in the volume's attributes that is set to a + // mock stage path if the volume has been published by the node + nodeStgPathKey := path.Join(s.nodeID, req.StagingTargetPath) + + // Check to see if the volume has already been staged. + if v.VolumeContext[nodeStgPathKey] != "" { + // TODO: Check for the capabilities to be equal. Return "ALREADY_EXISTS" + // if the capabilities don't match. + return &csi.NodeStageVolumeResponse{}, nil + } + + // Stage the volume. + v.VolumeContext[nodeStgPathKey] = device + s.vols[i] = v + + return &csi.NodeStageVolumeResponse{}, nil +} + +func (s *service) NodeUnstageVolume( + ctx context.Context, + req *csi.NodeUnstageVolumeRequest) ( + *csi.NodeUnstageVolumeResponse, error) { + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + if len(req.GetStagingTargetPath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Staging Target Path cannot be empty") + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // nodeStgPathKey is the key in the volume's attributes that is set to a + // mock stage path if the volume has been published by the node + nodeStgPathKey := path.Join(s.nodeID, req.StagingTargetPath) + + // Check to see if the volume has already been unstaged. + if v.VolumeContext[nodeStgPathKey] == "" { + return &csi.NodeUnstageVolumeResponse{}, nil + } + + // Unpublish the volume. + delete(v.VolumeContext, nodeStgPathKey) + s.vols[i] = v + + return &csi.NodeUnstageVolumeResponse{}, nil +} + +func (s *service) NodePublishVolume( + ctx context.Context, + req *csi.NodePublishVolumeRequest) ( + *csi.NodePublishVolumeResponse, error) { + + device, ok := req.PublishContext["device"] + if !ok { + if s.config.DisableAttach { + device = "mock device" + } else { + return nil, status.Error( + codes.InvalidArgument, + "stage volume info 'device' key required") + } + } + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + if len(req.GetTargetPath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Target Path cannot be empty") + } + + if req.GetVolumeCapability() == nil { + return nil, status.Error(codes.InvalidArgument, "Volume Capability cannot be empty") + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // nodeMntPathKey is the key in the volume's attributes that is set to a + // mock mount path if the volume has been published by the node + nodeMntPathKey := path.Join(s.nodeID, req.TargetPath) + + // Check to see if the volume has already been published. + if v.VolumeContext[nodeMntPathKey] != "" { + + // Requests marked Readonly fail due to volumes published by + // the Mock driver supporting only RW mode. + if req.Readonly { + return nil, status.Error(codes.AlreadyExists, req.VolumeId) + } + + return &csi.NodePublishVolumeResponse{}, nil + } + + // Publish the volume. 
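+	// If the volume was staged first, record the staging path as the mock
+	// mount source; otherwise fall back to the device from the publish context.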
+ if req.GetStagingTargetPath() != "" { + v.VolumeContext[nodeMntPathKey] = req.GetStagingTargetPath() + } else { + v.VolumeContext[nodeMntPathKey] = device + } + s.vols[i] = v + + return &csi.NodePublishVolumeResponse{}, nil +} + +func (s *service) NodeUnpublishVolume( + ctx context.Context, + req *csi.NodeUnpublishVolumeRequest) ( + *csi.NodeUnpublishVolumeResponse, error) { + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + if len(req.GetTargetPath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Target Path cannot be empty") + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // nodeMntPathKey is the key in the volume's attributes that is set to a + // mock mount path if the volume has been published by the node + nodeMntPathKey := path.Join(s.nodeID, req.TargetPath) + + // Check to see if the volume has already been unpublished. + if v.VolumeContext[nodeMntPathKey] == "" { + return &csi.NodeUnpublishVolumeResponse{}, nil + } + + // Unpublish the volume. + delete(v.VolumeContext, nodeMntPathKey) + s.vols[i] = v + + return &csi.NodeUnpublishVolumeResponse{}, nil +} + +func (s *service) NodeGetCapabilities( + ctx context.Context, + req *csi.NodeGetCapabilitiesRequest) ( + *csi.NodeGetCapabilitiesResponse, error) { + + return &csi.NodeGetCapabilitiesResponse{ + Capabilities: []*csi.NodeServiceCapability{ + { + Type: &csi.NodeServiceCapability_Rpc{ + Rpc: &csi.NodeServiceCapability_RPC{ + Type: csi.NodeServiceCapability_RPC_UNKNOWN, + }, + }, + }, + { + Type: &csi.NodeServiceCapability_Rpc{ + Rpc: &csi.NodeServiceCapability_RPC{ + Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, + }, + }, + }, + }, + }, nil +} + +func (s *service) NodeGetInfo(ctx context.Context, + req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { + csiNodeResponse := &csi.NodeGetInfoResponse{ + NodeId: s.nodeID, + } + if s.config.AttachLimit > 0 { + csiNodeResponse.MaxVolumesPerNode = s.config.AttachLimit + } + return csiNodeResponse, nil +} + +func (s *service) NodeGetVolumeStats(ctx context.Context, + req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { + return &csi.NodeGetVolumeStatsResponse{}, nil + +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go new file mode 100644 index 00000000..2254ccb8 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go @@ -0,0 +1,147 @@ +package service + +import ( + "fmt" + "strings" + "sync" + "sync/atomic" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/kubernetes-csi/csi-test/mock/cache" + "golang.org/x/net/context" + + "github.com/golang/protobuf/ptypes" +) + +const ( + // Name is the name of the CSI plug-in. + Name = "io.kubernetes.storage.mock" + + // VendorVersion is the version returned by GetPluginInfo. + VendorVersion = "0.3.0" +) + +// Manifest is the SP's manifest. +var Manifest = map[string]string{ + "url": "https://github.com/kubernetes-csi/csi-test/mock", +} + +type Config struct { + DisableAttach bool + DriverName string + AttachLimit int64 +} + +// Service is the CSI Mock service provider. 
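+// It combines the CSI Controller, Identity and Node gRPC services in a
+// single in-memory implementation.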
+type Service interface { + csi.ControllerServer + csi.IdentityServer + csi.NodeServer +} + +type service struct { + sync.Mutex + nodeID string + vols []csi.Volume + volsRWL sync.RWMutex + volsNID uint64 + snapshots cache.SnapshotCache + snapshotsNID uint64 + config Config +} + +type Volume struct { + sync.Mutex + VolumeCSI csi.Volume + NodeID string + ISStaged bool + ISPublished bool + StageTargetPath string + TargetPath string +} + +var MockVolumes map[string]Volume + +// New returns a new Service. +func New(config Config) Service { + s := &service{ + nodeID: config.DriverName, + config: config, + } + s.snapshots = cache.NewSnapshotCache() + s.vols = []csi.Volume{ + s.newVolume("Mock Volume 1", gib100), + s.newVolume("Mock Volume 2", gib100), + s.newVolume("Mock Volume 3", gib100), + } + MockVolumes = map[string]Volume{} + + s.snapshots.Add(s.newSnapshot("Mock Snapshot 1", "1", map[string]string{"Description": "snapshot 1"})) + s.snapshots.Add(s.newSnapshot("Mock Snapshot 2", "2", map[string]string{"Description": "snapshot 2"})) + s.snapshots.Add(s.newSnapshot("Mock Snapshot 3", "3", map[string]string{"Description": "snapshot 3"})) + + return s +} + +const ( + kib int64 = 1024 + mib int64 = kib * 1024 + gib int64 = mib * 1024 + gib100 int64 = gib * 100 + tib int64 = gib * 1024 + tib100 int64 = tib * 100 +) + +func (s *service) newVolume(name string, capcity int64) csi.Volume { + return csi.Volume{ + VolumeId: fmt.Sprintf("%d", atomic.AddUint64(&s.volsNID, 1)), + VolumeContext: map[string]string{"name": name}, + CapacityBytes: capcity, + } +} + +func (s *service) findVol(k, v string) (volIdx int, volInfo csi.Volume) { + s.volsRWL.RLock() + defer s.volsRWL.RUnlock() + return s.findVolNoLock(k, v) +} + +func (s *service) findVolNoLock(k, v string) (volIdx int, volInfo csi.Volume) { + volIdx = -1 + + for i, vi := range s.vols { + switch k { + case "id": + if strings.EqualFold(v, vi.GetVolumeId()) { + return i, vi + } + case "name": + if n, ok := vi.VolumeContext["name"]; ok && strings.EqualFold(v, n) { + return i, vi + } + } + } + + return +} + +func (s *service) findVolByName( + ctx context.Context, name string) (int, csi.Volume) { + + return s.findVol("name", name) +} + +func (s *service) newSnapshot(name, sourceVolumeId string, parameters map[string]string) cache.Snapshot { + + ptime := ptypes.TimestampNow() + return cache.Snapshot{ + Name: name, + Parameters: parameters, + SnapshotCSI: csi.Snapshot{ + SnapshotId: fmt.Sprintf("%d", atomic.AddUint64(&s.snapshotsNID, 1)), + CreationTime: ptime, + SourceVolumeId: sourceVolumeId, + ReadyToUse: true, + }, + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md index f258382c..fd30f192 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md @@ -6,15 +6,6 @@ For CSI drivers written in Golang, the framework provides a simple API function to call to test the driver. Another way to run the test suite is to use the command line program [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity). -## Status -Although the project can be used immediately, it will not provide full -coverage since it is not yet finished. 
Below shows the percentage of -completion for each CSI service: - -* Identity Service: 95% -* Controller Service: 0% -* Node Service: 0% - ## For Golang CSI Drivers This framework leverages the Ginkgo BDD testing framework to deliver a descriptive test suite for your driver. To test your driver, simply call the API in one of your @@ -22,13 +13,50 @@ Golang `TestXXX` functions. For example: ```go func TestMyDriver(t *testing.T) { - // Setup the full driver and its environment - ... setup driver ... + // Setup the full driver and its environment + ... setup driver ... + config := &sanity.Config{ + TargetPath: ... + StagingPath: ... + Address: endpoint, + } + - // Now call the test suite - sanity.Test(t, driverEndpointAddress) + // Now call the test suite + sanity.Test(t, config) } ``` +Only one such test function is supported because under the hood a +Ginkgo test suite gets constructed and executed by the call. + +Alternatively, the tests can also be embedded inside a Ginkgo test +suite. In that case it is possible to define multiple tests with +different configurations: + +```go +var _ = Describe("MyCSIDriver", func () { + Context("Config A", func () { + var config &sanity.Config + + BeforeEach(func() { + //... setup driver and config... + }) + + AfterEach(func() { + //...tear down driver... + }) + + Describe("CSI sanity", func() { + sanity.GinkgoTest(config) + }) + }) + + Context("Config B", func () { + // other configs + }) +}) +``` + ## Command line program Please see [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go new file mode 100644 index 00000000..65a30334 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go @@ -0,0 +1,134 @@ +/* +Copyright 2018 Intel Corporation + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sanity + +import ( + "context" + "log" + + "github.com/container-storage-interface/spec/lib/go/csi" + + . "github.com/onsi/ginkgo" +) + +// VolumeInfo keeps track of the information needed to delete a volume. +type VolumeInfo struct { + // Node on which the volume was published, empty if none + // or publishing is not supported. + NodeID string + + // Volume ID assigned by CreateVolume. + VolumeID string +} + +// Cleanup keeps track of resources, in particular volumes, which need +// to be freed when testing is done. +type Cleanup struct { + Context *SanityContext + ControllerClient csi.ControllerClient + NodeClient csi.NodeClient + ControllerPublishSupported bool + NodeStageSupported bool + + // Maps from volume name to the node ID for which the volume + // is published and the volume ID. + volumes map[string]VolumeInfo +} + +// RegisterVolume adds or updates an entry for the volume with the +// given name. 
+func (cl *Cleanup) RegisterVolume(name string, info VolumeInfo) { + if cl.volumes == nil { + cl.volumes = make(map[string]VolumeInfo) + } + cl.volumes[name] = info +} + +// MaybeRegisterVolume adds or updates an entry for the volume with +// the given name if CreateVolume was successful. +func (cl *Cleanup) MaybeRegisterVolume(name string, vol *csi.CreateVolumeResponse, err error) { + if err == nil && vol.GetVolume().GetVolumeId() != "" { + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + } +} + +// UnregisterVolume removes the entry for the volume with the +// given name, thus preventing all cleanup operations for it. +func (cl *Cleanup) UnregisterVolume(name string) { + if cl.volumes != nil { + delete(cl.volumes, name) + } +} + +// DeleteVolumes stops using the registered volumes and tries to delete all of them. +func (cl *Cleanup) DeleteVolumes() { + if cl.volumes == nil { + return + } + logger := log.New(GinkgoWriter, "cleanup: ", 0) + ctx := context.Background() + + for name, info := range cl.volumes { + logger.Printf("deleting %s = %s", name, info.VolumeID) + if _, err := cl.NodeClient.NodeUnpublishVolume( + ctx, + &csi.NodeUnpublishVolumeRequest{ + VolumeId: info.VolumeID, + TargetPath: cl.Context.Config.TargetPath, + }, + ); err != nil { + logger.Printf("warning: NodeUnpublishVolume: %s", err) + } + + if cl.NodeStageSupported { + if _, err := cl.NodeClient.NodeUnstageVolume( + ctx, + &csi.NodeUnstageVolumeRequest{ + VolumeId: info.VolumeID, + StagingTargetPath: cl.Context.Config.StagingPath, + }, + ); err != nil { + logger.Printf("warning: NodeUnstageVolume: %s", err) + } + } + + if cl.ControllerPublishSupported && info.NodeID != "" { + if _, err := cl.ControllerClient.ControllerUnpublishVolume( + ctx, + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: info.VolumeID, + NodeId: info.NodeID, + Secrets: cl.Context.Secrets.ControllerUnpublishVolumeSecret, + }, + ); err != nil { + logger.Printf("warning: ControllerUnpublishVolume: %s", err) + } + } + + if _, err := cl.ControllerClient.DeleteVolume( + ctx, + &csi.DeleteVolumeRequest{ + VolumeId: info.VolumeID, + Secrets: cl.Context.Secrets.DeleteVolumeSecret, + }, + ); err != nil { + logger.Printf("error: DeleteVolume: %s", err) + } + + cl.UnregisterVolume(name) + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go index 33e999b7..022e1e6d 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go @@ -17,33 +17,56 @@ limitations under the License. package sanity import ( + "context" "fmt" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/container-storage-interface/spec/lib/go/csi" - context "golang.org/x/net/context" + + "strconv" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -func verifyVolumeInfo(v *csi.VolumeInfo) { +const ( + // DefTestVolumeSize defines the base size of dynamically + // provisioned volumes. 10GB by default, can be overridden by + // setting Config.TestVolumeSize. 
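+	// (The value below is 10 GiB expressed in bytes.)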
+ DefTestVolumeSize int64 = 10 * 1024 * 1024 * 1024 + + MaxNameLength int = 128 +) + +func TestVolumeSize(sc *SanityContext) int64 { + if sc.Config.TestVolumeSize > 0 { + return sc.Config.TestVolumeSize + } + return DefTestVolumeSize +} + +func verifyVolumeInfo(v *csi.Volume) { Expect(v).NotTo(BeNil()) - Expect(v.GetId()).NotTo(BeEmpty()) + Expect(v.GetVolumeId()).NotTo(BeEmpty()) } -func isCapabilitySupported( +func verifySnapshotInfo(snapshot *csi.Snapshot) { + Expect(snapshot).NotTo(BeNil()) + Expect(snapshot.GetSnapshotId()).NotTo(BeEmpty()) + Expect(snapshot.GetSourceVolumeId()).NotTo(BeEmpty()) + Expect(snapshot.GetCreationTime()).NotTo(BeZero()) +} + +func isControllerCapabilitySupported( c csi.ControllerClient, capType csi.ControllerServiceCapability_RPC_Type, ) bool { caps, err := c.ControllerGetCapabilities( context.Background(), - &csi.ControllerGetCapabilitiesRequest{ - Version: csiClientVersion, - }) + &csi.ControllerGetCapabilitiesRequest{}) Expect(err).NotTo(HaveOccurred()) Expect(caps).NotTo(BeNil()) Expect(caps.GetCapabilities()).NotTo(BeNil()) @@ -57,387 +80,1367 @@ func isCapabilitySupported( return false } -var _ = Describe("ControllerGetCapabilities [Controller Server]", func() { +var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { var ( c csi.ControllerClient + n csi.NodeClient + + cl *Cleanup ) BeforeEach(func() { - c = csi.NewControllerClient(conn) + c = csi.NewControllerClient(sc.Conn) + n = csi.NewNodeClient(sc.Conn) + + cl = &Cleanup{ + NodeClient: n, + ControllerClient: c, + Context: sc, + } }) - It("should fail when no version is provided", func() { - _, err := c.ControllerGetCapabilities( - context.Background(), - &csi.ControllerGetCapabilitiesRequest{}) - Expect(err).To(HaveOccurred()) + AfterEach(func() { + cl.DeleteVolumes() + }) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + Describe("ControllerGetCapabilities", func() { + It("should return appropriate capabilities", func() { + caps, err := c.ControllerGetCapabilities( + context.Background(), + &csi.ControllerGetCapabilitiesRequest{}) + + By("checking successful response") + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + Expect(caps.GetCapabilities()).NotTo(BeNil()) + + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetRpc()).NotTo(BeNil()) + + switch cap.GetRpc().GetType() { + case csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME: + case csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME: + case csi.ControllerServiceCapability_RPC_LIST_VOLUMES: + case csi.ControllerServiceCapability_RPC_GET_CAPACITY: + case csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT: + case csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS: + case csi.ControllerServiceCapability_RPC_PUBLISH_READONLY: + default: + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) + } + } + }) }) - It("should return appropriate capabilities", func() { - caps, err := c.ControllerGetCapabilities( - context.Background(), - &csi.ControllerGetCapabilitiesRequest{ - Version: csiClientVersion, - }) + Describe("GetCapacity", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_GET_CAPACITY) { + Skip("GetCapacity not supported") + } + }) - By("checking successful response") - Expect(err).NotTo(HaveOccurred()) - Expect(caps).NotTo(BeNil()) - Expect(caps.GetCapabilities()).NotTo(BeNil()) + It("should return capacity (no 
optional values added)", func() { + _, err := c.GetCapacity( + context.Background(), + &csi.GetCapacityRequest{}) + Expect(err).NotTo(HaveOccurred()) - for _, cap := range caps.GetCapabilities() { - Expect(cap.GetRpc()).NotTo(BeNil()) + // Since capacity is int64 we will not be checking it + // The value of zero is a possible value. + }) + }) - switch cap.GetRpc().GetType() { - case csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME: - case csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME: - case csi.ControllerServiceCapability_RPC_LIST_VOLUMES: - case csi.ControllerServiceCapability_RPC_GET_CAPACITY: - default: - Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) + Describe("ListVolumes", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_VOLUMES) { + Skip("ListVolumes not supported") } - } - }) -}) + }) -var _ = Describe("GetCapacity [Controller Server]", func() { - var ( - c csi.ControllerClient - ) + It("should return appropriate values (no optional values added)", func() { + vols, err := c.ListVolumes( + context.Background(), + &csi.ListVolumesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(vols).NotTo(BeNil()) - BeforeEach(func() { - c = csi.NewControllerClient(conn) + for _, vol := range vols.GetEntries() { + verifyVolumeInfo(vol.GetVolume()) + } + }) - if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_GET_CAPACITY) { - Skip("GetCapacity not supported") - } + // TODO: Add test to test for tokens + + // TODO: Add test which checks list of volume is there when created, + // and not there when deleted. }) - It("should fail when no version is provided", func() { + Describe("CreateVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { + Skip("CreateVolume not supported") + } + }) - By("failing when there is no version") - _, err := c.GetCapacity( - context.Background(), - &csi.GetCapacityRequest{}) - Expect(err).To(HaveOccurred()) + It("should fail when no name is provided", func() { + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + cl.MaybeRegisterVolume("", vol, err) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - It("should return capacity (no optional values added)", func() { - _, err := c.GetCapacity( - context.Background(), - &csi.GetCapacityRequest{ - Version: csiClientVersion, - }) - Expect(err).NotTo(HaveOccurred()) + It("should fail when no volume capabilities are provided", func() { + name := uniqueString("sanity-controller-create-no-volume-capabilities") + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + cl.MaybeRegisterVolume(name, vol, err) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values SingleNodeWriter NoCapacity Type:Mount", func() { + + By("creating a volume") + name := 
uniqueString("sanity-controller-create-single-no-capacity") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should return appropriate values SingleNodeWriter WithCapacity 1Gi Type:Mount", func() { + + By("creating a volume") + name := uniqueString("sanity-controller-create-single-with-capacity") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: TestVolumeSize(sc), + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + if serverError, ok := status.FromError(err); ok && + (serverError.Code() == codes.OutOfRange || serverError.Code() == codes.Unimplemented) { + Skip("Required bytes not supported") + } + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + Expect(vol.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", TestVolumeSize(sc))) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + It("should not fail when requesting to create a volume with already existing name and same capacity.", func() { + + By("creating a volume") + name := uniqueString("sanity-controller-create-twice") + size := TestVolumeSize(sc) + + vol1, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol1).NotTo(BeNil()) + Expect(vol1.GetVolume()).NotTo(BeNil()) + Expect(vol1.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, 
VolumeInfo{VolumeID: vol1.GetVolume().GetVolumeId()}) + Expect(vol1.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) + + vol2, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol2).NotTo(BeNil()) + Expect(vol2.GetVolume()).NotTo(BeNil()) + Expect(vol2.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + Expect(vol2.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) + Expect(vol1.GetVolume().GetVolumeId()).To(Equal(vol2.GetVolume().GetVolumeId())) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol1.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + It("should fail when requesting to create a volume with already existing name and different capacity.", func() { + + By("creating a volume") + name := uniqueString("sanity-controller-create-twice-different") + size1 := TestVolumeSize(sc) + + vol1, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size1, + LimitBytes: size1, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).ToNot(HaveOccurred()) + Expect(vol1).NotTo(BeNil()) + Expect(vol1.GetVolume()).NotTo(BeNil()) + Expect(vol1.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol1.GetVolume().GetVolumeId()}) + size2 := 2 * TestVolumeSize(sc) + + _, err = c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size2, + LimitBytes: size2, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol1.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should not fail when creating volume with maximum-length name", func() { - // Since capacity is uint64 we will not be checking it - // The value of zero is a possible value. 
+ nameBytes := make([]byte, MaxNameLength) + for i := 0; i < MaxNameLength; i++ { + nameBytes[i] = 'a' + } + name := string(nameBytes) + By("creating a volume") + size := TestVolumeSize(sc) + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + Expect(vol.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) }) -}) -var _ = Describe("ListVolumes [Controller Server]", func() { - var ( - c csi.ControllerClient - ) + Describe("DeleteVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { + Skip("DeleteVolume not supported") + } + }) - BeforeEach(func() { - c = csi.NewControllerClient(conn) + It("should fail when no volume id is provided", func() { - if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_VOLUMES) { - Skip("ListVolumes not supported") - } + _, err := c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should succeed when an invalid volume id is used", func() { + + _, err := c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: "reallyfakevolumeid", + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a volume") + name := uniqueString("sanity-controller-create-appropriate") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + // Delete Volume + By("deleting a volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: 
sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) }) - It("should fail when no version is provided", func() { + Describe("ValidateVolumeCapabilities", func() { + It("should fail when no volume id is provided", func() { - By("failing when there is no version") - _, err := c.ListVolumes( - context.Background(), - &csi.ListVolumesRequest{}) - Expect(err).To(HaveOccurred()) + _, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{}) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no volume capabilities are provided", func() { + + _, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{ + VolumeId: "id", + }) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-validate") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + // ValidateVolumeCapabilities + By("validating volume capabilities") + valivolcap, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(valivolcap).NotTo(BeNil()) + + // If confirmation is provided then it is REQUIRED to provide + // the volume capabilities + if valivolcap.GetConfirmed() != nil { + Expect(valivolcap.GetConfirmed().GetVolumeCapabilities()).NotTo(BeEmpty()) + } + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should fail when the requested volume does not exist", func() { + + _, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{ + VolumeId: "some-vol-id", + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: 
&csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + }) }) - It("should return appropriate values (no optional values added)", func() { - vols, err := c.ListVolumes( - context.Background(), - &csi.ListVolumesRequest{ - Version: csiClientVersion, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(vols).NotTo(BeNil()) - Expect(vols.GetEntries()).NotTo(BeNil()) + Describe("ControllerPublishVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { + Skip("ControllerPublishVolume not supported") + } + }) - for _, vol := range vols.GetEntries() { - verifyVolumeInfo(vol.GetVolumeInfo()) - } + It("should fail when no volume id is provided", func() { + + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no node id is provided", func() { + + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: "id", + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no volume capability is provided", func() { + + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: "id", + NodeId: "fakenode", + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-publish") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + By("getting a node id") + nid, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: 
vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: nid.GetNodeId()}) + Expect(conpubvol).NotTo(BeNil()) + + By("cleaning up unpublishing the volume") + + conunpubvol, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should fail when the volume does not exist", func() { + + By("calling controller publish on a non-existent volume") + + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: "some-vol-id", + NodeId: "some-node-id", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + }) + + It("should fail when the node does not exist", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-wrong-node") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: "some-fake-node-id", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: 
sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should fail when the volume is already published but is incompatible", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-published-incompatible") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + By("getting a node id") + nid, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + pubReq := &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + } + + conpubvol, err := c.ControllerPublishVolume(context.Background(), pubReq) + Expect(err).NotTo(HaveOccurred()) + Expect(conpubvol).NotTo(BeNil()) + + // Publish again with different attributes. 
+ pubReq.Readonly = true + + conpubvol, err = c.ControllerPublishVolume(context.Background(), pubReq) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) + + By("cleaning up unpublishing the volume") + + conunpubvol, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + + Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) }) - // TODO: Add test to test for tokens + Describe("ControllerUnpublishVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { + Skip("ControllerUnpublishVolume not supported") + } + }) + + It("should fail when no volume id is provided", func() { + + _, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-unpublish") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + By("getting a node id") + nid, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: 
nid.GetNodeId()}) + Expect(conpubvol).NotTo(BeNil()) + + // ControllerUnpublishVolume + By("calling controllerunpublish on that volume") + + conunpubvol, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) + + By("cleaning up deleting the volume") - // TODO: Add test which checks list of volume is there when created, - // and not there when deleted. + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + }) }) -var _ = Describe("CreateVolume [Controller Server]", func() { +var _ = DescribeSanity("ListSnapshots [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) + c = csi.NewControllerClient(sc.Conn) - if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { - Skip("CreateVolume not supported") + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS) { + Skip("ListSnapshots not supported") } }) - It("should fail when no version is provided", func() { - - _, err := c.CreateVolume( + It("should return appropriate values (no optional values added)", func() { + snapshots, err := c.ListSnapshots( context.Background(), - &csi.CreateVolumeRequest{}) - Expect(err).To(HaveOccurred()) + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + for _, snapshot := range snapshots.GetEntries() { + verifySnapshotInfo(snapshot.GetSnapshot()) + } }) - It("should fail when no name is provided", func() { + It("should return snapshots that match the specify snapshot id", func() { - _, err := c.CreateVolume( + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-1", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) + Expect(err).NotTo(HaveOccurred()) + + snapshots, err := c.ListSnapshots( context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - }) - Expect(err).To(HaveOccurred()) + &csi.ListSnapshotsRequest{SnapshotId: snapshot.GetSnapshot().GetSnapshotId()}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(BeNumerically("==", 1)) + verifySnapshotInfo(snapshots.GetEntries()[0].GetSnapshot()) + Expect(snapshots.GetEntries()[0].GetSnapshot().GetSnapshotId()).To(Equal(snapshot.GetSnapshot().GetSnapshotId())) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - 
Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) }) - It("should fail when no volume capabilities are provided", func() { + It("should return empty when the specify snapshot id is not exist", func() { - _, err := c.CreateVolume( + snapshots, err := c.ListSnapshots( context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: "name", - }) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + &csi.ListSnapshotsRequest{SnapshotId: "none-exist-id"}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(snapshots.GetEntries()).To(BeEmpty()) }) - It("should return appropriate values SingleNodeWriter NoCapacity Type:Mount", func() { + It("should return snapshots that match the specify source volume id)", func() { By("creating a volume") - name := "sanity" - vol, err := c.CreateVolume( + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-2") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-2", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) + Expect(err).NotTo(HaveOccurred()) + + snapshots, err := c.ListSnapshots( context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - }) + &csi.ListSnapshotsRequest{SourceVolumeId: snapshot.GetSnapshot().GetSourceVolumeId()}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + for _, snap := range snapshots.GetEntries() { + verifySnapshotInfo(snap.GetSnapshot()) + Expect(snap.GetSnapshot().GetSourceVolumeId()).To(Equal(snapshot.GetSnapshot().GetSourceVolumeId())) + } + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) - By("cleaning up deleting the volume") - _, err = c.DeleteVolume( + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return empty when the specify source volume id is not exist", func() { + + snapshots, err := c.ListSnapshots( context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) + &csi.ListSnapshotsRequest{SourceVolumeId: "none-exist-volume-id"}) Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(snapshots.GetEntries()).To(BeEmpty()) }) - // Pending fix in mock file - It("[MOCKERRORS] should return appropriate values SingleNodeWriter WithCapacity 1Gi 
Type:Mount", func() { + It("check the presence of new snapshots in the snapshot list", func() { + // List Snapshots before creating new snapshots. + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + + totalSnapshots := len(snapshots.GetEntries()) By("creating a volume") - name := "sanity" - size := uint64(1 * 1024 * 1024 * 1024) - vol, err := c.CreateVolume( + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-3") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + + By("creating a snapshot") + snapReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-3", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapReq) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) + + snapshots, err = c.ListSnapshots( context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size, - }, - }) + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(Equal(totalSnapshots + 1)) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) - Expect(vol.GetVolumeInfo().GetCapacityBytes()).To(Equal(size)) By("cleaning up deleting the volume") - _, err = c.DeleteVolume( + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + + // List snapshots and check if the deleted snapshot exists in the snapshot list. + snapshots, err = c.ListSnapshots( context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) + &csi.ListSnapshotsRequest{}) Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(Equal(totalSnapshots)) }) -}) -var _ = Describe("DeleteVolume [Controller Server]", func() { - var ( - c csi.ControllerClient - ) + It("should return next token when a limited number of entries are requested", func() { + // minSnapshotCount is the minimum number of snapshots expected to exist, + // based on which paginated snapshot listing is performed. + minSnapshotCount := 5 + // maxEntried is the maximum entries in list snapshot request. + maxEntries := 2 + // currentTotalVols is the total number of volumes at a given time. It + // is used to verify that all the snapshots have been listed. + currentTotalSnapshots := 0 + + // Get the number of existing volumes. 
+ snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) - BeforeEach(func() { - c = csi.NewControllerClient(conn) + initialTotalSnapshots := len(snapshots.GetEntries()) + currentTotalSnapshots = initialTotalSnapshots - if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { - Skip("DeleteVolume not supported") - } - }) + createVols := make([]*csi.Volume, 0) + createSnapshots := make([]*csi.Snapshot, 0) - It("should fail when no version is provided", func() { + // Ensure minimum minVolCount volumes exist. + if initialTotalSnapshots < minSnapshotCount { - _, err := c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{}) - Expect(err).To(HaveOccurred()) + By("creating required new volumes") + requiredSnapshots := minSnapshotCount - initialTotalSnapshots - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + for i := 1; i <= requiredSnapshots; i++ { + volReq := MakeCreateVolumeReq(sc, "volume"+strconv.Itoa(i)) + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + Expect(volume).NotTo(BeNil()) + createVols = append(createVols, volume.GetVolume()) - It("should fail when no volume id is provided", func() { + snapReq := MakeCreateSnapshotReq(sc, "snapshot"+strconv.Itoa(i), volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapReq) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) + createSnapshots = append(createSnapshots, snapshot.GetSnapshot()) + } + + // Update the current total snapshots count. + currentTotalSnapshots += requiredSnapshots + } - _, err := c.DeleteVolume( + // Request list snapshots with max entries maxEntries. + snapshots, err = c.ListSnapshots( context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, + &csi.ListSnapshotsRequest{ + MaxEntries: int32(maxEntries), }) - Expect(err).To(HaveOccurred()) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + nextToken := snapshots.GetNextToken() - It("should succeed when an invalid volume id is used", func() { + Expect(len(snapshots.GetEntries())).To(Equal(maxEntries)) - _, err := c.DeleteVolume( + // Request list snapshots with starting_token and no max entries. + snapshots, err = c.ListSnapshots( context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: "reallyfakevolumeid", + &csi.ListSnapshotsRequest{ + StartingToken: nextToken, }) Expect(err).NotTo(HaveOccurred()) - }) + Expect(snapshots).NotTo(BeNil()) - It("should return appropriate values (no optional values added)", func() { + // Ensure that all the remaining entries are returned at once. 
+ Expect(len(snapshots.GetEntries())).To(Equal(currentTotalSnapshots - maxEntries)) - // Create Volume First - By("creating a volume") - name := "sanity" - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - }) + if initialTotalSnapshots < minSnapshotCount { - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) + By("cleaning up deleting the snapshots") - // Delete Volume - By("deleting a volume") - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) - Expect(err).NotTo(HaveOccurred()) + for _, snap := range createSnapshots { + delSnapReq := MakeDeleteSnapshotReq(sc, snap.GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + } + + By("cleaning up deleting the volumes") + + for _, vol := range createVols { + delVolReq := MakeDeleteVolumeReq(sc, vol.GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + } + } }) + }) -var _ = Describe("ValidateVolumeCapabilities [Controller Server]", func() { +var _ = DescribeSanity("DeleteSnapshot [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) - }) + c = csi.NewControllerClient(sc.Conn) - It("should fail when no version is provided", func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { + Skip("DeleteSnapshot not supported") + } + }) - _, err := c.ValidateVolumeCapabilities( - context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{}) - Expect(err).To(HaveOccurred()) + It("should fail when no snapshot id is provided", func() { - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + req := &csi.DeleteSnapshotRequest{} - It("should fail when no volume id is provided", func() { + if sc.Secrets != nil { + req.Secrets = sc.Secrets.DeleteSnapshotSecret + } - _, err := c.ValidateVolumeCapabilities( - context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{ - Version: csiClientVersion, - }) + _, err := c.DeleteSnapshot(context.Background(), req) Expect(err).To(HaveOccurred()) serverError, ok := status.FromError(err) @@ -445,351 +1448,252 @@ var _ = Describe("ValidateVolumeCapabilities [Controller Server]", func() { Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should fail when no volume capabilities are provided", func() { - - _, err := c.ValidateVolumeCapabilities( - context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{ - Version: csiClientVersion, - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) + It("should succeed when an invalid snapshot id is used", func() { - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + req := MakeDeleteSnapshotReq(sc, "reallyfakesnapshotid") + _, err := c.DeleteSnapshot(context.Background(), req) + 
Expect(err).NotTo(HaveOccurred()) }) It("should return appropriate values (no optional values added)", func() { - // Create Volume First - By("creating a single node writer volume") - name := "sanity" - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - }) + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "DeleteSnapshot-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + // Create Snapshot First + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "DeleteSnapshot-snapshot-1", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) - // ValidateVolumeCapabilities - By("validating volume capabilities") - valivolcap, err := c.ValidateVolumeCapabilities( - context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - }) + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) Expect(err).NotTo(HaveOccurred()) - Expect(valivolcap).NotTo(BeNil()) - Expect(valivolcap.GetSupported()).To(BeTrue()) By("cleaning up deleting the volume") - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) Expect(err).NotTo(HaveOccurred()) }) }) -var _ = Describe("ControllerPublishVolume [Controller Server]", func() { +var _ = DescribeSanity("CreateSnapshot [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient - n csi.NodeClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) - n = csi.NewNodeClient(conn) + c = csi.NewControllerClient(sc.Conn) - if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { - Skip("ControllerPublishVolume not supported") + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { + Skip("CreateSnapshot not supported") } }) - It("should fail when no version is provided", func() { - - _, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{}) - Expect(err).To(HaveOccurred()) + It("should fail when no name is provided", func() { - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + req := 
&csi.CreateSnapshotRequest{ + SourceVolumeId: "testId", + } - It("should fail when no volume id is provided", func() { + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateSnapshotSecret + } - _, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - Version: csiClientVersion, - }) + _, err := c.CreateSnapshot(context.Background(), req) Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should fail when no node id is provided", func() { - - _, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) + It("should fail when no source volume id is provided", func() { - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + req := &csi.CreateSnapshotRequest{ + Name: "name", + } - It("should fail when no volume capability is provided", func() { + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateSnapshotSecret + } - _, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: "id", - NodeId: "fakenode", - }) + _, err := c.CreateSnapshot(context.Background(), req) Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should return appropriate values (no optional values added)", func() { + It("should not fail when requesting to create a snapshot with already existing name and same SourceVolumeId.", func() { - // Create Volume First - By("creating a single node writer volume") - name := "sanity" - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - }) + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "CreateSnapshot-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) - By("getting a node id") - nid, err := n.GetNodeID( - context.Background(), - &csi.GetNodeIDRequest{ - Version: csiClientVersion, - }) + By("creating a snapshot") + snapReq1 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-1", volume.GetVolume().GetVolumeId(), nil) + snap1, err := c.CreateSnapshot(context.Background(), snapReq1) Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) + Expect(snap1).NotTo(BeNil()) + verifySnapshotInfo(snap1.GetSnapshot()) - // ControllerPublishVolume - By("calling controllerpublish on that volume") - conpubvol, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - NodeId: nid.GetNodeId(), - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - 
AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - Readonly: false, - }) + snap2, err := c.CreateSnapshot(context.Background(), snapReq1) Expect(err).NotTo(HaveOccurred()) - Expect(conpubvol).NotTo(BeNil()) + Expect(snap2).NotTo(BeNil()) + verifySnapshotInfo(snap2.GetSnapshot()) - By("cleaning up unpublishing the volume") - conunpubvol, err := c.ControllerUnpublishVolume( - context.Background(), - &csi.ControllerUnpublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - // NodeID is optional in ControllerUnpublishVolume - NodeId: nid.GetNodeId(), - }) + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) Expect(err).NotTo(HaveOccurred()) - Expect(conunpubvol).NotTo(BeNil()) By("cleaning up deleting the volume") - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) Expect(err).NotTo(HaveOccurred()) }) -}) -var _ = Describe("ControllerUnpublishVolume [Controller Server]", func() { - var ( - c csi.ControllerClient - n csi.NodeClient - ) + It("should fail when requesting to create a snapshot with already existing name and different SourceVolumeId.", func() { - BeforeEach(func() { - c = csi.NewControllerClient(conn) - n = csi.NewNodeClient(conn) + By("creating a volume") + volume, err := c.CreateVolume(context.Background(), MakeCreateVolumeReq(sc, "CreateSnapshot-volume-2")) + Expect(err).ToNot(HaveOccurred()) - if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { - Skip("ControllerUnpublishVolume not supported") - } - }) + By("creating a snapshot with the created volume source id") + req1 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-2", volume.GetVolume().GetVolumeId(), nil) + snap1, err := c.CreateSnapshot(context.Background(), req1) + Expect(err).NotTo(HaveOccurred()) + Expect(snap1).NotTo(BeNil()) + verifySnapshotInfo(snap1.GetSnapshot()) - It("should fail when no version is provided", func() { + volume2, err := c.CreateVolume(context.Background(), MakeCreateVolumeReq(sc, "CreateSnapshot-volume-3")) + Expect(err).ToNot(HaveOccurred()) - _, err := c.ControllerUnpublishVolume( - context.Background(), - &csi.ControllerUnpublishVolumeRequest{}) + By("creating a snapshot with the same name but different volume source id") + req2 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-2", volume2.GetVolume().GetVolumeId(), nil) + _, err = c.CreateSnapshot(context.Background(), req2) Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) - - It("should fail when no volume id is provided", func() { + Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) - _, err := c.ControllerUnpublishVolume( - context.Background(), - &csi.ControllerUnpublishVolumeRequest{ - Version: csiClientVersion, - }) - Expect(err).To(HaveOccurred()) + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) - serverError, ok := status.FromError(err) - 
Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) }) - It("should return appropriate values (no optional values added)", func() { + It("should not fail when creating snapshot with maximum-length name", func() { - // Create Volume First - By("creating a single node writer volume") - name := "sanity" - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - }) + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "CreateSnapshot-volume-3") + volume, err := c.CreateVolume(context.Background(), volReq) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) - By("getting a node id") - nid, err := n.GetNodeID( - context.Background(), - &csi.GetNodeIDRequest{ - Version: csiClientVersion, - }) + nameBytes := make([]byte, MaxNameLength) + for i := 0; i < MaxNameLength; i++ { + nameBytes[i] = 'a' + } + name := string(nameBytes) + + By("creating a snapshot") + snapReq1 := MakeCreateSnapshotReq(sc, name, volume.GetVolume().GetVolumeId(), nil) + snap1, err := c.CreateSnapshot(context.Background(), snapReq1) Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) + Expect(snap1).NotTo(BeNil()) + verifySnapshotInfo(snap1.GetSnapshot()) - // ControllerPublishVolume - By("calling controllerpublish on that volume") - conpubvol, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - NodeId: nid.GetNodeId(), - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - Readonly: false, - }) + snap2, err := c.CreateSnapshot(context.Background(), snapReq1) Expect(err).NotTo(HaveOccurred()) - Expect(conpubvol).NotTo(BeNil()) + Expect(snap2).NotTo(BeNil()) + verifySnapshotInfo(snap2.GetSnapshot()) - // ControllerUnpublishVolume - By("calling controllerunpublish on that volume") - conunpubvol, err := c.ControllerUnpublishVolume( - context.Background(), - &csi.ControllerUnpublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - // NodeID is optional in ControllerUnpublishVolume - NodeId: nid.GetNodeId(), - }) + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) Expect(err).NotTo(HaveOccurred()) - Expect(conunpubvol).NotTo(BeNil()) By("cleaning up deleting the volume") - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, 
err = c.DeleteVolume(context.Background(), delVolReq) Expect(err).NotTo(HaveOccurred()) }) }) + +func MakeCreateVolumeReq(sc *SanityContext, name string) *csi.CreateVolumeRequest { + size1 := TestVolumeSize(sc) + + req := &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size1, + LimitBytes: size1, + }, + Parameters: sc.Config.TestVolumeParameters, + } + + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateVolumeSecret + } + + return req +} + +func MakeCreateSnapshotReq(sc *SanityContext, name, sourceVolumeId string, parameters map[string]string) *csi.CreateSnapshotRequest { + req := &csi.CreateSnapshotRequest{ + Name: name, + SourceVolumeId: sourceVolumeId, + Parameters: parameters, + } + + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateSnapshotSecret + } + + return req +} + +func MakeDeleteSnapshotReq(sc *SanityContext, id string) *csi.DeleteSnapshotRequest { + delSnapReq := &csi.DeleteSnapshotRequest{ + SnapshotId: id, + } + + if sc.Secrets != nil { + delSnapReq.Secrets = sc.Secrets.DeleteSnapshotSecret + } + + return delSnapReq +} + +func MakeDeleteVolumeReq(sc *SanityContext, id string) *csi.DeleteVolumeRequest { + delVolReq := &csi.DeleteVolumeRequest{ + VolumeId: id, + } + + if sc.Secrets != nil { + delVolReq.Secrets = sc.Secrets.DeleteVolumeSecret + } + + return delVolReq +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go index facdf39d..c1a5eb7e 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go @@ -17,86 +17,83 @@ limitations under the License. package sanity import ( + "context" + "fmt" "regexp" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/container-storage-interface/spec/lib/go/csi" - context "golang.org/x/net/context" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) -var ( - csiClientVersion = &csi.Version{ - Major: 0, - Minor: 1, - Patch: 0, - } -) - -var _ = Describe("GetSupportedVersions [Identity Server]", func() { +var _ = DescribeSanity("Identity Service", func(sc *SanityContext) { var ( c csi.IdentityClient ) BeforeEach(func() { - c = csi.NewIdentityClient(conn) + c = csi.NewIdentityClient(sc.Conn) }) - It("should return an array of supported versions", func() { - res, err := c.GetSupportedVersions( - context.Background(), - &csi.GetSupportedVersionsRequest{}) - - By("checking response to have supported versions list") - Expect(err).NotTo(HaveOccurred()) - Expect(res.GetSupportedVersions()).NotTo(BeNil()) - Expect(len(res.GetSupportedVersions()) >= 1).To(BeTrue()) - - By("checking each version") - for _, version := range res.GetSupportedVersions() { - Expect(version).NotTo(BeNil()) - Expect(version.GetMajor()).To(BeNumerically("<", 100)) - Expect(version.GetMinor()).To(BeNumerically("<", 100)) - Expect(version.GetPatch()).To(BeNumerically("<", 100)) - } - }) -}) - -var _ = Describe("GetPluginInfo [Identity Server]", func() { - var ( - c csi.IdentityClient - ) + Describe("GetPluginCapabilities", func() { + It("should return appropriate capabilities", func() { + req := &csi.GetPluginCapabilitiesRequest{} + res, err := c.GetPluginCapabilities(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + By("checking successful response") + Expect(res.GetCapabilities()).NotTo(BeNil()) + for _, cap := range res.GetCapabilities() { + switch cap.GetService().GetType() { + case csi.PluginCapability_Service_CONTROLLER_SERVICE: + case csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS: + default: + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetService().GetType())) + } + } + + }) - BeforeEach(func() { - c = csi.NewIdentityClient(conn) }) - It("should fail when no version is provided", func() { - _, err := c.GetPluginInfo(context.Background(), &csi.GetPluginInfoRequest{}) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + Describe("Probe", func() { + It("should return appropriate information", func() { + req := &csi.ProbeRequest{} + res, err := c.Probe(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + By("verifying return status") + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code() == codes.FailedPrecondition || + serverError.Code() == codes.OK).To(BeTrue()) + + if res.GetReady() != nil { + Expect(res.GetReady().GetValue() == true || + res.GetReady().GetValue() == false).To(BeTrue()) + } + }) }) - It("should return appropriate information", func() { - req := &csi.GetPluginInfoRequest{ - Version: csiClientVersion, - } - res, err := c.GetPluginInfo(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(res).NotTo(BeNil()) - - By("verifying name size and characters") - Expect(res.GetName()).ToNot(HaveLen(0)) - Expect(len(res.GetName())).To(BeNumerically("<=", 63)) - Expect(regexp. - MustCompile("^[a-zA-Z][A-Za-z0-9-\\.\\_]{0,61}[a-zA-Z]$"). 
- MatchString(res.GetName())).To(BeTrue()) + Describe("GetPluginInfo", func() { + It("should return appropriate information", func() { + req := &csi.GetPluginInfoRequest{} + res, err := c.GetPluginInfo(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + By("verifying name size and characters") + Expect(res.GetName()).ToNot(HaveLen(0)) + Expect(len(res.GetName())).To(BeNumerically("<=", 63)) + Expect(regexp. + MustCompile("^[a-zA-Z][A-Za-z0-9-\\.\\_]{0,61}[a-zA-Z]$"). + MatchString(res.GetName())).To(BeTrue()) + }) }) }) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go index 2d4734df..9bd9194b 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go @@ -17,215 +17,237 @@ limitations under the License. package sanity import ( + "context" "fmt" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/container-storage-interface/spec/lib/go/csi" - context "golang.org/x/net/context" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -var ( - csiTargetPath = "/mnt/csi" -) - -var _ = Describe("NodeGetCapabilities [Node Server]", func() { - var ( - c csi.NodeClient - ) - - BeforeEach(func() { - c = csi.NewNodeClient(conn) - }) - - It("should fail when no version is provided", func() { - _, err := c.NodeGetCapabilities( - context.Background(), - &csi.NodeGetCapabilitiesRequest{}) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) - - It("should return appropriate capabilities", func() { - caps, err := c.NodeGetCapabilities( - context.Background(), - &csi.NodeGetCapabilitiesRequest{ - Version: csiClientVersion, - }) +func isNodeCapabilitySupported(c csi.NodeClient, + capType csi.NodeServiceCapability_RPC_Type, +) bool { - By("checking successful response") - Expect(err).NotTo(HaveOccurred()) - Expect(caps).NotTo(BeNil()) - Expect(caps.GetCapabilities()).NotTo(BeNil()) - - for _, cap := range caps.GetCapabilities() { - Expect(cap.GetRpc()).NotTo(BeNil()) + caps, err := c.NodeGetCapabilities( + context.Background(), + &csi.NodeGetCapabilitiesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) - switch cap.GetRpc().GetType() { - case csi.NodeServiceCapability_RPC_UNKNOWN: - default: - Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) - } + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetRpc()).NotTo(BeNil()) + if cap.GetRpc().GetType() == capType { + return true } - }) -}) + } + return false +} + +func isPluginCapabilitySupported(c csi.IdentityClient, + capType csi.PluginCapability_Service_Type, +) bool { + + caps, err := c.GetPluginCapabilities( + context.Background(), + &csi.GetPluginCapabilitiesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + Expect(caps.GetCapabilities()).NotTo(BeNil()) + + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetService()).NotTo(BeNil()) + if cap.GetService().GetType() == capType { + return true + } + } + return false +} -var _ = Describe("NodeProbe [Node Server]", func() { +var _ = DescribeSanity("Node Service", func(sc *SanityContext) { var ( - c csi.NodeClient + cl *Cleanup + c csi.NodeClient + s csi.ControllerClient + + controllerPublishSupported bool + 
nodeStageSupported bool ) BeforeEach(func() { - c = csi.NewNodeClient(conn) - }) - - It("should fail when no version is provided", func() { - _, err := c.NodeProbe( - context.Background(), - &csi.NodeProbeRequest{}) - Expect(err).To(HaveOccurred()) + c = csi.NewNodeClient(sc.Conn) + s = csi.NewControllerClient(sc.Conn) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + controllerPublishSupported = isControllerCapabilitySupported( + s, + csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) + nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) + if nodeStageSupported { + err := createMountTargetLocation(sc.Config.StagingPath) + Expect(err).NotTo(HaveOccurred()) + } + cl = &Cleanup{ + Context: sc, + NodeClient: c, + ControllerClient: s, + ControllerPublishSupported: controllerPublishSupported, + NodeStageSupported: nodeStageSupported, + } }) - It("should return appropriate values", func() { - pro, err := c.NodeProbe( - context.Background(), - &csi.NodeProbeRequest{ - Version: csiClientVersion, - }) - - Expect(err).NotTo(HaveOccurred()) - Expect(pro).NotTo(BeNil()) + AfterEach(func() { + cl.DeleteVolumes() }) -}) -var _ = Describe("GetNodeID [Node Server]", func() { - var ( - c csi.NodeClient - ) + Describe("NodeGetCapabilities", func() { + It("should return appropriate capabilities", func() { + caps, err := c.NodeGetCapabilities( + context.Background(), + &csi.NodeGetCapabilitiesRequest{}) - BeforeEach(func() { - c = csi.NewNodeClient(conn) + By("checking successful response") + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetRpc()).NotTo(BeNil()) + + switch cap.GetRpc().GetType() { + case csi.NodeServiceCapability_RPC_UNKNOWN: + case csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME: + case csi.NodeServiceCapability_RPC_GET_VOLUME_STATS: + default: + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) + } + } + }) }) - It("should fail when no version is provided", func() { - _, err := c.GetNodeID( - context.Background(), - &csi.GetNodeIDRequest{}) - Expect(err).To(HaveOccurred()) + Describe("NodeGetInfo", func() { + var ( + i csi.IdentityClient + accessibilityConstraintSupported bool + ) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) - - It("should return appropriate values", func() { - nid, err := c.GetNodeID( - context.Background(), - &csi.GetNodeIDRequest{ - Version: csiClientVersion, - }) + BeforeEach(func() { + i = csi.NewIdentityClient(sc.Conn) + accessibilityConstraintSupported = isPluginCapabilitySupported(i, csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS) + }) - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - }) -}) + It("should return approproate values", func() { + ninfo, err := c.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) -var _ = Describe("NodePublishVolume [Node Server]", func() { - var ( - s csi.ControllerClient - c csi.NodeClient - controllerPublishSupported bool - ) + Expect(err).NotTo(HaveOccurred()) + Expect(ninfo).NotTo(BeNil()) + Expect(ninfo.GetNodeId()).NotTo(BeEmpty()) + Expect(ninfo.GetMaxVolumesPerNode()).NotTo(BeNumerically("<", 0)) - BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) - 
controllerPublishSupported = isCapabilitySupported( - s, - csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) + if accessibilityConstraintSupported { + Expect(ninfo.GetAccessibleTopology()).NotTo(BeNil()) + } + }) }) - It("should fail when no version is provided", func() { + Describe("NodePublishVolume", func() { + It("should fail when no volume id is provided", func() { + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + Secrets: sc.Secrets.NodePublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - _, err := c.NodePublishVolume( - context.Background(), - &csi.NodePublishVolumeRequest{}) - Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + It("should fail when no target path is provided", func() { + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + VolumeId: "id", + Secrets: sc.Secrets.NodePublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - It("should fail when no volume id is provided", func() { + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - _, err := c.NodePublishVolume( - context.Background(), - &csi.NodePublishVolumeRequest{ - Version: csiClientVersion, - }) - Expect(err).To(HaveOccurred()) + It("should fail when no volume capability is provided", func() { + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + VolumeId: "id", + TargetPath: sc.Config.TargetPath, + Secrets: sc.Secrets.NodePublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) }) - It("should fail when no target path is provided", func() { + Describe("NodeUnpublishVolume", func() { + It("should fail when no volume id is provided", func() { - _, err := c.NodePublishVolume( - context.Background(), - &csi.NodePublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) + _, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{}) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - It("should fail when no volume capability is provided", func() { + It("should fail when no target path is provided", func() { - _, err := c.NodePublishVolume( - context.Background(), - &csi.NodePublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: "id", - TargetPath: csiTargetPath, - }) - Expect(err).To(HaveOccurred()) + _, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{ + VolumeId: "id", + }) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + serverError, ok := status.FromError(err) + 
Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) }) - It("should return appropriate values (no optional values added)", func() { + Describe("NodeStageVolume", func() { + var ( + device string + ) - // Create Volume First - By("creating a single node writer volume") - name := "sanity" - vol, err := s.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ + BeforeEach(func() { + if !nodeStageSupported { + Skip("NodeStageVolume not supported") + } + + device = "/dev/mock" + }) + + It("should fail when no volume id is provided", func() { + _, err := c.NodeStageVolume( + context.Background(), + &csi.NodeStageVolumeRequest{ + StagingTargetPath: sc.Config.StagingPath, + VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, }, @@ -233,32 +255,24 @@ var _ = Describe("NodePublishVolume [Node Server]", func() { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, + PublishContext: map[string]string{ + "device": device, + }, + Secrets: sc.Secrets.NodeStageVolumeSecret, }, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) + ) + Expect(err).To(HaveOccurred()) - By("getting a node id") - nid, err := c.GetNodeID( - context.Background(), - &csi.GetNodeIDRequest{ - Version: csiClientVersion, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - var conpubvol *csi.ControllerPublishVolumeResponse - if controllerPublishSupported { - By("controller publishing volume") - conpubvol, err = s.ControllerPublishVolume( + It("should fail when no staging target path is provided", func() { + _, err := c.NodeStageVolume( context.Background(), - &csi.ControllerPublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - NodeId: nid.GetNodeId(), + &csi.NodeStageVolumeRequest{ + VolumeId: "id", VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, @@ -267,138 +281,86 @@ var _ = Describe("NodePublishVolume [Node Server]", func() { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - Readonly: false, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(conpubvol).NotTo(BeNil()) - } - - // NodePublishVolume - By("publishing the volume on a node") - nodepubvolRequest := &csi.NodePublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - TargetPath: csiTargetPath, - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + PublishContext: map[string]string{ + "device": device, + }, + Secrets: sc.Secrets.NodeStageVolumeSecret, }, - }, - } - if controllerPublishSupported { - nodepubvolRequest.PublishVolumeInfo = conpubvol.GetPublishVolumeInfo() - } - nodepubvol, err := c.NodePublishVolume(context.Background(), nodepubvolRequest) - Expect(err).NotTo(HaveOccurred()) - Expect(nodepubvol).NotTo(BeNil()) + ) + Expect(err).To(HaveOccurred()) - // 
NodeUnpublishVolume - By("cleaning up calling nodeunpublish") - nodeunpubvol, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - TargetPath: csiTargetPath, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(nodeunpubvol).NotTo(BeNil()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - if controllerPublishSupported { - By("cleaning up calling controllerunpublishing the volume") - nodeunpubvol, err := c.NodeUnpublishVolume( + It("should fail when no volume capability is provided", func() { + _, err := c.NodeStageVolume( context.Background(), - &csi.NodeUnpublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - TargetPath: csiTargetPath, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(nodeunpubvol).NotTo(BeNil()) - } - - By("cleaning up deleting the volume") - _, err = s.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) - Expect(err).NotTo(HaveOccurred()) - }) -}) - -var _ = Describe("NodeUnpublishVolume [Node Server]", func() { - var ( - s csi.ControllerClient - c csi.NodeClient - controllerPublishSupported bool - ) + &csi.NodeStageVolumeRequest{ + VolumeId: "id", + StagingTargetPath: sc.Config.StagingPath, + PublishContext: map[string]string{ + "device": device, + }, + Secrets: sc.Secrets.NodeStageVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) - controllerPublishSupported = isCapabilitySupported( - s, - csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) }) - It("should fail when no version is provided", func() { - - _, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{}) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + Describe("NodeUnstageVolume", func() { + BeforeEach(func() { + if !nodeStageSupported { + Skip("NodeUnstageVolume not supported") + } + }) - It("should fail when no volume id is provided", func() { + It("should fail when no volume id is provided", func() { - _, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{ - Version: csiClientVersion, - }) - Expect(err).To(HaveOccurred()) + _, err := c.NodeUnstageVolume( + context.Background(), + &csi.NodeUnstageVolumeRequest{ + StagingTargetPath: sc.Config.StagingPath, + }) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - It("should fail when no target path is provided", func() { + It("should fail when no staging target path is provided", func() { - _, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) + _, err := c.NodeUnstageVolume( + context.Background(), + &csi.NodeUnstageVolumeRequest{ + VolumeId: 
"id", + }) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) }) - It("should return appropriate values (no optional values added)", func() { + It("should work", func() { + name := uniqueString("sanity-node-full") // Create Volume First By("creating a single node writer volume") - name := "sanity" vol, err := s.CreateVolume( context.Background(), &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, + Name: name, VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ + { AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, }, @@ -407,22 +369,32 @@ var _ = Describe("NodeUnpublishVolume [Node Server]", func() { }, }, }, - }) + Secrets: sc.Secrets.CreateVolumeSecret, + }, + ) Expect(err).NotTo(HaveOccurred()) Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + By("getting a node id") + nid, err := c.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) - // ControllerPublishVolume var conpubvol *csi.ControllerPublishVolumeResponse if controllerPublishSupported { - By("calling controllerpublish on the volume") + By("controller publishing volume") + conpubvol, err = s.ControllerPublishVolume( context.Background(), &csi.ControllerPublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - NodeId: "foobar", + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, @@ -431,65 +403,115 @@ var _ = Describe("NodeUnpublishVolume [Node Server]", func() { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - Readonly: false, - }) + VolumeContext: vol.GetVolume().GetVolumeContext(), + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) Expect(err).NotTo(HaveOccurred()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: nid.GetNodeId()}) Expect(conpubvol).NotTo(BeNil()) } - + // NodeStageVolume + if nodeStageSupported { + By("node staging volume") + nodestagevol, err := c.NodeStageVolume( + context.Background(), + &csi.NodeStageVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + StagingTargetPath: sc.Config.StagingPath, + VolumeContext: vol.GetVolume().GetVolumeContext(), + PublishContext: conpubvol.GetPublishContext(), + Secrets: sc.Secrets.NodeStageVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(nodestagevol).NotTo(BeNil()) + } // NodePublishVolume By("publishing the volume on a node") - nodepubvolRequest := &csi.NodePublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - 
TargetPath: csiTargetPath, - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + var stagingPath string + if nodeStageSupported { + stagingPath = sc.Config.StagingPath + } + nodepubvol, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + TargetPath: sc.Config.TargetPath, + StagingTargetPath: stagingPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, }, + VolumeContext: vol.GetVolume().GetVolumeContext(), + PublishContext: conpubvol.GetPublishContext(), + Secrets: sc.Secrets.NodePublishVolumeSecret, }, - } - if controllerPublishSupported { - nodepubvolRequest.PublishVolumeInfo = conpubvol.GetPublishVolumeInfo() - } - nodepubvol, err := c.NodePublishVolume(context.Background(), nodepubvolRequest) + ) Expect(err).NotTo(HaveOccurred()) Expect(nodepubvol).NotTo(BeNil()) // NodeUnpublishVolume + By("cleaning up calling nodeunpublish") nodeunpubvol, err := c.NodeUnpublishVolume( context.Background(), &csi.NodeUnpublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - TargetPath: csiTargetPath, + VolumeId: vol.GetVolume().GetVolumeId(), + TargetPath: sc.Config.TargetPath, }) Expect(err).NotTo(HaveOccurred()) Expect(nodeunpubvol).NotTo(BeNil()) + if nodeStageSupported { + By("cleaning up calling nodeunstage") + nodeunstagevol, err := c.NodeUnstageVolume( + context.Background(), + &csi.NodeUnstageVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + StagingTargetPath: sc.Config.StagingPath, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(nodeunstagevol).NotTo(BeNil()) + } + if controllerPublishSupported { - By("cleaning up unpublishing the volume") - nodeunpubvol, err := c.NodeUnpublishVolume( + By("cleaning up calling controllerunpublishing") + + controllerunpubvol, err := s.ControllerUnpublishVolume( context.Background(), - &csi.NodeUnpublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - TargetPath: csiTargetPath, - }) + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) Expect(err).NotTo(HaveOccurred()) - Expect(nodeunpubvol).NotTo(BeNil()) + Expect(controllerunpubvol).NotTo(BeNil()) } By("cleaning up deleting the volume") + _, err = s.DeleteVolume( context.Background(), &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) Expect(err).NotTo(HaveOccurred()) }) }) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go index ecf88b19..e3c1684e 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go @@ -17,10 +17,14 @@ limitations under the License. 
package sanity import ( - "sync" + "crypto/rand" + "fmt" + "io/ioutil" + "os" "testing" "github.com/kubernetes-csi/csi-test/utils" + yaml "gopkg.in/yaml.v2" "google.golang.org/grpc" @@ -28,28 +32,163 @@ import ( . "github.com/onsi/gomega" ) -var ( - driverAddress string - conn *grpc.ClientConn - lock sync.Mutex -) +// CSISecrets consists of secrets used in CSI credentials. +type CSISecrets struct { + CreateVolumeSecret map[string]string `yaml:"CreateVolumeSecret"` + DeleteVolumeSecret map[string]string `yaml:"DeleteVolumeSecret"` + ControllerPublishVolumeSecret map[string]string `yaml:"ControllerPublishVolumeSecret"` + ControllerUnpublishVolumeSecret map[string]string `yaml:"ControllerUnpublishVolumeSecret"` + NodeStageVolumeSecret map[string]string `yaml:"NodeStageVolumeSecret"` + NodePublishVolumeSecret map[string]string `yaml:"NodePublishVolumeSecret"` + CreateSnapshotSecret map[string]string `yaml:"CreateSnapshotSecret"` + DeleteSnapshotSecret map[string]string `yaml:"DeleteSnapshotSecret"` +} + +// Config provides the configuration for the sanity tests. It +// needs to be initialized by the user of the sanity package. +type Config struct { + TargetPath string + StagingPath string + Address string + SecretsFile string + + TestVolumeSize int64 + TestVolumeParametersFile string + TestVolumeParameters map[string]string +} + +// SanityContext holds the variables that each test can depend on. It +// gets initialized before each test block runs. +type SanityContext struct { + Config *Config + Conn *grpc.ClientConn + Secrets *CSISecrets + + connAddress string +} -// Test will test the CSI driver at the specified address -func Test(t *testing.T, address string) { - lock.Lock() - defer lock.Unlock() +// Test will test the CSI driver at the specified address by +// setting up a Ginkgo suite and running it. +func Test(t *testing.T, reqConfig *Config) { + path := reqConfig.TestVolumeParametersFile + if len(path) != 0 { + yamlFile, err := ioutil.ReadFile(path) + if err != nil { + panic(fmt.Sprintf("failed to read file %q: %v", path, err)) + } + err = yaml.Unmarshal(yamlFile, &reqConfig.TestVolumeParameters) + if err != nil { + panic(fmt.Sprintf("error unmarshaling yaml: %v", err)) + } + } - driverAddress = address + sc := &SanityContext{ + Config: reqConfig, + } + + registerTestsInGinkgo(sc) RegisterFailHandler(Fail) RunSpecs(t, "CSI Driver Test Suite") } -var _ = BeforeSuite(func() { +func GinkgoTest(reqConfig *Config) { + sc := &SanityContext{ + Config: reqConfig, + } + + registerTestsInGinkgo(sc) +} + +func (sc *SanityContext) setup() { var err error - conn, err = utils.Connect(driverAddress) + + if len(sc.Config.SecretsFile) > 0 { + sc.Secrets, err = loadSecrets(sc.Config.SecretsFile) + Expect(err).NotTo(HaveOccurred()) + } else { + sc.Secrets = &CSISecrets{} + } + + // It is possible that a test sets sc.Config.Address + // dynamically (and differently!) in a BeforeEach, so only + // reuse the connection if the address is still the same. 
+ if sc.Conn == nil || sc.connAddress != sc.Config.Address { + By("connecting to CSI driver") + sc.Conn, err = utils.Connect(sc.Config.Address) + Expect(err).NotTo(HaveOccurred()) + sc.connAddress = sc.Config.Address + } else { + By(fmt.Sprintf("reusing connection to CSI driver at %s", sc.connAddress)) + } + + By("creating mount and staging directories") + err = createMountTargetLocation(sc.Config.TargetPath) Expect(err).NotTo(HaveOccurred()) -}) + if len(sc.Config.StagingPath) > 0 { + err = createMountTargetLocation(sc.Config.StagingPath) + Expect(err).NotTo(HaveOccurred()) + } +} + +func (sc *SanityContext) teardown() { + // We intentionally do not close the connection to the CSI + // driver here because the large amount of connection attempts + // caused test failures + // (https://github.com/kubernetes-csi/csi-test/issues/101). We + // could fix this with retries + // (https://github.com/kubernetes-csi/csi-test/pull/97) but + // that requires more discussion, so instead we just connect + // once per process instead of once per test case. This was + // also said to be faster + // (https://github.com/kubernetes-csi/csi-test/pull/98). +} + +func createMountTargetLocation(targetPath string) error { + fileInfo, err := os.Stat(targetPath) + if err != nil && os.IsNotExist(err) { + return os.MkdirAll(targetPath, 0755) + } else if err != nil { + return err + } + if !fileInfo.IsDir() { + return fmt.Errorf("Target location %s is not a directory", targetPath) + } + + return nil +} + +func loadSecrets(path string) (*CSISecrets, error) { + var creds CSISecrets + + yamlFile, err := ioutil.ReadFile(path) + if err != nil { + return &creds, fmt.Errorf("failed to read file %q: #%v", path, err) + } -var _ = AfterSuite(func() { - conn.Close() -}) + err = yaml.Unmarshal(yamlFile, &creds) + if err != nil { + return &creds, fmt.Errorf("error unmarshaling yaml: #%v", err) + } + + return &creds, nil +} + +var uniqueSuffix = "-" + pseudoUUID() + +// pseudoUUID returns a unique string generated from random +// bytes, empty string in case of error. +func pseudoUUID() string { + b := make([]byte, 8) + if _, err := rand.Read(b); err != nil { + // Shouldn't happen?! + return "" + } + return fmt.Sprintf("%08X-%08X", b[0:4], b[4:8]) +} + +// uniqueString returns a unique string by appending a random +// number. In case of an error, just the prefix is returned, so it +// alone should already be fairly unique. +func uniqueString(prefix string) string { + return prefix + uniqueSuffix +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go new file mode 100644 index 00000000..47763b75 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 Intel Corporation + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sanity + +import ( + . 
"github.com/onsi/ginkgo" +) + +type test struct { + text string + body func(*SanityContext) +} + +var tests []test + +// DescribeSanity must be used instead of the usual Ginkgo Describe to +// register a test block. The difference is that the body function +// will be called multiple times with the right context (when +// setting up a Ginkgo suite or a testing.T test, with the right +// configuration). +func DescribeSanity(text string, body func(*SanityContext)) bool { + tests = append(tests, test{text, body}) + return true +} + +// registerTestsInGinkgo invokes the actual Gingko Describe +// for the tests registered earlier with DescribeSanity. +func registerTestsInGinkgo(sc *SanityContext) { + for _, test := range tests { + Describe(test.text, func() { + BeforeEach(func() { + sc.setup() + }) + + test.body(sc) + + AfterEach(func() { + sc.teardown() + }) + }) + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go b/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go index 49ad8283..03b0f052 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go @@ -16,13 +16,16 @@ limitations under the License. package test import ( + "context" + "fmt" + "reflect" "testing" "github.com/container-storage-interface/spec/lib/go/csi" - gomock "github.com/golang/mock/gomock" + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" mock_driver "github.com/kubernetes-csi/csi-test/driver" mock_utils "github.com/kubernetes-csi/csi-test/utils" - "golang.org/x/net/context" ) func TestPluginInfoResponse(t *testing.T) { @@ -33,13 +36,7 @@ func TestPluginInfoResponse(t *testing.T) { driver := mock_driver.NewMockIdentityServer(m) // Setup input - in := &csi.GetPluginInfoRequest{ - Version: &csi.Version{ - Major: 0, - Minor: 1, - Patch: 0, - }, - } + in := &csi.GetPluginInfoRequest{} // Setup mock outout out := &csi.GetPluginInfoResponse{ @@ -64,6 +61,24 @@ func TestPluginInfoResponse(t *testing.T) { } } +type pbMatcher struct { + x proto.Message +} + +func (p pbMatcher) Matches(x interface{}) bool { + y := x.(proto.Message) + return proto.Equal(p.x, y) +} + +func (p pbMatcher) String() string { + return fmt.Sprintf("pb equal to %v", p.x) +} + +func pbMatch(x interface{}) gomock.Matcher { + v := x.(proto.Message) + return &pbMatcher{v} +} + func TestGRPCGetPluginInfoReponse(t *testing.T) { // Setup mock @@ -72,13 +87,7 @@ func TestGRPCGetPluginInfoReponse(t *testing.T) { driver := mock_driver.NewMockIdentityServer(m) // Setup input - in := &csi.GetPluginInfoRequest{ - Version: &csi.Version{ - Major: 0, - Minor: 1, - Patch: 0, - }, - } + in := &csi.GetPluginInfoRequest{} // Setup mock outout out := &csi.GetPluginInfoResponse{ @@ -91,7 +100,7 @@ func TestGRPCGetPluginInfoReponse(t *testing.T) { // Setup expectation // !IMPORTANT!: Must set context expected value to gomock.Any() to match any value - driver.EXPECT().GetPluginInfo(gomock.Any(), in).Return(out, nil).Times(1) + driver.EXPECT().GetPluginInfo(gomock.Any(), pbMatch(in)).Return(out, nil).Times(1) // Create a new RPC server := mock_driver.NewMockCSIDriver(&mock_driver.MockCSIDriverServers{ @@ -115,3 +124,65 @@ func TestGRPCGetPluginInfoReponse(t *testing.T) { t.Errorf("Unknown name: %s\n", name) } } + +func TestGRPCAttach(t *testing.T) { + + // Setup mock + m := gomock.NewController(&mock_utils.SafeGoroutineTester{}) + defer m.Finish() + driver := 
mock_driver.NewMockControllerServer(m) + + // Setup input + defaultVolumeID := "myname" + defaultNodeID := "MyNodeID" + defaultCaps := &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, + }, + } + publishVolumeInfo := map[string]string{ + "first": "foo", + "second": "bar", + "third": "baz", + } + defaultRequest := &csi.ControllerPublishVolumeRequest{ + VolumeId: defaultVolumeID, + NodeId: defaultNodeID, + VolumeCapability: defaultCaps, + Readonly: false, + } + + // Setup mock outout + out := &csi.ControllerPublishVolumeResponse{ + PublishContext: publishVolumeInfo, + } + + // Setup expectation + // !IMPORTANT!: Must set context expected value to gomock.Any() to match any value + driver.EXPECT().ControllerPublishVolume(gomock.Any(), pbMatch(defaultRequest)).Return(out, nil).Times(1) + + // Create a new RPC + server := mock_driver.NewMockCSIDriver(&mock_driver.MockCSIDriverServers{ + Controller: driver, + }) + conn, err := server.Nexus() + if err != nil { + t.Errorf("Error: %s", err.Error()) + } + defer server.Close() + + // Make call + c := csi.NewControllerClient(conn) + r, err := c.ControllerPublishVolume(context.Background(), defaultRequest) + if err != nil { + t.Errorf("Error: %s", err.Error()) + } + + info := r.GetPublishContext() + if !reflect.DeepEqual(info, publishVolumeInfo) { + t.Errorf("Invalid publish info: %v", info) + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go b/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go index a0cf555a..ae8c3367 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go @@ -36,17 +36,13 @@ type simpleDriver struct { wg sync.WaitGroup } -func (s *simpleDriver) GetSupportedVersions( - context.Context, *csi.GetSupportedVersionsRequest) (*csi.GetSupportedVersionsResponse, error) { - return &csi.GetSupportedVersionsResponse{ - SupportedVersions: []*csi.Version{ - &csi.Version{ - Major: 0, - Minor: 1, - Patch: 0, - }, - }, - }, nil +func (s *simpleDriver) GetPluginCapabilities(context.Context, *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { + // TODO: Return some simple Plugin Capabilities + return &csi.GetPluginCapabilitiesResponse{}, nil +} + +func (s *simpleDriver) Probe(context.Context, *csi.ProbeRequest) (*csi.ProbeResponse, error) { + return &csi.ProbeResponse{}, nil } func (s *simpleDriver) GetPluginInfo( diff --git a/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go b/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go index c89a5cf1..3baf9672 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go +++ b/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go @@ -29,7 +29,7 @@ type SafeGoroutineTester struct{} // Errorf prints the error to the screen then panics func (s *SafeGoroutineTester) Errorf(format string, args ...interface{}) { - fmt.Printf(format, args) + fmt.Printf(format, args...) panic("MOCK TEST ERROR") }
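
The hunks above replace the old sanity entry point Test(t, address) with Test(t, *Config) and move per-test state into SanityContext. A minimal sketch of how a CSI driver's own test suite might invoke the updated vendored package after this bump; the package name, socket path, and mount directories are illustrative placeholders (not values taken from this patch) and assume the driver under test is already serving on that endpoint:

package mydriver_test

import (
	"testing"

	"github.com/kubernetes-csi/csi-test/pkg/sanity"
)

func TestSanity(t *testing.T) {
	// All values below are assumptions for illustration; Address must point
	// at the CSI endpoint of a driver that is already running.
	config := &sanity.Config{
		Address:     "/tmp/csi.sock",    // gRPC endpoint of the driver under test
		TargetPath:  "/tmp/csi-target",  // created by the sanity setup if missing
		StagingPath: "/tmp/csi-staging", // only exercised when STAGE_UNSTAGE_VOLUME is reported
		// SecretsFile: "secrets.yaml",  // optional; YAML keyed like the CSISecrets struct
	}

	// Registers the DescribeSanity blocks with Ginkgo and runs the suite.
	sanity.Test(t, config)
}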