From 8568edee7cd26c1814dd3108df46fde0c086ee43 Mon Sep 17 00:00:00 2001
From: GitHub Actions
Date: Tue, 28 Nov 2023 06:01:47 +0000
Subject: [PATCH] markdown source builds

Auto-generated via {sandpaper}
Source : 30c35499b30594199c55c92a1f53210cf0a39dd3
Branch : main
Author : Vlad Dracula
Time : 2023-11-28 06:00:47 +0000
Message : adding demo of onehot per issue number 3
---
 01-introduction.md    | 54 ++++++++++++++++++---------------------
 02-image-data.md      | 57 ++++++++++++++++++++++++++++++++++++------
 fig/01_test_image.png | Bin 0 -> 9267 bytes
 md5sum.txt            |  4 +--
 4 files changed, 77 insertions(+), 38 deletions(-)
 create mode 100644 fig/01_test_image.png

diff --git a/01-introduction.md b/01-introduction.md
index 1226563e..5d0cbbc3 100644
--- a/01-introduction.md
+++ b/01-introduction.md
@@ -91,8 +91,9 @@ It is the goal of this training workshop to produce a Deep Learning program, usi
 ```python
 # load the required packages
 from tensorflow import keras # library for neural networks
+from sklearn.model_selection import train_test_split # library for splitting data into sets
 import matplotlib.pyplot as plt # library for plotting
-from icwithcnn_functions import prepare_image_icwithcnn # custom function
+import numpy as np # library for working with images as arrays

 # load the CIFAR-10 dataset included with the keras library
 (train_images, train_labels), (test_images, test_labels) = keras.datasets.cifar10.load_data()
@@ -131,11 +132,11 @@ print('Test: Images=%s, Labels=%s' % (test_images.shape, test_labels.shape))
 ## Output

 ```output
-Train: Images=(50000, 32, 32, 3), Labels=(50000, 10)
+Train: Images=(40000, 32, 32, 3), Labels=(40000, 10)
 Validate: Images=(10000, 32, 32, 3), Labels=(10000, 10)
 Test: Images=(10000, 32, 32, 3), Labels=(10000, 10)
 ```
-The training set consists of 50000 images of 32x32 pixels and 3 channels (RGB values) and labels.
+The training set consists of 40000 images of 32x32 pixels and 3 channels (RGB values) and labels.
 The validation and test datasets consist of 10000 images of 32x32 pixels and 3 channels (RGB values) and labels.

@@ -195,7 +196,9 @@ x_intro = keras.layers.Dense(64, activation='relu')(x_intro)
 outputs_intro = keras.layers.Dense(10, activation='softmax')(x_intro)

 # create the model
-model_intro = keras.Model(inputs=inputs_intro, outputs=outputs_intro, name="cifar_model_intro")
+model_intro = keras.Model(inputs = inputs_intro,
+                          outputs = outputs_intro,
+                          name = "cifar_model_intro")
 ```

 ### Step 5. Choose a loss function and optimizer
@@ -208,7 +211,7 @@ The optimizer is responsible for taking the output of the loss function and then
 # compile the model
 model_intro.compile(optimizer = 'adam',
                     loss = keras.losses.CategoricalCrossentropy(),
-                    metrics=['accuracy'])
+                    metrics = ['accuracy'])
 ```

 ### Step 6. Train the model
@@ -219,7 +222,7 @@ We can now go ahead and start training our neural network. We will probably keep
 # fit the model
 history_intro = model_intro.fit(train_images, train_labels, epochs = 10,
                                 validation_data = (val_images, val_labels),
-                                batch_size=32)
+                                batch_size = 32)
 ```

 Your output will begin to print similar to the output below:
@@ -228,42 +231,40 @@ Epoch 1/10
 1250/1250 [==============================] - 15s 12ms/step - loss: 1.4651 - accuracy: 0.4738 - val_loss: 1.2736 - val_accuracy: 0.5507
 ```

-::::::::::::::::::::::::::::::::::::::::: spoiler

-### What does this output mean?
+#### What does this output mean?

 This output printed during the fit phase, i.e.
training the model against known image labels, can be broken down as follows:

-- `Epoch` describes the number of full passes over all *training data*. In the output above there are **1250 training observations**. This number is calculated as the total number of images used as input divided by the batch size (40000/32). An epoch will conclude and move to the next epoch after a training pass over all 1563 observations.
+- `Epoch` describes the number of full passes over all *training data*. In the output above there are **1250 training observations**. This number is calculated as the total number of images used as input divided by the batch size (40000/32). An epoch will conclude and move to the next epoch after a training pass over all observations.

 - `loss` and `val_loss` can be considered as related. `loss` is a value the model will attempt to minimise; it is the distance between the true label of an image and the model's prediction. Minimising this distance is where *learning* occurs, adjusting the weights and biases in a way that reduces `loss`. On the other hand, `val_loss` is a value calculated against the validation data and is a measure of the model's performance against **unseen data**. Both values are a summation of errors made for each example when fitting to the training or validation sets.

 - `accuracy` and `val_accuracy` can also be considered as related. Unlike `loss` and `val_loss`, these values are a percentage and are only relevant to **classification problems**. The `val_accuracy` score can be used to communicate a percentage value of model effectiveness on unseen data.

-:::::::::::::::::::::::::::::::::::::::::

 ### Step 7. Perform a Prediction/Classification

 After training the network we can use it to perform predictions. This is the mode you would use the network in after you have fully trained it to a satisfactory performance. Doing predictions on a special hold-out set is used in the next step to measure the performance of the network.

 ```python
-# specify a new image and prepare it to match CIFAR-10 dataset
-from icwithcnn_functions import prepare_image_icwithcnn
+# predict the class name of the first test image
+result_intro = model_intro.predict(test_images[0].reshape(1,32,32,3))

-new_img_path = "../data/Jabiru_TGS.JPG" # path to image
-new_img_prepped = prepare_image_icwithcnn(new_img_path)
-
-# predict the class name
-result_intro = model_intro.predict(new_img_prepped) # make prediction
-
-print(' The predicted probability of each class is: \n', result_intro.round(4))
+print('The predicted probability of each class is: ', result_intro.round(4))
 print('The class with the highest predicted probability is: ', class_names[result_intro.argmax()])
+
+# plot the image with its true label
+plt.imshow(test_images[0], cmap=plt.cm.binary)
+plt.title('True class: ' + class_names[test_labels[0].argmax()])
+plt.show()
 ```

 ```output
-The predicted probability of each class is: [[0.0058 0.714 0. 0.0024 0. 0. 0.2777 0. 0. 0. ]]
-The class with the highest predicted probability is: automobile
+The predicted probability of each class is: [[0.0074 0.0006 0.0456 0.525 0.0036 0.1062 0.0162 0.0006 0.2908 0.004 ]]
+The class with the highest predicted probability is: cat
 ```
+![](fig/01_test_image.png){alt='poor resolution image of a cat'}

 ::::::::::::::::::::::::::::::::::::::::: callout

 My result is different!

 If you are finding significant differences in the model predictions, this could
@@ -275,10 +276,11 @@
 Congratulations, you just created your first image classification model and used it to classify an image!

-Unfortunately the classification was incorrect. Why might that be? and What can we do about?
+Was the classification correct? If not, why might that be, and what can we do about it?

 There are many ways we can try to improve the accuracy of our model, such as adding or removing layers to the model definition and fine-tuning the hyperparameters, which takes us to the next steps in our workflow.
+
 ### Step 8. Measure Performance

 Once we trained the network we want to measure its performance. To do this we use some additional data that was **not** part of the training; this is known as a test set. There are many different methods available for measuring performance and which one is best depends on the type of task we are attempting. These metrics are often published as an indication of how well our network performs.

@@ -289,13 +291,7 @@ When building image recognition models in Python, especially using libraries lik

 #### What are hyperparameters?

-Hyperparameters are all the parameters set by the person configuring the machine learning instead of those learned by the algorithm itself. These hyperparameters can include the learning rate, the number of layers in the network, the number of neurons per layer, and many more. Hyperparameter tuning refers to the process of systematically searching for the best combination of hyperparameters that will optimize the model's performance. One common method for hyperparameter tuning is **grid search**.
-
-#### What is Grid Search?
-
-Grid Search or **GridSearch** (as per the library function call) is foundation method for hyperparameter tuning. The aim of hyperparameter tuning is to define a grid of possible values for each hyperparameter you want to tune. GridSearch will then evaluate the model performance for each combination of hyperparameters in a brute-force manner, iterating through every possible combination in the grid.
-
-These concepts will be continued, with practical examples, in Episode 05.
+Hyperparameters are all the parameters set by the person configuring the machine learning model, rather than those learned by the algorithm itself. These hyperparameters can include the learning rate, the number of layers in the network, the number of neurons per layer, and many more. Hyperparameter tuning refers to the process of systematically searching for the best combination of hyperparameters that will optimize the model's performance. This concept will be continued, with practical examples, in [Episode 05 Evaluate a Convolutional Neural Network and Make Predictions (Classifications)](./05-evaluate-predict-cnn.md).

 ### Step 10. Share Model

diff --git a/02-image-data.md b/02-image-data.md
index 85a1138b..97c8e6a3 100644
--- a/02-image-data.md
+++ b/02-image-data.md
@@ -53,10 +53,9 @@ In some cases you will be able to download an image dataset that is already labe

 Where labelled data exists, in most cases the data provider or other users will have created functions that you can use to load the data.
 We already saw an example of this in the introduction:

 ```python
-# load the CIFAR-10 dataset included with the keras packages
 from tensorflow import keras

-# commented out in case these are already be in memory
+# load the CIFAR-10 dataset included with the keras library
 (train_images, train_labels), (test_images, test_labels) = keras.datasets.cifar10.load_data()
 ```
@@ -147,7 +146,7 @@ Two of the most commonly used libraries for image representation and manipulatio
 ::::::::::::::::::::::::::::::::::::::::::::::::::

-Let us start by looking at the image we used in the introduction.
+Let us start by taking a closer look at the Jabiru image.

 ```python
 # load the libraries required
@@ -249,7 +248,7 @@ The min, max, and mean pixel values are 0.0 , 255.0 , and 87.0 respectively.
 After normalization, the min, max, and mean pixel values are 0.0 , 1.0 , and 0.0 respectively.
 ```

-Of course, if there are a large number of images to preprocess you do not want to copy and paste these steps for each image! Fortunately, keras has a solution for that: [tf.keras.utils.image_dataset_from_directory]
+Of course, if there are a large number of images to preprocess you do not want to copy and paste these steps for each image! Fortunately, Keras has a solution for that: [tf.keras.utils.image_dataset_from_directory]

 ### One-hot encoding

@@ -280,6 +279,48 @@ Table 2. After One-Hot Encoding.
 Each category has its own binary column, and the value is set to 1 in the corresponding column for each row that matches that category.

+The Keras function for one-hot encoding is called [to_categorical]:
+
+`tf.keras.utils.to_categorical(y, num_classes=None, dtype="float32")`
+
+- `y` is array-like with the class values to be converted into a matrix (integers from 0 to num_classes - 1)
+- `num_classes` is the total number of classes. If None, this is inferred as max(y) + 1
+- `dtype` is the data type expected by the input. Default: 'float32'
+
+We performed this operation in **Step 3. Prepare data** of the Introduction, but let us look at the labels before and after one-hot encoding.
+
+```python
+print()
+print('train_labels before one hot encoding')
+print(train_labels)
+
+# one-hot encode labels
+train_labels = keras.utils.to_categorical(train_labels, len(class_names))
+val_labels = keras.utils.to_categorical(val_labels, len(class_names))
+
+print()
+print('train_labels after one hot encoding')
+print(train_labels)
+```
+
+```output
+train_labels before one hot encoding
+[[6]
+ [9]
+ [9]
+ ...
+ [9]
+ [1]
+ [1]]
+
+train_labels after one hot encoding
+[[0. 0. 0. ... 0. 0. 0.]
+ [0. 0. 0. ... 0. 0. 1.]
+ [0. 0. 0. ... 0. 0. 1.]
+ ...
+ [0. 0. 0. ... 0. 0. 1.]
+ [0. 1. 0. ... 0. 0. 0.]
+ [0. 1. 0. ... 0. 0. 0.]]
+```

 ### Image augmentation

@@ -293,8 +334,9 @@ There are several ways to augment your data to increase the diversity of the tra
 - brightness, contrast, or hue - these changes simulate variations in lighting conditions

-We will look at image augmentation in a later episode.
+We will not be looking at image augmentation in this lesson, but it is important to be aware of this type of data preparation because it can make a big difference in your model's ability to predict outside of your training data.
+Have a look at [Image augmentation layers] for information about these operations.
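For a flavour of what these augmentation layers can look like in practice, here is a minimal sketch (an illustration only, not part of the lesson's or this patch's workflow). It assumes a TensorFlow 2.x installation where `RandomFlip`, `RandomRotation`, and `RandomZoom` are available under `keras.layers`, and that the CIFAR-10 `train_images` array loaded earlier is already in memory.

```python
# a minimal sketch of image augmentation with Keras preprocessing layers
# (illustrative only; assumes train_images is the CIFAR-10 array loaded earlier)
from tensorflow import keras

# chain a few random transformations into a single augmentation step
data_augmentation = keras.Sequential([
    keras.layers.RandomFlip("horizontal"),  # mirror images left-right at random
    keras.layers.RandomRotation(0.1),       # rotate by up to +/- 10% of a full turn
    keras.layers.RandomZoom(0.1),           # zoom in or out by up to 10%
])

# apply the augmentation to a small batch; training=True enables the randomness
augmented_images = data_augmentation(train_images[:32], training=True)
print(augmented_images.shape)  # (32, 32, 32, 3): same shape, randomly transformed pixels
```

In a typical setup such layers sit at the start of the model or in the input pipeline, so every epoch sees slightly different versions of each training image.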
 ### Data Splitting
@@ -448,7 +490,8 @@ Our dataset is preprocessed and split into three sets which means we are ready t

 [tf.keras.utils.image_dataset_from_directory]: https://keras.io/api/data_loading/image/
+[to_categorical]: https://keras.io/api/utils/python_utils/#to_categorical-function
 [train_test_split]: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
-[tf.data.Dataset]: https://www.tensorflow.org/api_docs/python/tf/data/Dataset
-[CINIC-10]: https://github.com/BayesWatch/cinic-10/
+[Image augmentation layers]: https://keras.io/api/layers/preprocessing_layers/image_augmentation/
+
diff --git a/fig/01_test_image.png b/fig/01_test_image.png
new file mode 100644
index 0000000000000000000000000000000000000000..5d54999bfe44c7dea2a45bcbeb68b66d5ad3c036
Binary files /dev/null and b/fig/01_test_image.png differ