diff --git a/.gitignore b/.gitignore index dd0bbe49a749..d21e46cb2d43 100644 --- a/.gitignore +++ b/.gitignore @@ -157,3 +157,7 @@ wandb dump.py docs/sources/source/test_build/ + +# Checkpoints, config files and temporary files created in tutorials. +examples/neural_graphs/*.chkpt +examples/neural_graphs/*.yml \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index a00d22c74c6e..9a749af6e06b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -78,6 +78,8 @@ To release a new version, please update the changelog as followed: - Online audio augmentation notebook in ASR examples ([PR #605](https://github.com/NVIDIA/NeMo/pull/605)) - @titu1994 - ContextNet Encoder + Decoder Initial Support ([PR #630](https://github.com/NVIDIA/NeMo/pull/630)) - @titu1994 - Added finetuning with Megatron-LM ([PR #601](https://github.com/NVIDIA/NeMo/pull/601)) - @ekmb +- Added documentation for 8 kHz model ([PR #632](https://github.com/NVIDIA/NeMo/pull/632)) - @jbalam-nv + ### Changed - Syncs across workers at each step to check for NaN or inf loss. Terminates all workers if stop\_on\_nan\_loss is set (as before), lets Apex deal with it if apex.amp optimization level is O1 or higher, and skips the step across workers otherwise. ([PR #637](https://github.com/NVIDIA/NeMo/pull/637)) - @redoctopus diff --git a/Jenkinsfile b/Jenkinsfile index bf6cd48cb448..f76dea063f03 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -2,7 +2,7 @@ pipeline { agent { docker { image 'nvcr.io/nvidia/pytorch:20.01-py3' - args '--device=/dev/nvidia0 --gpus all --user 0:128 -v /home:/home -v $HOME/.cache/torch:/root/.cache/torch --shm-size=8g' + args '--device=/dev/nvidia0 --gpus all --user 0:128 -v /home/TestData:/home/TestData -v $HOME/.cache/torch:/root/.cache/torch --shm-size=8g' } } options { @@ -193,11 +193,22 @@ pipeline { } stage ('Punctuation and Classification Training/Inference Test') { steps { - sh 'cd examples/nlp/token_classification && CUDA_VISIBLE_DEVICES=1 python punctuation_capitalization.py --data_dir /home/TestData/nlp/token_classification_punctuation/ --work_dir punctuation_output --save_epoch_freq 1 --num_epochs 1 --save_step_freq -1 --batch_size 2' + sh 'cd examples/nlp/token_classification && CUDA_VISIBLE_DEVICES=1 python punctuation_capitalization.py \ + --data_dir /home/TestData/nlp/token_classification_punctuation/ --work_dir punctuation_output --save_epoch_freq 1 \ + --num_epochs 1 --save_step_freq -1 --batch_size 2' sh 'cd examples/nlp/token_classification && DATE_F=$(ls punctuation_output/) && DATA_DIR="/home/TestData/nlp/token_classification_punctuation" && CUDA_VISIBLE_DEVICES=1 python punctuation_capitalization_infer.py --checkpoint_dir punctuation_output/$DATE_F/checkpoints/ --punct_labels_dict $DATA_DIR/punct_label_ids.csv --capit_labels_dict $DATA_DIR/capit_label_ids.csv' sh 'rm -rf examples/nlp/token_classification/punctuation_output' } } + stage('SGD Test') { + steps { + sh 'cd examples/nlp/dialogue_state_tracking && CUDA_VISIBLE_DEVICES=0 python dialogue_state_tracking_sgd.py \ + --data_dir /home/TestData/nlp/sgd/ --schema_embedding_dir /home/TestData/nlp/sgd/embeddings/ --eval_dataset dev \ + --dialogues_example_dir /home/TestData/nlp/sgd/dialogue_example_dir/ --work_dir sgd_output --task DEBUG \ + --num_epochs 1 --save_epoch_freq=0' + sh 'rm -rf examples/nlp/dialogue_state_tracking/sgd_output' + } + } } } @@ -355,7 +366,8 @@ pipeline { post { always { + sh "chmod -R 777 ." 
cleanWs()
        }
    }
-}
+}
\ No newline at end of file
diff --git a/docs/docs_zh/sources/source/speech_command/tutorial.rst b/docs/docs_zh/sources/source/speech_command/tutorial.rst
index ab60fdf33717..0a901d6849c7 100644
--- a/docs/docs_zh/sources/source/speech_command/tutorial.rst
+++ b/docs/docs_zh/sources/source/speech_command/tutorial.rst
@@ -90,7 +90,7 @@ QuartzNet 模型使用一种固定的模型定义模式: QuartzNet-[BxR], 其
        process_classification_evaluation_epoch,
    )
-    logging = nemo.logging
+    from nemo.utils import logging
    # Lets define some hyper parameters
    lr = 0.05
@@ -420,7 +420,7 @@ QuartzNet 模型使用一种固定的模型定义模式: QuartzNet-[BxR], 其
    import nemo
    import nemo.collections.asr as nemo_asr
-    logging = nemo.logging
+    from nemo.utils import logging
    # We add some
    data_dir = ''
diff --git a/docs/sources/source/asr/8kHz_models.rst b/docs/sources/source/asr/8kHz_models.rst
new file mode 100644
index 000000000000..053bd23a6dc1
--- /dev/null
+++ b/docs/sources/source/asr/8kHz_models.rst
@@ -0,0 +1,39 @@
+8kHz Models
+===========
+
+For applications based on telephony speech, models trained on narrowband audio sampled at 8 kHz may perform better than models built with
+audio at a higher sampling rate. (Note that to use a model whose sampling rate differs from that of your data, you need to resample your data to match the sampling rate in the
+config file of the model.) One approach to creating a large dataset for training a model suited to your application is to convert all audio data
+to the formats prevalent in your application. Here we detail one such approach that we took to train a model based on 8 kHz data.
+
+To train a model suitable for recognizing telephony speech, we converted some of the datasets to G.711 :cite:`8kHz-mod-itu1988g711`. G.711 is a popular speech codec used in VoIP products and encodes speech
+at 64 kbps using PCM u-law companding. We converted audio from the LibriSpeech, Mozilla Common Voice and WSJ datasets to G.711 format and combined it with the Fisher and Switchboard datasets to
+train a :ref:`Quartznet15x5 ` model with about 4000 hours of data. To convert your audio to G.711 format you can use the script `convert_wav_to_g711wav.py` found in the `scripts` sub-directory of the NeMo base directory (a sketch of the conversion is shown below).
+
+Among the experiments that we ran, we got the best accuracy for a model that used our 16 kHz Quartznet15x5 model's weights as pre-trained weights. We then
+trained the model for 250 epochs on the five datasets mentioned above. Here are some results for our best model so far (note that all the test sets
+were converted to G.711 format for the results below):
+
+====================== =====================
+Test set               WER (%)
+====================== =====================
+LibriSpeech dev-clean  4.35
+LibriSpeech dev-other  11.89
+LibriSpeech test-clean 4.45
+LibriSpeech test-other 12.02
+Switchboard test       10.74
+Switchboard dev        10.59
+====================== =====================
+
+The model was first pretrained with 8 kHz LibriSpeech data for 134 epochs and then trained for another 250 epochs using G.711 audio from all five datasets listed above. For best accuracy
+in your application, you may choose to :ref:`fine-tune ` this model using data collected from your application.
+
+..
+   The pre-trained model is available for download `here `_.
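+To get a sense of what such a conversion involves, here is a minimal sketch (this is not the NeMo script itself, and the
+file names are placeholders): it downsamples a 16-bit PCM WAV file to 8 kHz and applies a u-law encode/decode round trip
+using only the Python standard library, emulating the G.711 companding:
+
+.. code-block:: python
+
+    import audioop
+    import wave
+
+    def emulate_g711(in_path, out_path, target_rate=8000):
+        # Read the source PCM WAV file.
+        with wave.open(in_path, "rb") as wav_in:
+            channels = wav_in.getnchannels()
+            width = wav_in.getsampwidth()
+            rate = wav_in.getframerate()
+            pcm = wav_in.readframes(wav_in.getnframes())
+
+        # Downsample to the target rate (state=None requests a one-shot conversion).
+        pcm, _ = audioop.ratecv(pcm, width, channels, rate, target_rate, None)
+        # u-law companding round trip: to the 8-bit G.711 representation and back.
+        pcm = audioop.ulaw2lin(audioop.lin2ulaw(pcm, width), width)
+
+        # Write the result as an 8 kHz PCM WAV file.
+        with wave.open(out_path, "wb") as wav_out:
+            wav_out.setnchannels(channels)
+            wav_out.setsampwidth(width)
+            wav_out.setframerate(target_rate)
+            wav_out.writeframes(pcm)
+
+    emulate_g711("speech_16k.wav", "speech_8k_g711.wav")
+
+References
+----------
+.. 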
bibliography:: asr_all.bib + :style: plain + :labelprefix: 8kHz-mod + :keyprefix: 8kHz-mod- diff --git a/docs/sources/source/asr/asr_all.bib b/docs/sources/source/asr/asr_all.bib index 3cdd9c68f9d2..5eb0704b073f 100644 --- a/docs/sources/source/asr/asr_all.bib +++ b/docs/sources/source/asr/asr_all.bib @@ -60,8 +60,6 @@ @misc{ardila2019common primaryClass={cs.CL} } - - @article{graves2012, title={Sequence Transduction with Recurrent Neural Networks}, author={Graves, Alex}, @@ -927,8 +925,14 @@ @article{novograd2019 } @article{kriman2019quartznet, - title={Quartznet: Deep automatic speech recognition with 1d time-channel separable convolutions}, + title={Quartznet: {Deep} automatic speech recognition with 1d time-channel separable convolutions}, author={Kriman, Samuel and Beliaev, Stanislav and Ginsburg, Boris and Huang, Jocelyn and Kuchaiev, Oleksii and Lavrukhin, Vitaly and Leary, Ryan and Li, Jason and Zhang, Yang}, journal={arXiv preprint arXiv:1910.10261}, year={2019} -} \ No newline at end of file +} + +@misc{itu1988g711, + title={{ITU-T} {G.711} - {Pulse} code modulation ({PCM}) of voice frequencies}, + author={ITU-T Geneva Switzerland}, + year={1988}, +} diff --git a/docs/sources/source/asr/intro.rst b/docs/sources/source/asr/intro.rst index f8aac81833e9..cfa5d1e16919 100644 --- a/docs/sources/source/asr/intro.rst +++ b/docs/sources/source/asr/intro.rst @@ -10,6 +10,8 @@ Speech Recognition tutorial datasets models + 8kHz_models + diff --git a/docs/sources/source/asr/jasper.rst b/docs/sources/source/asr/jasper.rst index dc136cbb5f19..ec98d88ae0f1 100644 --- a/docs/sources/source/asr/jasper.rst +++ b/docs/sources/source/asr/jasper.rst @@ -23,3 +23,10 @@ Jasper10x5dr | Librispeech, `here `__ ============= ======================= ================================================================================= + +References +^^^^^^^^^^ +.. bibliography:: asr_all.bib + :style: plain + :labelprefix: ASR-MODELS + :keyprefix: asr-models- \ No newline at end of file diff --git a/docs/sources/source/asr/models.rst b/docs/sources/source/asr/models.rst index 57f529bc5298..66b5249af508 100644 --- a/docs/sources/source/asr/models.rst +++ b/docs/sources/source/asr/models.rst @@ -7,10 +7,3 @@ Models jasper quartznet -References -------------- - -.. bibliography:: asr_all.bib - :style: plain - :labelprefix: ASR-MODELS - :keyprefix: asr-models- \ No newline at end of file diff --git a/docs/sources/source/asr/quartznet.rst b/docs/sources/source/asr/quartznet.rst index 6dbadab71907..58e38ad1cc44 100644 --- a/docs/sources/source/asr/quartznet.rst +++ b/docs/sources/source/asr/quartznet.rst @@ -1,7 +1,9 @@ +.. _Quartznet_model: + QuartzNet --------- -QuartzNet is a version of Jasper :cite:`asr-models-li2019jasper` model with separable convolutions and larger filters. It can achieve performance +QuartzNet :cite:`qtz-models-kriman2019quartznet` is a version of Jasper :cite:`qtz-models-li2019jasper` model with separable convolutions and larger filters. It can achieve performance similar to Jasper but with an order of magnitude less parameters. Similarly to Jasper, QuartzNet family of models are denoted as QuartzNet_[BxR] where B is the number of blocks, and R - the number of convolutional sub-blocks within a block. Each sub-block contains a 1-D *separable* convolution, batch normalization, ReLU, and dropout: @@ -9,11 +11,9 @@ Similarly to Jasper, QuartzNet family of models are denoted as QuartzNet_[BxR] w :align: center :alt: quartznet model - .. 
note:: This checkpoint was trained on LibriSpeech :cite:`panayotov2015librispeech` and full "validated" part of En Mozilla Common Voice :cite:`ardila2019common` - `QuartzNet paper `_. -Pretrained models can be found, `here `_. +Pretrained models can be found at the following links: ============= ===================== ============================================================================== Network Dataset Download Link @@ -24,7 +24,10 @@ QuartzNet15x5 Aishell2 `here `_. These QuartzNet models were trained for 200 epochs using mixed precision on 2 GPUs with a batch size of 128 over 200 epochs. @@ -32,7 +30,7 @@ QuartzNet3x2 (93k params) Speech Commands V2 97.29% Test References ----------- +^^^^^^^^^^ .. bibliography:: speech_recognition_all.bib :style: plain diff --git a/docs/sources/source/speech_command/speech_recognition_all.bib b/docs/sources/source/speech_command/speech_recognition_all.bib index 277e56e7ec9b..a358cf2a70c9 100644 --- a/docs/sources/source/speech_command/speech_recognition_all.bib +++ b/docs/sources/source/speech_command/speech_recognition_all.bib @@ -40,4 +40,11 @@ @article{park2019 year = "2019", eid = {arXiv:1904.08779}, eprint = {1904.08779}, +} + +@article{li2019jasper, + title={Jasper: An End-to-End Convolutional Neural Acoustic Model}, + author={Li, Jason and Lavrukhin, Vitaly and Ginsburg, Boris and Leary, Ryan and Kuchaiev, Oleksii and Cohen, Jonathan M and Nguyen, Huyen and Gadde, Ravi Teja}, + journal={arXiv preprint arXiv:1904.03288}, + year={2019} } \ No newline at end of file diff --git a/docs/sources/source/speech_command/tutorial.rst b/docs/sources/source/speech_command/tutorial.rst index e5c59970b5ed..bd0aa38814bb 100644 --- a/docs/sources/source/speech_command/tutorial.rst +++ b/docs/sources/source/speech_command/tutorial.rst @@ -111,7 +111,7 @@ The script below does both training and evaluation (on V1 dataset) on single GPU process_classification_evaluation_epoch, ) - logging = nemo.logging + from nemo.utils import logging # Lets define some hyper parameters lr = 0.05 @@ -447,7 +447,7 @@ but they can similarly be used for v2 dataset. import nemo import nemo.collections.asr as nemo_asr - logging = nemo.logging + from nemo.utils import logging # We add some data_dir = '' diff --git a/docs/sources/source/tutorials/intro.rst b/docs/sources/source/tutorials/intro.rst index 3d4177e4a153..be51895dffc0 100644 --- a/docs/sources/source/tutorials/intro.rst +++ b/docs/sources/source/tutorials/intro.rst @@ -12,3 +12,4 @@ Getting started weightsharing callbacks complex_training + neural_graphs diff --git a/docs/sources/source/tutorials/neural_graphs.rst b/docs/sources/source/tutorials/neural_graphs.rst new file mode 100644 index 000000000000..e8f363457237 --- /dev/null +++ b/docs/sources/source/tutorials/neural_graphs.rst @@ -0,0 +1,54 @@ +Neural Graphs +============= + +The Neural Graph is a high-level abstract concept empowering the user to build graphs consisting of many, +interconnected Neural Modules. +Once the user defines a graph, its topology is “frozen”, i.e. connections between modules cannot change. +If a user wants to change the topology - he/she can build another graph, potentially spanned over the same modules. +At the same time, he can reuse and nest one graph into another. + + +.. figure:: neural_graphs_general.png + +The import/export/save/restore options combined with the lightweight API make Neural Graphs +a perfect tool for rapid prototyping and experimentation. 
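+
+As a quick taste of the API, the following minimal sketch (based on the `TaylorNet`, `RealFunctionDataLayer` and
+`MSELoss` modules used in the first tutorial; the exact binding semantics are covered in the notebooks) shows how a
+"model graph" can be defined and then nested inside a training graph by calling it like any other Neural Module:
+
+.. code-block:: python
+
+    from nemo.backends.pytorch.tutorials import MSELoss, RealFunctionDataLayer, TaylorNet
+    from nemo.core import NeuralGraph, NeuralModuleFactory, OperationMode
+
+    nf = NeuralModuleFactory()
+
+    # Instantiate the modules.
+    dl = RealFunctionDataLayer(n=10000, batch_size=128)
+    fx = TaylorNet(dim=4)
+    loss = MSELoss()
+
+    # Define the "model graph", usable in both training and validation.
+    with NeuralGraph(operation_mode=OperationMode.both) as model:
+        y_pred = fx(x=model)  # bind the graph input under the module's default port name
+
+    # Nest the model graph inside the training graph.
+    with NeuralGraph(operation_mode=OperationMode.training) as training_graph:
+        x, y = dl()
+        p = model(x=x)  # a nested graph is called like a module
+        lss = loss(predictions=p, target=y)
+
+Training then amounts to passing `training_graph` to the factory's `train()` method, as shown in the notebooks.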
+
+There are two Jupyter Notebook tutorials focusing on different aspects of the Neural Graphs functionality.
+
+Tutorial I: The basic functionality
+-----------------------------------
+
+In this first part of the Neural Graphs (NGs) tutorial, we will focus on a simple example:
+training a TaylorNet module to approximate a sine wave function.
+We will build a simple "model graph" and show how we can nest it into other graphs.
+
+
+.. figure:: neural_graphs_nesting.png
+
+This part covers the following:
+ * how to create a Neural Graph object
+ * how to activate/deactivate graph context (in various ways)
+ * how to bind NG inputs and outputs (in various ways)
+ * how to nest one graph (representing our "trainable model") into training and validation graphs
+
+
+Tutorial II: The advanced functionality
+---------------------------------------
+
+In this second part of the Neural Graphs (NGs) tutorial, we will focus on a more complex example:
+training an End-to-End Convolutional Neural Acoustic Model called JASPER.
+We will build a "model graph" and show how we can nest it into other graphs, how we can freeze/unfreeze modules,
+use graph configuration and save/load graph checkpoints.
+
+This part covers the following:
+ * how to nest one graph into another
+ * how to serialize and deserialize a graph
+ * how to export and import serialized graph configuration to/from YAML files
+ * how to save and load graph checkpoints (containing weights of the Trainable NMs)
+ * how to freeze/unfreeze modules in a graph
+
+Additionally, we will show how to use `AppState` to list all the modules and graphs we have created in the scope of
+our application.
+
+.. note::
+    Both tutorial notebooks can be found in the `nemo/examples/neural_graphs` folder.
diff --git a/docs/sources/source/tutorials/neural_graphs_general.png b/docs/sources/source/tutorials/neural_graphs_general.png
new file mode 100644
index 000000000000..996e3db26e3d
Binary files /dev/null and b/docs/sources/source/tutorials/neural_graphs_general.png differ
diff --git a/docs/sources/source/tutorials/neural_graphs_nesting.png b/docs/sources/source/tutorials/neural_graphs_nesting.png
new file mode 100644
index 000000000000..c411587714b8
Binary files /dev/null and b/docs/sources/source/tutorials/neural_graphs_nesting.png differ
diff --git a/examples/applications/asr_service/app/__init__.py b/examples/applications/asr_service/app/__init__.py
index a31e50d7ef94..f5da84fa3f61 100644
--- a/examples/applications/asr_service/app/__init__.py
+++ b/examples/applications/asr_service/app/__init__.py
@@ -7,8 +7,7 @@
 import nemo
 import nemo.collections.asr as nemo_asr
-
-logging = nemo.logging
+from nemo.utils import logging
 
 app = Flask(__name__)
 # make sure WORK_DIR exists before calling your service
diff --git a/examples/applications/asr_service/app/routes.py b/examples/applications/asr_service/app/routes.py
index ccc7dbc20cce..7bd636a9b39f 100644
--- a/examples/applications/asr_service/app/routes.py
+++ b/examples/applications/asr_service/app/routes.py
@@ -17,10 +17,8 @@
 from flask import request
 from werkzeug.utils import secure_filename
 
-import nemo
 import nemo.collections.asr as nemo_asr
-
-logging = nemo.logging
+from nemo.utils import logging
 
 try:
     from app import beam_search_with_lm
diff --git a/examples/asr/configs/quartznet15x5_8kHz.yaml b/examples/asr/configs/quartznet15x5_8kHz.yaml
new file mode 100644
index 000000000000..3bbe1019e460
--- /dev/null
+++ b/examples/asr/configs/quartznet15x5_8kHz.yaml
@@ -0,0 +1,198 @@
+model: "QuartzNet"
+sample_rate: 8000 + +AudioToTextDataLayer: + max_duration: 16.7 + trim_silence: true + + train: + shuffle: true + + eval: + shuffle: false + max_duration: null + +AudioToMelSpectrogramPreprocessor: + window_size: 0.02 + window_stride: 0.01 + window: "hann" + normalize: "per_feature" + n_fft: 512 + features: 64 + dither: 0.00001 + pad_to: 16 + stft_conv: true + +SpectrogramAugmentation: + rect_masks: 5 + rect_time: 120 + rect_freq: 50 + +JasperEncoder: + activation: "relu" + conv_mask: true + + jasper: + - filters: 256 + repeat: 1 + kernel: [33] + stride: [2] + dilation: [1] + dropout: 0.0 + residual: false + separable: true + + - filters: 256 + repeat: 5 + kernel: [33] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 256 + repeat: 5 + kernel: [33] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 256 + repeat: 5 + kernel: [33] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 256 + repeat: 5 + kernel: [39] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 256 + repeat: 5 + kernel: [39] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 256 + repeat: 5 + kernel: [39] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 512 + repeat: 5 + kernel: [51] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 512 + repeat: 5 + kernel: [51] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 512 + repeat: 5 + kernel: [51] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 512 + repeat: 5 + kernel: [63] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 512 + repeat: 5 + kernel: [63] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 512 + repeat: 5 + kernel: [63] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 512 + repeat: 5 + kernel: [75] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 512 + repeat: 5 + kernel: [75] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 512 + repeat: 5 + kernel: [75] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: true + separable: true + + - filters: 512 + repeat: 1 + kernel: [87] + stride: [1] + dilation: [2] + dropout: 0.0 + residual: false + separable: true + + - filters: 1024 + repeat: 1 + kernel: [1] + stride: [1] + dilation: [1] + dropout: 0.0 + residual: false + +labels: [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'"] diff --git a/examples/asr/contextnet.py b/examples/asr/contextnet.py index 6e6845142d8f..2857bb7f0b44 100644 --- a/examples/asr/contextnet.py +++ b/examples/asr/contextnet.py @@ -23,10 +23,9 @@ import nemo.collections.asr as nemo_asr import nemo.utils.argparse as nm_argparse from nemo.collections.asr.helpers import monitor_asr_train_progress, process_evaluation_batch, process_evaluation_epoch +from nemo.utils import logging from nemo.utils.lr_policies import CosineAnnealing -logging = nemo.logging - def parse_args(): parser = argparse.ArgumentParser( diff --git a/examples/asr/jasper.py b/examples/asr/jasper.py index 
4ac0b05c8f7e..10b4d5d47f5e 100644 --- a/examples/asr/jasper.py +++ b/examples/asr/jasper.py @@ -11,10 +11,9 @@ import nemo.collections.asr as nemo_asr import nemo.utils.argparse as nm_argparse from nemo.collections.asr.helpers import monitor_asr_train_progress, process_evaluation_batch, process_evaluation_epoch +from nemo.utils import logging from nemo.utils.lr_policies import CosineAnnealing -logging = nemo.logging - def parse_args(): parser = argparse.ArgumentParser( diff --git a/examples/asr/jasper_aishell.py b/examples/asr/jasper_aishell.py index baed99786114..0ee584507909 100644 --- a/examples/asr/jasper_aishell.py +++ b/examples/asr/jasper_aishell.py @@ -10,10 +10,9 @@ import nemo.collections.asr as nemo_asr import nemo.utils.argparse as nm_argparse from nemo.collections.asr.helpers import monitor_asr_train_progress, process_evaluation_batch, process_evaluation_epoch +from nemo.utils import logging from nemo.utils.lr_policies import SquareAnnealing -logging = nemo.logging - def parse_args(): parser = argparse.ArgumentParser( diff --git a/examples/asr/jasper_aishell_infer.py b/examples/asr/jasper_aishell_infer.py index 048e6ee160be..1e44b8527e5f 100644 --- a/examples/asr/jasper_aishell_infer.py +++ b/examples/asr/jasper_aishell_infer.py @@ -9,8 +9,7 @@ import nemo import nemo.collections.asr as nemo_asr from nemo.collections.asr.helpers import post_process_predictions, post_process_transcripts, word_error_rate - -logging = nemo.logging +from nemo.utils import logging def load_vocab(vocab_file): diff --git a/examples/asr/jasper_an4.py b/examples/asr/jasper_an4.py index 9ac79f3d1935..6d3f6c82b24f 100644 --- a/examples/asr/jasper_an4.py +++ b/examples/asr/jasper_an4.py @@ -17,10 +17,9 @@ process_evaluation_epoch, word_error_rate, ) +from nemo.utils import logging from nemo.utils.lr_policies import CosineAnnealing -logging = nemo.logging - def create_dags(model_config_file, vocab, args, nf): diff --git a/examples/asr/jasper_eval.py b/examples/asr/jasper_eval.py index 8b37cd974e05..5ef4d4c51149 100644 --- a/examples/asr/jasper_eval.py +++ b/examples/asr/jasper_eval.py @@ -23,8 +23,7 @@ import nemo import nemo.collections.asr as nemo_asr from nemo.collections.asr.helpers import post_process_predictions, post_process_transcripts, word_error_rate - -logging = nemo.logging +from nemo.utils import logging def main(): diff --git a/examples/asr/notebooks/3_Speech_Commands_using_NeMo.ipynb b/examples/asr/notebooks/3_Speech_Commands_using_NeMo.ipynb index b7c2fc8416c1..36a9834ee800 100644 --- a/examples/asr/notebooks/3_Speech_Commands_using_NeMo.ipynb +++ b/examples/asr/notebooks/3_Speech_Commands_using_NeMo.ipynb @@ -254,7 +254,7 @@ ")\n", "from nemo.collections.asr.metrics import classification_accuracy\n", "\n", - "logging = nemo.logging" + "from nemo.utils import logging" ] }, { diff --git a/examples/asr/notebooks/4_Online_Data_Augmentation.ipynb b/examples/asr/notebooks/4_Online_Data_Augmentation.ipynb index ddffabfb270c..edbfa3e271b8 100644 --- a/examples/asr/notebooks/4_Online_Data_Augmentation.ipynb +++ b/examples/asr/notebooks/4_Online_Data_Augmentation.ipynb @@ -836,7 +836,7 @@ ")\n", "from nemo.collections.asr.metrics import classification_accuracy\n", "\n", - "logging = nemo.logging" + "from nemo.utils import logging" ] }, { diff --git a/examples/asr/quartznet.py b/examples/asr/quartznet.py index aee030010ae6..9dbea554c78d 100644 --- a/examples/asr/quartznet.py +++ b/examples/asr/quartznet.py @@ -10,10 +10,9 @@ import nemo.collections.asr as nemo_asr import nemo.utils.argparse as 
nm_argparse from nemo.collections.asr.helpers import monitor_asr_train_progress, process_evaluation_batch, process_evaluation_epoch +from nemo.utils import logging from nemo.utils.lr_policies import CosineAnnealing -logging = nemo.logging - def parse_args(): parser = argparse.ArgumentParser( diff --git a/examples/asr/quartznet_speech_commands.py b/examples/asr/quartznet_speech_commands.py index 13cab6b9951d..7bcb9058974a 100644 --- a/examples/asr/quartznet_speech_commands.py +++ b/examples/asr/quartznet_speech_commands.py @@ -17,10 +17,9 @@ process_classification_evaluation_batch, process_classification_evaluation_epoch, ) +from nemo.utils import logging from nemo.utils.lr_policies import CosineAnnealing, PolynomialDecayAnnealing, PolynomialHoldDecayAnnealing -logging = nemo.logging - def parse_args(): parser = argparse.ArgumentParser( diff --git a/examples/image/gan.py b/examples/image/gan.py index 28cac4cba43c..42ed3cb0ac90 100644 --- a/examples/image/gan.py +++ b/examples/image/gan.py @@ -9,9 +9,7 @@ import nemo import nemo.collections.simple_gan as nemo_simple_gan from nemo.backends.pytorch.torchvision.helpers import compute_accuracy, eval_epochs_done_callback, eval_iter_callback - -logging = nemo.logging - +from nemo.utils import logging parser = argparse.ArgumentParser(description='MNIST') parser.add_argument("--local_rank", default=None, type=int) diff --git a/examples/image/transfer_learning.py b/examples/image/transfer_learning.py index bb3d54fe837c..206104d2404f 100644 --- a/examples/image/transfer_learning.py +++ b/examples/image/transfer_learning.py @@ -8,8 +8,7 @@ import nemo from nemo.backends.pytorch.torchvision.helpers import compute_accuracy, eval_epochs_done_callback, eval_iter_callback - -logging = nemo.logging +from nemo.utils import logging sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) diff --git a/examples/neural_graphs/img/neural_graphs_general.png b/examples/neural_graphs/img/neural_graphs_general.png new file mode 100644 index 000000000000..996e3db26e3d Binary files /dev/null and b/examples/neural_graphs/img/neural_graphs_general.png differ diff --git a/examples/neural_graphs/img/neural_graphs_nesting.png b/examples/neural_graphs/img/neural_graphs_nesting.png new file mode 100644 index 000000000000..c411587714b8 Binary files /dev/null and b/examples/neural_graphs/img/neural_graphs_nesting.png differ diff --git a/examples/neural_graphs/neural_graph_advanced.ipynb b/examples/neural_graphs/neural_graph_advanced.ipynb new file mode 100644 index 000000000000..fd8a0b955dc9 --- /dev/null +++ b/examples/neural_graphs/neural_graph_advanced.ipynb @@ -0,0 +1,379 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# =============================================================================\n", + "# Copyright (c) 2020 NVIDIA. 
All Rights Reserved.\n",
+ "#\n",
+ "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# you may not use this file except in compliance with the License.\n",
+ "# You may obtain a copy of the License at\n",
+ "#\n",
+ "# http://www.apache.org/licenses/LICENSE-2.0\n",
+ "#\n",
+ "# Unless required by applicable law or agreed to in writing, software\n",
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License.\n",
+ "# =============================================================================\n",
+ "\n",
+ "from functools import partial\n",
+ "from os.path import expanduser, join, abspath, dirname, exists\n",
+ "import tarfile\n",
+ "\n",
+ "from ruamel.yaml import YAML\n",
+ "\n",
+ "import nemo\n",
+ "import nemo.collections.asr as nemo_asr\n",
+ "from nemo.collections.asr.helpers import monitor_asr_train_progress\n",
+ "from nemo.core import NeuralGraph, OperationMode, DeviceType, SimpleLossLoggerCallback\n",
+ "from nemo.utils import logging\n",
+ "from nemo.utils.app_state import AppState\n",
+ "\n",
+ "# Create Neural(Module)Factory, use CPU.\n",
+ "nf = nemo.core.NeuralModuleFactory(placement=DeviceType.CPU)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Tutorial II: The advanced functionality\n",
+ "\n",
+ "In this second part of the Neural Graphs (NGs) tutorial, we will focus on a more complex example: training an End-to-End Convolutional Neural Acoustic Model called JASPER. We will build a \"model graph\" and show how we can nest it into other graphs, how we can freeze/unfreeze modules, use graph configuration and save/load graph checkpoints.\n",
+ "\n",
+ "#### This part covers the following:\n",
+ " * how to nest one graph into another\n",
+ " * how to serialize and deserialize a graph\n",
+ " * how to export and import serialized graph configuration to/from YAML files\n",
+ " * how to save and load graph checkpoints (containing weights of the Trainable NMs)\n",
+ " * how to freeze/unfreeze modules in a graph\n",
+ " \n",
+ "Additionally, we will show how to use `AppState` to list all the modules and graphs we have created in the scope of our application.\n",
+ "In order to learn more about graph nesting and input/output binding, please refer to the first part of the tutorial.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Prepare the samples for training JASPER - we will use the data available in NeMo tests.\n",
+ "data_folder = abspath(\"../../tests/data/\")\n",
+ "logging.info(\"Looking for test ASR data\")\n",
+ "if not exists(join(data_folder, \"asr\")):\n",
+ " logging.info(\"Extracting ASR data to: {0}\".format(join(data_folder, \"asr\")))\n",
+ " tar = tarfile.open(join(data_folder, \"asr.tar.gz\"), \"r:gz\")\n",
+ " tar.extractall(path=data_folder)\n",
+ " tar.close()\n",
+ "else:\n",
+ " logging.info(\"ASR data found in: {0}\".format(join(data_folder, \"asr\")))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Set paths to model configuration, manifest and sample files.\n",
+ "model_config_file = abspath(\"../asr/configs/jasper_an4.yaml\")\n",
+ "manifest_path = join(data_folder, 'asr/tarred_an4/tarred_audio_manifest.json')\n",
+ "tarpath = join(data_folder, 
'asr/tarred_an4/audio_1.tar')\n",
+ "\n",
+ "# Open the model config file and get vocabulary.\n",
+ "yaml = YAML(typ=\"safe\")\n",
+ "with open(expanduser(model_config_file)) as f:\n",
+ " config = yaml.load(f)\n",
+ " \n",
+ "# Get labels (vocabulary).\n",
+ "vocab = config['labels']\n",
+ "vocab_len = len(vocab)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Instantiate DataLayer that can load the tarred samples.\n",
+ "data_layer = nemo_asr.TarredAudioToTextDataLayer(\n",
+ " audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, labels=vocab, batch_size=16)\n",
+ "logging.info(\"Loaded {} samples that we will use for training\".format(len(data_layer)))\n",
+ "\n",
+ "# Create the rest of the modules using the Neural Module deserialization feature.\n",
+ "data_preprocessor = nemo_asr.AudioToMelSpectrogramPreprocessor.deserialize(config[\"AudioToMelSpectrogramPreprocessor\"])\n",
+ "\n",
+ "jasper_encoder = nemo_asr.JasperEncoder.deserialize(config[\"JasperEncoder\"])\n",
+ "jasper_decoder = nemo_asr.JasperDecoderForCTC.deserialize(\n",
+ " config[\"JasperDecoderForCTC\"], overwrite_params={\"num_classes\": vocab_len}\n",
+ ")\n",
+ "ctc_loss = nemo_asr.CTCLossNM(num_classes=vocab_len)\n",
+ "greedy_decoder = nemo_asr.GreedyCTCDecoder()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Create the Jasper \"model\" graph.\n",
+ "with NeuralGraph(operation_mode=OperationMode.both, name=\"jasper_model\") as jasper_model:\n",
+ " # Copy one input port definition - using a \"user\" port name.\n",
+ " jasper_model.inputs[\"input\"] = data_preprocessor.input_ports[\"input_signal\"]\n",
+ " # Bind the selected input - the others are bound using default port names.\n",
+ " i_processed_signal, i_processed_signal_len = data_preprocessor(input_signal=jasper_model.inputs[\"input\"], length=jasper_model)\n",
+ " i_encoded, i_encoded_len = jasper_encoder(audio_signal=i_processed_signal, length=i_processed_signal_len)\n",
+ " i_log_probs = jasper_decoder(encoder_output=i_encoded)\n",
+ " # Bind selected outputs - using \"user\" port names.\n",
+ " jasper_model.outputs[\"log_probs\"] = i_log_probs\n",
+ " jasper_model.outputs[\"encoded_len\"] = i_encoded_len\n",
+ "\n",
+ "# Print the summary.\n",
+ "logging.info(jasper_model.summary())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Serialize the whole graph.\n",
+ "serialized_jasper = jasper_model.serialize()\n",
+ "logging.info(\"Serialized JASPER model:\\n {}\".format(serialized_jasper))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# You can also serialize/deserialize a single NeuralModule, e.g. 
a decoder.\n",
+ "logging.info(\"Serialized JASPER Decoder:\\n {}\".format(jasper_decoder.serialize()))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# We can also export the serialized configuration to a file.\n",
+ "jasper_model.export_to_config(\"my_jasper.yml\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Display the lists of graphs and modules.\n",
+ "logging.info(AppState().graphs.summary())\n",
+ "logging.info(AppState().modules.summary())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Deserialize graph - create a copy of the JASPER \"model\".\n",
+ "# Please note that the modules exist, so we must enable the graph to \"reuse\" them.\n",
+ "# (Commenting out reuse_existing_modules will raise a KeyError.)\n",
+ "jasper_copy = NeuralGraph.deserialize(serialized_jasper, reuse_existing_modules=True)\n",
+ "serialized_jasper_copy = jasper_copy.serialize()\n",
+ "assert serialized_jasper == serialized_jasper_copy # THE SAME! Please note that the name of the graph is not exported."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Alternatively, import a copy of the JASPER \"model\" from config.\n",
+ "jasper_copy = NeuralGraph.import_from_config(\"my_jasper.yml\", reuse_existing_modules=True, name=\"jasper_copy\")\n",
+ "\n",
+ "# Print the summary.\n",
+ "logging.info(jasper_copy.summary())\n",
+ "\n",
+ "# Display the lists of graphs and modules.\n",
+ "logging.info(AppState().graphs.summary())\n",
+ "logging.info(AppState().modules.summary())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that there are two graphs in the \"Graph Registry\", yet the list of modules hasn't changed. This means that both graphs are built over the same set of modules."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create the \"training\" graph.\n", + "with NeuralGraph(operation_mode=OperationMode.training) as training_graph:\n", + " # Create the \"implicit\" training graph.\n", + " o_audio_signal, o_audio_signal_len, o_transcript, o_transcript_len = data_layer()\n", + " # Use Jasper module as any other neural module.\n", + " o_log_probs, o_encoded_len = jasper_copy(input=o_audio_signal, length=o_audio_signal_len)\n", + " o_predictions = greedy_decoder(log_probs=o_log_probs)\n", + " o_loss = ctc_loss(\n", + " log_probs=o_log_probs, targets=o_transcript, input_length=o_encoded_len, target_length=o_transcript_len\n", + " )\n", + " # Set the graph output.\n", + " training_graph.outputs[\"o_loss\"] = o_loss\n", + "\n", + "# Print the summary.\n", + "logging.info(training_graph.summary())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create a simple loss callback.\n", + "loss_callback = nemo.core.SimpleLossLoggerCallback(\n", + " tensors=[training_graph.output_tensors[\"o_loss\"]],\n", + " print_func=lambda x: logging.info(f'Train Loss: {str(x[0].item())}'), step_freq=1\n", + ")\n", + "# Train the graph.\n", + "nf.train(\n", + " training_graph=training_graph,\n", + " optimizer=\"novograd\",\n", + " callbacks=[loss_callback],\n", + " optimization_params={\"max_steps\": 5, \"lr\": 0.01},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Please note that the loss is going down. Still, we use only 65 samples, so we cannot really expect the model to be useful;)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Finally, I can save the graph checkpoint!\n", + "# Note that optionally you can indicate the names of the modules to be saved.\n", + "jasper_copy.save_to(\"my_jasper.chkpt\")#, module_names=[\"jasperencoder0\"])\n", + "# Please note only \"trainable\" modules will be saved." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# We can also save the whole training graph - which in this case will result in the same checkpoint...\n", + "training_graph.export_to_config(\"my_whole_graph.yml\")\n", + "training_graph.save_to(\"my_whole_graph.chkpt\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Finally, I can load everything and continue training.\n", + "new_training_graph = NeuralGraph.import_from_config(\"my_whole_graph.yml\", reuse_existing_modules=True)\n", + "\n", + "# Let's restore only the encoder\n", + "new_training_graph.restore_from(\"my_whole_graph.chkpt\", module_names=[\"jasperencoder0\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# So let us freeze the whole graph...\n", + "training_graph.freeze() #we can also freeze a subset, using \"module_names=[]\"\"\n", + "# ... 
and finetune only the decoder.\n",
+ "training_graph.unfreeze(module_names=[\"jasperdecoderforctc0\"])\n",
+ "\n",
+ "# Ok, let us see what the graph looks like now.\n",
+ "logging.info(training_graph.summary())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "# Create a new simple callback using graph outputs \"o_loss\".\n",
+ "loss_callback = nemo.core.SimpleLossLoggerCallback(\n",
+ " tensors=[new_training_graph.output_tensors[\"o_loss\"]],\n",
+ " print_func=lambda x: logging.info(f'Train Loss: {str(x[0].item())}'), step_freq=1\n",
+ ")\n",
+ "\n",
+ "# And continue training...\n",
+ "nf.reset_trainer()\n",
+ "nf.train(\n",
+ " training_graph=new_training_graph,\n",
+ " optimizer=\"novograd\",\n",
+ " callbacks=[loss_callback],\n",
+ " optimization_params={\"max_steps\": 5, \"lr\": 0.01},\n",
+ ")\n",
+ "# Please note that this will throw an error if you freeze all the trainable modules!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "nemo-env",
+ "language": "python",
+ "name": "nemo-env"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/examples/neural_graphs/neural_graph_basic.ipynb b/examples/neural_graphs/neural_graph_basic.ipynb
new file mode 100644
index 000000000000..8c90654c7723
--- /dev/null
+++ b/examples/neural_graphs/neural_graph_basic.ipynb
@@ -0,0 +1,296 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# =============================================================================\n",
+ "# Copyright (c) 2020 NVIDIA. 
All Rights Reserved.\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# http://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License.\n", + "# =============================================================================\n", + "\n", + "import torch\n", + "\n", + "from nemo.backends.pytorch.tutorials import MSELoss, RealFunctionDataLayer, TaylorNet\n", + "from nemo.core import (\n", + " DeviceType,\n", + " EvaluatorCallback,\n", + " NeuralGraph,\n", + " NeuralModuleFactory,\n", + " OperationMode,\n", + " SimpleLossLoggerCallback,\n", + ")\n", + "from nemo.utils import logging\n", + "from nemo.utils.app_state import AppState\n", + "\n", + "# Create Neural(Module)Factory, use CPU.\n", + "nf = NeuralModuleFactory(placement=DeviceType.CPU)" + ] + }, + { + "attachments": { + "neural_graphs_general.png": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA5wAAAHPCAYAAAA/N9WhAAAKx2lDQ1BJQ0MgUHJvZmlsZQAASImVlwdYU1kWgO97L73QAhGQEnpHepUSeiiCdLARkkBCiTEhqNgQEUdwLKiIoCLgoICCYwFkLIgF2yCoiH2CDCrqOFjAgso+YAkzu9/ufnved3L/d3LuOefe7958JwBQCWyRKANWAiBTmCWODPRhxCckMvC/AwSQgAogADybIxExIyJCASpT499l5C6AxsfbVuOx/v37/yrKXJ6EAwAUgXIyV8LJRPkEqm84InEWAMgB1G6wNEs0zldQVhWjBaL8aJxTJ3lonJMnGIOZ8ImO9EVZHQAChc0WpwJAMUTtjGxOKhqH4oeyjZArEKKMvgNPDp/NRRnNCywzMxePswxl0+S/xEn9W8xkeUw2O1XOk2uZEIKfQCLKYC//P7fjf0tmhnQqhzGqFL44KBId6eie3UtfHCJnYfKc8CkWcCf8J5gvDYqZYo7EN3GKuWy/EPncjDmhU5wiCGDJ42SxoqeYJ/GPmmLx4kh5rhSxL3OK2eLpvNL0GLmdz2PJ4+fwo+OmOFsQO2eKJelRIdM+vnK7WBopr58nDPSZzhsgX3um5C/rFbDkc7P40UHytbOn6+cJmdMxJfHy2rg8P/9pnxi5vyjLR55LlBEh9+dlBMrtkuwo+dws9EBOz42Q72EaOzhiioEf8Aeh6MMAMcAOOABb9DMMgCzesvEzCnwXi5aLBan8LAYTvWU8BkvIsbZk2NnY2QAwfmcnj8T7exN3EaITpm0iNL4reuaR6mlbsiYAzeg50iBO2wwPAqAYD0BTLkcqzp60jV8ngEV/CxSBKtAAOsAAmAIrtDIn4A680YqDQTiIBglgIeAAPsgEYrAUrARrQQEoAlvBTlAGKkA1OASOgGOgGZwG58FlcB10gR7wEMjAAHgFhsAIGIUgCA9RIRqkAelCRpAFZAe5QJ6QPxQKRUIJUBKUCgkhKbQSWgcVQcVQGVQJ1UI/Q6eg89BVqBu6D/VBg9A76AuMwBRYFdaGjeFZsAvMhEPgaHgBnAovgXPgfHgzXApXwYfhJvg8fB3ugWXwK3gYAQgZoSN6iBXigvgi4UgikoKIkdVIIVKCVCENSCvSgdxGZMhr5DMGh6FhGBgrjDsmCBOD4WCWYFZjNmHKMIcwTZiLmNuYPswQ5juWitXCWmDdsCxsPDYVuxRbgC3B1mBPYi9he7AD2BEcDkfHmeCccUG4BFwabgVuE24vrhHXhuvG9eOG8Xi8Bt4C74EPx7PxWfgC/G78Yfw5/C38AP4TgUzQJdgRAgiJBCEhj1BCqCOcJdwiPCeMEpWIRkQ3YjiRS1xO3EI8QGwl3iQOEEdJyiQTkgcpmpRGWksqJTWQLpEekd6TyWR9sit5LllAziWXko+Sr5D7yJ8pKhRzii9lPkVK2Uw5SGmj3Ke8p1KpxlRvaiI1i7qZWku9QH1C/aRAU7BWYClwFdYolCs0KdxSeKNIVDRSZCouVMxRLFE8rnhT8bUSUclYyVeJrbRaqVzplFKv0rAyTdlWOVw5U3mTcp3yVeUXKngVYxV/Fa5Kvkq1ygWVfhpCM6D50ji0dbQDtEu0AVWcqokqSzVNtUj1iGqn6pCaipqDWqzaMrVytTNqMjpCN6az6Bn0LfRj9Lv0LzO0ZzBn8GZsnNEw49aMj+oz1b3VeeqF6o3qPepfNBga/hrpGts0mjUea2I0zTXnai7V3Kd5SfP1TNWZ7jM5MwtnHpv5QAvWMteK1FqhVa11Q2tYW0c7UFukvVv7gvZrHbqOt06azg6dszqDujRdT12B7g7dc7ovGWoMJiODUcq4yBjS09IL0pPqVep16o3qm+jH6OfpN+o/NiAZuBikGOwwaDcYMtQ1DDNcaVhv+MCIaORixDfaZdRh9NHYxDjOeINxs/ELE3UTlkmOSb3JI1OqqZfpEtMq0ztmODMXs3SzvWZd5rC5oznfvNz8pgVs4WQhsNhr0W2JtXS1FFpWWfZaUayYVtlW9VZ91nTrUOs862brN7MMZyXO2jarY9Z3G0ebDJsDNg9tVWyDbfNsW23f2ZnbcezK7e7YU+0D7NfYt9i/dbBw4Dnsc7jnSHMMc9zg2O74zcnZSezU4DTobOic5LzHuddF1SXCZZPLFVesq4/rGtfTrp/dnNyy3I65/elu5Z7uXuf+YrbJbN7
sA7P7PfQ92B6VHjJPhmeS535PmZeeF9uryuupt4E317vG+znTjJnGPMx842PjI/Y56fPR1813lW+bH+IX6Ffo1+mv4h/jX+b/JEA/IDWgPmAo0DFwRWBbEDYoJGhbUC9Lm8Vh1bKGgp2DVwVfDKGERIWUhTwNNQ8Vh7aGwWHBYdvDHs0xmiOc0xwOwlnh28MfR5hELIn4ZS5ubsTc8rnPIm0jV0Z2RNGiFkXVRY1E+0RviX4YYxojjWmPVYydH1sb+zHOL644ThY/K35V/PUEzQRBQksiPjE2sSZxeJ7/vJ3zBuY7zi+Yf3eByYJlC64u1FyYsfDMIsVF7EXHk7BJcUl1SV/Z4ewq9nAyK3lP8hDHl7OL84rrzd3BHeR58Ip5z1M8UopTXqR6pG5PHeR78Uv4rwW+gjLB27SgtIq0j+nh6QfTxzLiMhozCZlJmaeEKsJ04cXFOouXLe4WWYgKRLIlbkt2LhkSh4hrJJBkgaQlSxVtjm5ITaXrpX3Zntnl2Z+Wxi49vkx5mXDZjeXmyzcuf54TkPPTCswKzor2lXor167sW8VcVbkaWp28un2NwZr8NQO5gbmH1pLWpq/9Nc8mrzjvw7q4da352vm5+f3rA9fXFygUiAt6N7hvqPgB84Pgh86N9ht3b/xeyC28VmRTVFL0dRNn07UfbX8s/XFsc8rmzi1OW/ZtxW0Vbr27zWvboWLl4pzi/u1h25t2MHYU7viwc9HOqyUOJRW7SLuku2SloaUtuw13b939tYxf1lPuU964R2vPxj0f93L33trnva+hQruiqOLLfsH+e5WBlU1VxlUl1bjq7OpnB2IPdPzk8lNtjWZNUc23g8KDskORhy7WOtfW1mnVbamH66X1g4fnH+464nekpcGqobKR3lh0FByVHn35c9LPd4+FHGs/7nK84YTRiT0naScLm6Cm5U1DzfxmWUtCS/ep4FPtre6tJ3+x/uXgab3T5WfUzmw5Szqbf3bsXM654TZR2+vzqef72xe1P7wQf+HOxbkXOy+FXLpyOeDyhQ5mx7krHldOX3W7euqay7Xm607Xm2443jj5q+OvJzudOptuOt9s6XLtau2e3X32ltet87f9bl++w7pzvWdOT/fdmLv3euf3yu5x7724n3H/7YPsB6MPcx9hHxU+Vnpc8kTrSdVvZr81ypxkZ/r8+m48jXr6sJ/T/+p3ye9fB/KfUZ+VPNd9XvvC7sXpwYDBrpfzXg68Er0afV3wh/Ife96Yvjnxp/efN4bihwbeit+Ovdv0XuP9wQ8OH9qHI4afjGSOjH4s/KTx6dBnl88dX+K+PB9d+hX/tfSb2bfW7yHfH41ljo2J2GL2RCuAoAqnpADwDu0TqAkA0LoAIM2b7KknBJr8HzBB4D/xZN89IU4AVLcBEJ0LQCg67kZHY1QVvQGIQDXaG8D29nL9p0hS7O0mY5Gb0dakZGzsPdo/4s0A+NY7NjbaPDb2rQYt9gEAbSOTvfy4KB0GYP8K27CQmJ6ukVzwL/IP1NgR3fsPECIAAAGdaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA1LjQuMCI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOmV4aWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vZXhpZi8xLjAvIj4KICAgICAgICAgPGV4aWY6UGl4ZWxYRGltZW5zaW9uPjkyNDwvZXhpZjpQaXhlbFhEaW1lbnNpb24+CiAgICAgICAgIDxleGlmOlBpeGVsWURpbWVuc2lvbj40NjM8L2V4aWY6UGl4ZWxZRGltZW5zaW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KlBLvwQAAQABJREFUeAHsnQecFEX2xx8ZJEcBlZwETxQwgQpiwpxz4kwYUDGf6QBz1jOjf3M6s2LAdBJUEAUFE6CCYEAlR5Gg+3/fWmrsnZ08Pbszy3v76Z0OVa+qfz1TXS9WpSIlMTIEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEQkagcsj8jJ0hYAgYAoaAIWAIGAKGgCFgCBgChoAh4BAwgdO+CIaAIWAIGAKGgCFgCBgChoAhYAgYAjlBwATOnMBqTA0BQ8AQMAQMAUPAEDAEDAFDwBAwBEzgtO+AIWAIGAKGgCFgCBgChoAhYAgYAoZAThAwgTMnsBpTQ8AQMAQMAUPAEDAEDAFDwBAwBAwBEzjtO2AIGAKGgCFgCBgChoAhYAgYAoaAIZATBEzgzAmsxtQQMAQMAUPAEDAEDAFDwBAwBAwBQ8AETvsOGAKGgCFQwRAYPny4VKpUyTbDoEJ8B/g+GxkCFQWBgw46qEL8Lu0dY+/YZs2ayTfffJPST7NSkVJKJa2QIWAIGAKGQEEgwETAyBCoKAj069dPRo8eXVFux+5jA0ZgzJgxsssuu2zACNitVzQEUh2fq1a0G7f7MQQMAUPAEChGgBeBkSFQyAgwQTcyBCoiAjY+V8SnuuHcU7pjswmcG853w+7UEDAENjAEmNAMHTp0A7tru92KgoBZgirKk7T7iIWAjc+xULFzhYJAup5UFsNZKE/W+mkIGAKGgCFgCBgChoAhYAgYAoZAgSFgAmeBPTDrriFgCBgChoAhYAgYAoaAIWAIGAKFgoAJnIXypKyfhoAhYAgYAoaAIWAIGAKGgCFgCBQYAiZwFtgDs+4aAoaAIWAIGAKGgCFgCBgChoAhUCgImMBZKE/K+mkIGAKGgCFgCBgChoAhYAgYAoZAgSFgAmeBPTDrriFgCBgChoAhYAgYAoaAIWAIGAKFgoAJnIXypKyfhoAhYAgYAoaAIWAIGAKGgCFgCBQYAiZwFtgDs+4aAoaAIWAIGAKGgCFgCBgChoAhUCgImMBZKE/K+mkIGAKGgCFgCBgChoAhYAgYAoZAgSFgAmeBPTDrriFgCBgChoAhYAgYAoaAIWAIGAKFgoAJnIXypKyfhoAhYAgYAoaAIWAIGAKGgCFgCBQYAiZwFtgDs+4aAoaAIWAIGAKGgCFgCBgChoAhUCgImMBZKE/K+mkIGAKGgCFgCBgChoAhYAgYAoZAgSFgAmeBPTDrriFgCBgChoAhYAgYAoaAIWAIGAKFgoAJnIXypKyfhoAhYAgYAoaAIWAIGAKGgCFgCBQYAiZwFtgDs+4aAoaAIWAIGAKGgCFgCBgChoAhUCgImMBZKE/K+mkIGAKGgCFgCBgChoAhYAgYAoZAgSFgAmeBPTDrriFgCBgChoAhYAgYAoaAIWAIGAKFgoAJnIXypKyfhoAhYAgYAoaAIWAIGAKGgCFgCBQYAiZwFtgDs+4aAo
aAIWAIGAKGgCFgCBgChoAhUCgImMBZKE/K+mkIGAKGgCFgCBgChoAhYAgYAoZAgSFgAmeBPTDrriFgCBgChoAhYAgYAoaAIWAIGAKFgoAJnIXypKyfhoAhYAgUIAIff/yxFBUVlUvP77vvPrnyyivlu+++K9X+77//LkuXLi11vjxO/PXXX/Lbb7+VR9Ol2ly3bp189tln8u6778qMGTNk7dq1pcrYCUPAEKgYCJTX+Mz4y9h81VVXyZo1a0qBuXjxYvnjjz9KnS+PE/k0Pi9cuFDGjx/vxueZM2eW27s1k+dgAmcmqFkdQ8AQMAQMgaQI/PTTT7Lddtu5F2TSwjkocNttt8nQoUPl22+/LcGdyU7z5s2lQYMG8vnnn5e4lqsDBLl4dOCBB7r+PPDAA/GK5Pw8SoHzzjtPqlWrJj169JDdd99dunTpIu3atZOXX3455+1bA4aAIVC2CJTn+Lx8+XI3Nv/73/8uJVhOnDhRGjVqJB07dpTVq1eXCSj5Pj7Tv3/961/SpEkT6dOnjxufO3ToIL1793YKwjIBKctGTODMEkCrbggYAoaAIRAbgUcffdRdeOKJJ2IXKKezK1asECY8UK6tnD/++KMcf/zxTvCOd7tz5sxxl+bNmxevSM7PY21AQIc6derkJjTsMyk96KCD5KOPPuLQyBAwBCoIAvk6PvtxkLFn1apVOUW7UMZnrKwvvfSSw2KvvfaSLbfc0u0zLvft21fmzp2bU5zCYG4CZxgoGg9DwBAwBAyBEgigkb3jjjvcOVxbsSrmCzVr1kw++eQTGTVqlNMW57JfX375pTz++OOltPjBNl977TV59tln5dxzzw2eLtN9tOZ169aVxx57zLnSvv3224L7lifOGxkChkDFQCCfx+e9995bGBOnTJnivFByiXihjM/Vq1eX5557Tr7++mt54403ZOrUqc6tFmxQnnIt36lqvnfQ+mcIGAKGgCFQeAi888474jXV9H7kyJFy5JFHlriRP//801kYq1SpIvXr1xe0zT/88IO0bdtWWrZsWaIsB1gmifepU6eOMGGaNm2aVK1a1VnkatWqVap8rBNozNlwFWWrXDm23nXlypXyzTffOBabb7651KxZsxS7BQsWuDLwoAz3ECTa8RZU4pEWLVrkLtPnevXquXtYtmyZ1K5dW3bddVdhUhGLiO8kDnWTTTaR1q1bS6VKlUoUAxew9O2j7Z49e7a0adMmJo4lKq8/2G233ZyWHGw94dZ2+OGHO2EYrI0MAUOgYiCQr+MzwhNx4zvssIMb2+OhzdjLmNiwYUPBtZR3SJAIEeB9wjiIIq1r165So0aNYBH3Hsh2fMbySBu//vqrcwFu2rRpiTY4CGN8ho+3arIP9e/fXzbddFPnhRIrT0FxqTz6rw/FyBAwBAwBQ6ACIaCvGLL0FA0bNqzc7uqAAw5wfdh+++3d55577lmqLx9++KG7Rl+57vvNp1ohi956660SdXyZfv36lShL+ZNOOqlIJw8lyqtrqCunGuHI+UsuuaRE3fnz50eusaOa4yLf52B/DjvssKKff/7ZlVXte5G+/EvwoewhhxxSpMJhhN/FF19cqgzlNEbSlVELa4nrGisZqcvOp59+WuTvwfdFJ09FTz/9dIlyHhe1CJTqF31SobZE+XQO1PLp+njaaaelUy2Usv4582lkCFQEBEaPHh35zW/I47MKaBEcguP2NttsEzmvQmKpR/7iiy8WqZAVKePHRcZajfd05W+66Sb3/vDX/Ofll19epMJshGe24zPjMOOx588nfWbcDlKuxmdVikbaVw+ZYJNlsu/vO9XxObZqV7kYGQKGgCFgCBgCmSBA7M0rr7ziqnpXTBUeRQW2uOy4jgWUeEE00lhH9UXtXKuiK40ZM8Zpe0888URR4dBdfvDBBwUrHRrnRIRV09eJLvf6669L9+7dXbwibrfHHntspD9vvvlmJGMriYbY6CdaZspCL7zwggwePDjCdquttpKddtopcgw/tn322ceda9y4sejLOnI9uENsDsl7sLKinT/rrLNcW1gAjjrqKLnnnnuCxd0+7X3//feuz2i+Ifp06623uv10/2EhUKWAq7bzzjunW93KGwKGQB4ikM/jc69evZzHSizYLrvsMjn44IOdRQ9rH+M/yc0gLLbe84NwAN4fjIH+OmWuvvpqCSZmy2Z8/s9//uPGYcZj2mDcZ5zG5ZVxW4VOmixBYY3PKk06l1qSCNE+FLzPEo3m00GZiMHWiCFgCBgChkCZIaDvGKd1LS8Nuqa6d+2rcOXuWYUud4zmOUhBC6e6JUUuqetpkb5AXR0sfF4r7TXFGutYpIJlpDyWQX/PQeuftw4GLZxU0pT7kfLewokVEKsqfNSNtAjtsaclS5YUTZo0yR8WcXzjjTcWqcusO0f/NF41wlMndJGytA3PWNp6XwiLJ2W8hVPdY4u8ph9Lsbrj+qJFOmlyZdGsgxPkcaGOt7Cqy3GR1+BjEciETj31VNcW9b31IBM+mdZBcw4uqWrQM23H6hkCZYVAPlg482F8jmfh5DngQRI9Zk6ePNmd4zzvkeD4r+6kRcH3B2P1f//738gjVaGs6Oijj3b1VeEYOc9OJuPzL7/8EunL9ddfH+HHOK3KRHeN95ensMdnrJngwMY764svvvBNlemn70Oq47NZOBUxI0PAEDAEDIFwECC28s4773TMfMzmEUcc4Y6D2uVga1gIiU30RFyOz5iKhY/1IIOEdttrszmvQpmLNWTfZ/JjPx3CIotWHKvlvffeKxtttFGkOrGRPXv2LHF84YUXurhO4o2IOw1qmInpyYbQkpPUCLrhhhtKxB6RWIg+otkeN25ciWbOOOOMiLWVmKZjjjnGXceiQT/TIZJS3H///a7KNddcEze+NB2eVtYQMATKF4FCHZ+9RwceIxdccEGJ8b99+/Yl3h+M1f6dQ7I6PGtYegqaNWuWi53P5in4ZaJ4b7GUlCdiRK+44gp3iMWVONMghTU+B2P9yROgAmewmbzdN4Ezbx+NdcwQMAQMgcJDANdYL7h5IWzfffd1N4Lw6AWpZHfWrVu3SBEEumTkXT4zTZ6AkAfhlkuynGSEy+see+zhBDEmPCQN8sT9Z0Ms6A0hWHbu3LkEKwRhXLMgJk+JiAmRJ+965Y8TfSKc4q4G4X7sBddEdeyaIWAI5D8ChTo++/WSSWKWjEigphZON3aSkI31hIP1fPK2ZHziXffJ5HhXsG5xkAjJ8JRM8Zjp+IyClbWl1VLtFI9qvXVKUt9uvn6awJmvT8b6ZQgYAoZAASIwYsQI1+vjjjsuYhXDeqnunu58OmtyEm8JkaU1GREPCWHNy4S8kOdjHxPxePLJJ10WRbTYEC98dV91AiLH6tfER8akbr6ubosWLWLy8NbXZLgENeExGcU5Scynj0HieUZngIxTzU4bAoZAniNQqOPz9OnTHbLxxkQPO2Mv8ZTEuSMYItRxfPrpp/siWY/PftyNpZgkA7knP4774+jPTMdn+JCZV5MgyfDhwx1b3kn5TiZw5vsTsv4ZAoaAIVAgCJBk5tVXX
3W9Ze1JrHN+85bNhx9+OOGalP5WSf7jLXixlkjx5fynT0jkhVR/PtXPzTbbzBUlvX0iwlJI4h9IM9661Pq87DWWx2nS49XFlS1Vat68uSuqsUKlqsAHKwXk+1yqUBYnmNiRjAK69NJLS6Xiz4K1VTUEDIFyRKCQx2eWyoK8sBcPxg8++EBY9xnSjLbCGEqIB6EJiSid8ZnlqSCN5S/F8rPPPouc8+UiJ3Kw4xW5JHfL1nKbg+6VYGkCZwk47MAQMAQMAUMgUwQeeeSRSFXWI+Ml7jcvCCKw6XIgkXLxdjS5RuQSLqvJyMdu4j7lyWubY00MfBn/6V1iyUarSYX86VKfmpAicu7KK6+MuT5npEBgB227Jt4JnIm/6ydXYBXtIuwtj9T2mMbnlN4VXNFOPvlkVwneZIU0MgQMgYqBQD6Nz8H1j1MZn/0alD5+Mt4T8e8NMoGT8TzYTrw6nE9nfPbvI7KlRwuqwbh61kHONQXbi7VWdK7bT4e/CZzpoGVlDQFDwBAwBGIiwIv3rrvuctdw9cFFNXrba6+93PVHH320BA8SHwTjHkkSRIIFiDhQ/4L3lVj6wxPtXnvttZHlO0455RR/SbyGGasqySMSEUklfDIeeASFTiwD1113naveoEGDCJtgbCnadG/FDWqafR+o5CdD7CdavoX0+n7pFqyMa9asoYqb3JCOH8L1d5dddnH7Yf0jWZJfBgWLANhq9t7IplkYw2rK+BgChkAZIpBv4zMhED6G8aGHHko4HgKTZsx2aBHGwHjP/Xgint6Hang3V5Ke+TGWTxKfeQqO7ZmMzyzNAhG+4RPkccw7zCdaI8yiXr16nA6FcBX+5z//KWPHjnX8UA4yVoMdhEDuQy3ciXz8V6Y5dK0xQ8AQMAQMgZwjoO8aggiLynJZFJ/Knnb1ZR/zHnWtTNcvypAWP7gsigp7RSqQRpYD8fcQ5OXTy3ONZUZIQa+TlgjPgQMHlmiX9PmeD/zVHctdj7UsChdIpR8sr2tsFvklXTj//vvvu+VB/HIr8GQJlWAZylFPhUTXFp++PNdYuoRlRlQIdtf5F70sCucmTJgQ6YtaG107QT5q0aWYI49LkCcXWDbF38/ChQuLC8f5r8qBSFlfJ/qTpQXKkmxZlLJE29oqCwTKa1mUfByfhwwZEhlzVMEWgd/3NXopKY3FjJRn3Gf8V0HLnWMsXrp0aZEm04mUYdxkaa7guMmY9u9//zvSVqbjc3AZLMb0ww47rIg+wJ9PDQeJtBHG+Mx478dj7t235c/xbipr8m2nuiwKwbNGhoAhYAgYAhUIAf8iKEuBkxcu7fJyj0cIfL5vGmdTQuBkcuCv8YnQxvpqQfIv7qCQSVmONdFNEetXBonJRHBSo5ZKd5mJiW8rWhBD0PMCoC/DpyadiEwipk2bFpnocI2X/0UXXeQmO75vQUGZddKiJz2a7CHSVSYs8Bk5cmTkHDvUYyIW7AeTsOjJhcdFLccl6gcFTvYTEfcXbCfWvl9XNRGfMK+ZwBkmmsYrHxAoL4EzH8dnlI6Mm4w1jP+e/NqYCJPRxBiHwi44PjHmsj6xhiC44ppHoIRAxnXNKRBZc5O6Qcp0fFavllJ9QWmqni9B9pF1krMZn1esWOEE5WhBE2Wnxq2WaK+sDvwzSFXgrETHtJKRIWAIGAKGQAVBwK9RqQKnDB06NG/vavz48dKnTx/nWkUyCLL64cLZqlWrUunmuYkBAwa4hDlqyXOZYXFpwm2pSZMmCe+R2Mm5c+e6tdpSjenx67exBifxqB5T3xCvTnjyiVuWv46rF+nwycwbTJlPOfqLKxRJkNLJUIgrK3VJJlSnTh3fhQr/icswcVI6oSnhjlzhb9xusMIiwPfZu8Lb+Fz8mBnzCUNIJ+aR8myM/cEwB//FYRwm5IFr3s2WaytXrnSJh1TA9UXdZzbjMy669CXee6tEQyEc8N7hncZ7hLU/y4v8Oy/V8fnv/L3l1WNr1xAwBAwBQ8AQUAQQ7NhSIQS26ElDvHq8lH0innhlos8TD9OxY8fo05FjXrbB+B9/gURFpKyPJspnmlWWZBCxeEa3YceGgCFgCOQKgVyNzygN0413RIgMCpLR98w4HOv9wLqcsc5nMz43bNhQ2MqKUsnaXlZ9SacdSxqUDlpW1hAwBAwBQ8AQMAQMAUPAEDAEDAFDIGUETOBMGSoraAgYAoaAIWAIGAKGgCFgCBgChoAhkA4C5lKbDlpW1hAwBAwBQyA0BEjlTmr3VNcPu+2229xyJYlcXUPrnDEyBAwBQ2ADRsDG5w344efg1k3gzAGoxtIQMAQMAUMgOQIkwOndu3fygutLbL755imXtYKGgCFgCBgCmSNg43Pm2FnN0giYS21pTOyMIWAIGAKGgCFgCBgChoAhYAgYAoZACAiYwBkCiMbCEDAEDAFDwBAwBAwBQ8AQMAQMAUOgNAImcJbGxM4YAoaAIWAIGAKGgCFgCBgChoAhYAiEgIAJnCGAaCwMAUPAEDAEDAFDwBAwBAwBQ8AQMARKI2ACZ2lM7IwhYAgYAoaAIWAIGAKGgCFgCBgChkAICJjAGQKIxsIQMAQMAUPAEDAEDAFDwBAwBAwBQ6A0AiZwlsbEzhgChoAhYAgYAoaAIWAIGAKGgCFgCISAgAmcIYBoLAwBQ8AQMAQMAUPAEDAEDAFDwBAwBEojYAJnaUzsjCFgCBgChoAhYAgYAoaAIWAIGAKGQAgImMAZAojGwhAwBAwBQ8AQMAQMAUPAEDAEDAFDoDQCJnCWxsTOGAKGgCFgCBgChoAhYAgYAoaAIWAIhICACZwhgGgsDAFDwBAwBAwBQ8AQMAQMAUPAEDAESiNgAmdpTOyMIWAIGAKGgCFgCBgChoAhYAgYAoZACAiYwBkCiMbCEDAEDAFDwBAwBAwBQ8AQMAQMAUOgNAImcJbGxM4YAoaAIWAIGAKGgCFgCBgChoAhYAiEgIAJnCGAaCwMAUPAEDAEDAFDwBAwBAwBQ8AQMARKI2ACZ2lM7IwhYAgYAoaAIWAIGAKGgCFgCBgChkAICJjAGQKIxsIQMAQMAUPAEDAEDAFDwBAwBAwBQ6A0AiZwlsbEzhgChoAhYAgYAoaAIWAIGAKGgCFgCISAgAmcIYBoLAwBQ8AQMAQMAUPAEDAEDAFDwBAwBEojULX0KTtjCBgChoAhYAgkR+Dnn3+Wb7/9Vr777jv5atIk+U73q1SpIlt07y7devSQDh06SPv27aVRo0bJmVkJQ8AQMAQMAUPAEKiQCJjAWSEfq92UIWAIGAK5Q2DFihVy4YUXyn333ecaqV+pkjQrKpKW65t87p135M7KlWXZX3+5M5dccolce+21ueuQcTYEDAFDwBAwBAyBvEXABM68fTTWMUPAEDAE8g+B8ePHy6H77SfVli+X07V7m+nWSIXNUqTC5jw9OUW36667Tl597jl5adQoZ/UsVdZOGAKGgCFgCBgChkCFRcBiOCvso7UbMwQMAUMgXATOOuss6dOnj3RetEguWLtWuiv7RM6yzfT6HroN0e0Xdbvt2LGj3HnnnXpkZAgYAoaA
1o9ujRA9OnT8eCBQvw/PPP61ZfunQJ7777LnLmzIn3338/EYlp02L75xkzZqB169aJjm/duhVRUVHYuHEjmjZtGnd8+fLletsweuMOJLNRtGhRXbdevXppY9UYHU3mNDksBHyHAEcII4KVl4hakpPk+nT2sXWSU2LHcV7H3t+HpNTZ0BO5dwnoVuvJIganJ989qbvHE3i28lTtYrt4X3+n27IqLAgn77vY5shUyGk9cqIQ8FoCSbm3OtJos/SU7+HIVW2XtaVHDE7Mnj0bNAbnzXvgV/zRRx9pg/Ott97Ck08+iWzZsulRSBqMNCz9/f3jsT59+nTc+T///DM+//zzRGU4kklZvHhxPIOTI5+U0NBQvXbkz2uvvYbAwEB89dVXYF1FhIAQSECAoQ9JhT8kKG7zo1l6tKEYZPMydh+woSdSudp7usEpLrV2fwukoBBIGQINS7yCAQ02oUjO6k5fYO/ZpZiiXGzpaisiBISAbxNYdzUQI38Fpm/0XQ4//PCDbnzbtm3jIHD08NNPP8WJEyfw2WefgfGdEyZM0KOgPXv2jCtnbMyaNUtvduvWTa9pVFrK8ePH9egm9zE2k/GelEOHDuHgwdjsHoZBqg/Y+adZs2Z6xHXq1Kl2niHFhIAQ8GYCkVlr6j6d/fqNjIU9sqlicHrkbZNKexuBErkbKBfbENQp2sPppkXeOI7vtz6OtUfHOK1DThQCQsDzCQRHBSJomTI4N3h+W5xpAUcmGXNJN9mELqmvv/46SpcurZP50MWWMmXKlEQJgWg8coSxYMGC2khlue+//56rOLGMzzx79qxOIMSDK1c+ePG3c+dOXL+uMn04IH5+fihVqpQ2Wi2v4YAKKZpKBDiCzdFxxvnyXosIgZQgEJmtpu7T2a+LwZkShEWnEPAhAun9MqJj1R/QvuJnLrV65aHhevqU6NuRLulx9eTb96JdVSHnCwEhIAQcJsBssJRy5colOjdz5syYPHmy3s/Rx44dO4IjigmFiX2OHDmCl19+GYUKFUKrVq2wfv16PXpplDUMjC+//FLvMkZAlyxZoo1duvBSjHL6g51/SpSIzUAuBqedwFKpGEfH337zdbRoUBG5smfS2YQnjnwdE4NeRY0aNZA7R2a0aVYTQ4YMweHDh1OpVnIZIeD+BCSG0/3vkdTQxwg0eWhg3NQpZ6/td6r1u84sUFOnbNNTp5TJ+4hTOlw5acWhYbh19yo4DYw7ytA//OyqVr+6a1E6b6DNsvbqYVInJneyJVO3BOLIpXW2DsftT04P43n/DBsZV97WRmn/5uhXL9jWYdirhwrGtlYJHGzIkYvBmBrSwsbR+Lud1ZM1Qx40KflGknzjX0k+eTsBGgWUYsWKWW0q4zdr166tYzytJQriSUayIBqklA4dOmDVqlVgLOeIEbHJoxgjSnnqqadAF14ee+edd/Toap8+fVC/fn19nImDGjZsqLft/WPUnW67Iu5BYOHcmRg08DW0LncN3SrGYIz6aa1ZHMiS8Zau4E2VqHj3yVvYHbEDqzceQNPpUzH8fx+Bo+oiQsDXCYhLra9/A6T9bkmglH9T7WJbs8iLTtfvwvUwTAtpib/Cxzutw5kTd56eh+CjH2PDf5/j4PnfnVEh5wgBuwlE34nE+mOueQXYfTEp6BEErl27puuZJ08em/XNly+fzTJGsiC63laoUEG7xBrzZn777bdxsZohISF6JJOxoYzzpKFrjGrSQK1cubK+Bss5KkbdjbY4er6UN48Ap9Xp2LYpXnqpB77ocBXfvRSDl5sADUvR2HxwncxqCKdOSaB7I2DmSzcw9JEovP3WG3ji0cYPCsmWEPBRAjLC6aM3Xprt/gQypc+OztV+QjE1hcryg0OcrvDyg2/r0c5nK09DpvRqEqcUlPPXD2Hh3r5xV/jt0Dson79N3Gd32eAInz2SJaPtB1aeb68e/6wBSV6uaM6aSR43Dianh8ftqVNy17NXj1EvW2vys6c+ts439tvSc+lGOC5FHwONTqsSFQ5cPRZ7KF8NIFPS99OqDtnpcQQKFCig63zjxg2n6v7jjz/q8+hSmz179ng6aFT+9ddfqFWrlna5past5emnn8bAgQMxceJEbYQyyyzn1GQc6T///BNPhz0fjLhPxpCKpB0BusXSNbtTPeCagw47g1oCg1rGoMX4jWBcbkxMTNo1xJuufGpdbGsy5Qby2ffb6U3N99S2iMHpqXfOznpzImumcucbVk5MzbiQLl26oHFj8964MZECrzNo0CA7awWMGjUK48aN0z/EnCfNEGd0Gec6uv7iiy8wbNgwMH39o48+avV0/uizniyXMaPFq0xVmm/BmWyiQYMGaN++fbzzT548iV9//VXH7hw9elQngOAb8meffVb/8MQrnMyHZgFvo2iuWli4py8uRh9JprT1w9tO/YSTV7biuSrTEJDnYeuFTNi7SBmbN+5cjtN0KmonVh56D4+Vi41lijuQRhuWLp7JucvaU8Wk3FLtOd8o067iJGPTpXVdlXSKi6tilh4atmYwsqUnWdffg9PVvBQjY3G0XWtOGn1X4cr5KU6AMZcU/i45KkwWZMR4MmlQjhw54lTQNZZZbWmQ0pik0DWXwt9WzsP5999/67hQ4/jDDz+MFStWxGWz1YXt+HP16lVdymiLHadIkRQg0LRxfQSWB2b3dl752sFA+y+Aoa+/hLFTZjqvSM6MJbAsMHZdpDnQLjh2W/66PQExON3+FjlfwcjISNSpU0e/hbXUwpiQpUuXWu5yepuTa3OCagoz/hluQMkp5IMA5z7j+YY4q8s439E1336zDrdv37Z5KsswXofr0aNHxyt3/vx5vY/xGZYG59y5c8H4Heq2FCaWoKFtK2bIsmzC7bJ5W2KgymLL0UPGZzojZ6/tw1f/NlVJiSajyUPmx5T8emAwwlTMXkJZe3Q0KhV4EiXzmPeSI+E15LMQEAJCwCDA5C0UTk/iqDC7LUcxGXP5yiuvxDudsZ80OPli1LhG1apV48q8+OKLcQansbNmzZra4Ny1a5exS8d6Wn42DjBu1BjRNOper54aWhNJEwIvd++Ci5cisfZD1y8/oyeQ980f0bJdV7Ru3dp1haJBCHgYATE4PeyGOVJdxprQJeiZZ54Bt3Pnzg1m5aNrh1mSPn16/P7772Ccib3Gpq1rm6nL1jWc3T9mzBi0bNlSL0npoCHfqVMnXWTs2LH6TTeTP+zYsUMbm4zrcVayZvRH1xrzsebIaPx++D1n1WDp/oGIuKISCqnRznR+5nQBW0/Nwt/HJtis028H30H/+v/YPC4HhIAzBJiIKalkTM7odPUcvmjiVAl62bIJ6dNnQKMmTUHDg94cliNmrl7L1vnd801HczWqsuO4rRLevT9Xrlx4/PHHtaFHL5Vs2ewPJTDm73zppZcSQfL39weNzuXLl+O3337Tx6tUqRJXjm61TBpEd1pDDMOUvwHFi6sMM0r4u8wlodAbiULXS2a25ffloYceSlhMPqcCgfnz5+OHmT9jYkdzLuavvoKTXgAG9uuKdZt26czH5mgWLSlNIF6fH
vKvem4C6jVorF86VatWDUY8eErWo/CVleBIOSXHzcPqbyA3PUrMedr0qCb7TmX37dunG8sRyLx58+rtJk1UpLsV4QTYXLJkyQL+gBoZ8liU/2z79+/XP5aMR9mwYYM2MKnr2LFj4I9w4cKF42ll3MOePXvAYHu6q7JMUkJ3X7orWeq6dOlSkmnF+Y/O+lLofsSMgRcuXNCuTfyhNlyajOtyBJXp8tkWusE6Knx7zbfSxhvohOfzwea1117Tu/ljZWlc1q1b17RR5UdKD0cxutiq0c7LN04krIZdn0MifsDJKOViq+I6S+Sub9c5tgqduboHi5S7b1ISHrlezQ/6MVqUejepYnJMCHgsgT/++ANB7w1G6I59uHX7LsoX8kPj0jHK4AS+DJ6HPRH3kDlTetSpUQWDhvwfnn/++RRra0CmcAQoN0C+WvTVqLHevXtrg3PdunXa+EwIm/fLmvz000/gYksYKmFL+DvI3xfL354XXngBXAyxJ46Pv2X83e3fv79xmqxTmcBnk8ajW0PGYJp34QGBKpnQ5kg9rytDdUTcm4Du098fitBtu2L79MLp0bjUXWRUffoP/8xXL/Rie9caVcqgW8/+GDz4vkWYAs3Kcvu0du2m6u1qBgBPFDE4PfGu2VlnxpbQ9YcuQnwra03o2jpgwAAdk2J5fOjQodpdNF26dAgODtYuo3Qv4g8xfwgpnJPMMGCZzS8sLEzvZ5yiMR+Z3qH+0N3266+/BudAsyY810ghb+hauHChdk21Vp776NJKg5PxMdTPibcNKV++PObMmaMTO3DfxYsX9cgj09obQuPZXmFZ6uecbMuWLbM6Svznn39qVywysTQ27b2GI+Uq5H9cZ7Globf3nHPu0aeiduDzzQ3wbOWv0aB4P0cuH68sDV975txcqaZKoWtt4RzV4p0vH9KOQEREhH5RY4zIMQ6NroQclbGMrU67GnrGlceNHIQhQZ+hjwqPVmFaaFQGCMhnaerdw4lLwMYjd/Hbrp3a86Hni+3x/U+/eEYDPbCW9OxhTCVHLDnamVrCjLWuyowZM3SGWxrNImlD4OD+vegTPzWDyxVJnw7KYLmN0PXWX3a4fAFRYBqBcR8Nx5D/G4Ne8fr0ByFgfJV3Xtl9/4YDW8LD8Pbbb2PFkp8x75dVcQM8plXGSxSpr7+ItxJo06aNzpDHmBPGGFqbQPqtt97Sxma7du30CNzUqVP1Dx3dQY15yAw+NBjpKkQXWhqDfCA13IqMMlxzhJRupZz8mj/2LDd9+nTt1mtZznKbI6oJdfFhYebMmfEWGqMU6qUbA0dln3jiCb3vww8/1POk0TA+ePCgbrORUp6jCTQ2OacakzrwWlmzZtXn2fOHCX+on65URkKJhOexLhTOyZYakjNTIXSv9Qs4N6MrsmjvK+DijPyybwCORW6w+9TfDg61u6wUTDkC/F+uqN7K8v+O//sfjRmJkAO/4qeFX6nU/y9pgzNv/lxo176t/n9JuZp4tuYzZ86gYunCmPPDZ/h7iJq7sRvQWTkMBORL3K7iysnj+Tqq7+oRu6z9fSmKFMidpBdHYi2yx14CDB3h787GjRt10jx7z0vrckwyxzwAs2fPRoYMMiaQFvfjv//+w7mLUajq+ruDRNXntCmhO2OfFRIdlB1pTkD36WWK4ZOxH2PlG8C3SfTp+VU+sSdUCPeItsDvquz58K36uTQpD4k0b2AaVkB6szSEn9KXLlu2LLZs2aINL47KcWGCG7py0OWHMSLfffedNgh/+eWXuFE7ZtWj0cjRUcukCXQlpRHJUU9D+OY44UjhyJEj45VhDArjH+mKa7icGucba8Y1JdTF+c+4GLJy5cq4mFSOaFKMBDw0lBk/Q6ELLxMmcYSTxiVdmDjKyznRuM+IYWWsDd9K2Ssc9eVoKrPxNmvWLFHWWiPJQ6lSpeKppKE/adKDLKRDhgyJ1654hZ34wBg2Tp3Ckcartx6M8jqiavOJqWrqlK1qtHMakpsyw9C75eR32HBcpd5zQA6cX4GN6pxGJWJdjx04VYqaQICu5/0GdsXPP/yCJuqHtLx6N1IgAMgf8GA07oZyYDgXzh/PKOxYu0J915fjzTff1MlSTKiC16ig1wRdKKd2Bfo2daxZPRoprw+1jF5xRU+5cO7cOeTPn98xJVI6WQJ8QcnfHcMrJ9kT3KAAvYDoVVSpUiU3qI1vVsHw1qpazPz2M+Nt1PVbOsabSR1F3IeA0ae/Ggh84eC78daVAS7j1OB1165ddeiVMWWS+7QwbWsiBmfa8k/xq9Ngo2H5888/491339XTeGzfvl3/oHFN4T+ZtVE5xqJYSvfu3eMZkpbHLLcZK8kRSCYqYhynIYz3dFbozsrr07jl6IwhjM+h8Fo0kA0xrsu3xcw4SOF0MIaxaZRzZM23zUyHz4cYjuCyjZbCRBUUY/4041h4eLg27I3PbIelIW3sd2VdueBTauqU2sro7IOD5393StXxy1swZVM9HddZt1jPJHWcVEmHaOA6I0wgVDF/W/hnVa96RVKNAEf4mR2xRHU1Mbl6T1CorPVLZ1Ge5iWU1zOXWu3u4c8voecWnL9wDv75ayNKlpT7RnLNGlTT8V2OGpuW1IcrT8+sGVX6h0Y1sPvQSctDsm0SAbqJe5LQJdcMt1xParO71ZX5L6oUS4/MGe6aXjV6PxTPl1G/7Gb4DfNrMHcFl4TbjiS7Mr2iPqiwWaNaelT7i87ON/7t1gBdp/lbyxddqZEkzvnapu6ZYnCmLu80uRrfmPbs2VNnq2UHx1E/dqiGAVikSJFESX84rYcxebZRaXt/uN977z0dFM8RUcagUD9HE50VjlAydpJGJ0dpjaQ9TEhkxG0m/IHmyANHVRnHSpcqCt1iXRWOXtLo7NatW6L4Uo4oU5iYyFLatm2rR1k5usupUVJK8mQpgV61V6p5L4erBD1jnLrMvZg7mL/nZZVQaBueUtOn2BLOtxkTc8/W4ST337p7Hb8degddqs9NspwcNI8AXXz41vWRfkDdZx3T++irQPU2wLzhpxAQECA/ogrf22/0xd1rp0zJYPnmo8DSnafw3tuv4qNxKdc/OHbXpbQQEAIpReCu+ulkDorVq1eDyRH50p9rJk+0FOaosGWMJrc/4bzhlnplOzGBtwe/iT0H/4Oaxc1l0X36DmDIgJfw1fRFLuvzFgVicHrLnbSjHZy2hNnyOK/k5s2bYbh+MskPRz/NEGaa/fTTT/UoIEdI2elxIm1XDE4aaYyd7Nu3L2i8GUK3YKaZ5wjmF198kcjF1SjH0V0KO3gzhA/uNHznzZsXT12tWrX0Z9aX7rf8QUgLeazcaDXaWUtnjo2+E+lUFTb8N0VNnaKy2KqpUwpmV34iFsKRzRNXQiz2OL658/Q8VMr/JGoXVRlWRFKUAB9m+J0N7OO4sWlUrKBKgjNAvR/48kXgyfatsW7NBuNQmq4vRYfj0o1Yz4nS/s1TpS58SBw/+Rv8ogxxs2TC8zGo/eFXKF2pTty8xmbpFj1CQAg4RoDuzHtO3sVNZf9lNvkpOfwCcOrSbSxbNV3P
k25ZMyZxtDRAbW0zTMcwUlmGIUQJhd5gCUdMkzNSWd7V6e0S1iO1P5MhBwX69etndwy07tMnTMIHKkmUtfh7Z9owoSNUn74YNRp+HS80zRld3nKOyf9K3oLFO9rBEUAGvxsjbxwpNFK6szMyslBOnDhR/3NaGkjsxCw/20uE06FQOAJovGE7eTLWVcwyjsaI++TcZIahZu0a7FiZRZdurNbSiLdo0UJ3LlOmTAETIBly+fJl0MWVLrSGfs5xxjhRzvdJoeuvs0K3XsbZcITVSHPPqVaMOdrovstOz5ifiWUYp5VaUr3Q83FxnWEX1zp1WU5lMvm+i22tIl20jk3Hv8K/J75xSl/Ck5hAqGKBtsiWMW/CQyn22T9rQFySJW77grR95lEUVLm26ndwvbWdPlHu6702ard2y/hu1zU7pyEkYjr+DBupTx7bOiaxkjpBABcbwmzaDCewjEu3UTRu94K5sxDUDmhfI26Xyxu1lNcndX4xZaIYnC7TFAVCwDUCZcqoN2xKdqtHFyb5MVOCDwI5s2VKZGzyGnxm4eJo2AJf6lsaoLa2+QzChIp8vjOM2YQhQOwL7TFMjTJcG4Zt9uzZzURlW1dfK339/dI0vvnMOGzYML2m4ZkczwULFqBV1Uz4vydv2b6mg0eMPv3LyePE4LzPTgxOB79EnlSco4IcvaQRVK5cOZ2FlpNN03h77LHHdDwkR+KYkZYdBt1omSyIWWjpbktj0NEseUb6eSYjorHFjnD06FgfBcaSUienXGAG3aCgIAwcOBA3b97UBm9CttHR0XHzl9EPPmGCH47UjhkzRicy4vxHdBvs3Lmz7khpgLIOzE7LfR988IFuFxMicR+NX17fWWEnywRECd10x48frxM1MbkQR5CZAZSjsH/99Rc2bdqkL+dKHKkj9c2XrSz61l2D5Qfexl/HxjtyalzZ28r9dc6urmq0cxuqFe6AxfvMG9aJunUajOfsUOXbuOul9AaNTCZZ8hXh93HjX9vQ0TkP60SY8hYHWg2Anh+QL6waN26cqIwn7WAfyf6B/SQNTyYeszV1k9GuHVv+RssGxifz1hUKAQdWHdIvwoyXYuZpF01CQAjYS4CJDgvkzYndEVGmG5yhyiGjTvXy9lbFrnI0Epl0zJnEYzdu3IhngFoao5bbzEVBDzZLY/b27dvx6se+09IAdWTbGKCIp9CJD6wfhc94HEzhMyLzZtDwbNRIZWqzIjv+XYeGhc0zNo1LsE/ftSwMnH4sYdiXUcaX1mJwevHdpv8/DTs+VBnChyr+ExojjJzqg53UqFGj8M03sSNXjJFkFliOAFoanPaMAtAwZDp3xnHSmKTQGKTRxUyXNGZpcNarVw/MZstRS9aPnUFCocFoxEPSWOViKRxh4eglDTluMzaVE2ZTOKWK8Q9uZP1jm5itluXZftaBWXztebiz1naOrtJgN6Ze4XWZDIgJi9544w3tdmu48/IY42dZz9R+SH+ywjjtYqvny1QGpDNCg3XTiQfJmpzRYe0cZrqtpEY5qxR82tph2ecige+mf6Wz0QbUdlGRxem11Ejcf5tz6hcuqf1dtqiGKZucGokvxebPn6/7LfYF7CM5hyPXRj9pXIxZfjduD0PVZ4w95q2ZEfP6jVt6BMLVDKXrrgZqDwy673V/EIVgXmVFkxDwcgLlK1TC6v3/ort1G8Wp1jN2c8PRDGjxXBunzk+Jk/icyDwbXBwVGnWWBqitbT4TWRqvtlyAaaA6YqQa5S3rzesYQmOawmdSzm3L2QU4U4PlPOm6T9+6D71TILrHyHLM51LjedSom6PryKw1MfLXGfq0Nq8XdvR0tyjvp1z9bI9Nu0UVpRKuEqDLBF1r+dbOVtYzfg3o+mq4dLh6TT7E0d2UxqthrBkdATsJQ1g3dj6u/jNSHw2/Cxcu6LkFbRmRLMMOhgmRjHoZdXFmzTd8dN+19maRTI3ETEy4ZKtOzlzXmXPOXtuLhXv6gq6y7iT5s5XD4CZ7kc7PufdffHFhjFZLd/bgzvJ/gd/LDh8Apes/2G/G1qrPgdzRjbF2Vdp+l1aFBSXtUptMYzlFEb0h6GVhCPsFfo+40Dvkueee08YnvUA4xUbrR5ri6mT15JgCkn1gOvw0Z2HcFE/OXsLyf2Lt2rUIDAx0VpWcJwRSnYDl9zet+nS+hOK83RNVLN6gluYg+GwN8NXm/Fi3aTcKFVLDXz4qfD60NEAd2bZ8wU989BizNFIZSrZr1y79ItEWXnqfcVCAAwChoaF6MGCTSmPSoJStM5zfn/11P7wzfITOneK8FugXiBzkoHhqn+7cE54r1OTcVCdAI7NixYpJXpf/tByFNEv40MZMsZZiaWga+1k3W0awUcbeNeMHkoshsKeMvddjObqBWDM2eYxMAwICuOkWwuQ//ev/g1/2DwSTArmLnL9+CMuVa227ChPcpUpeUQ/+6FKKVDC/Of5qNG7/byoYyQ2Frvhc+Hbbcm1t2/CgsGwGH4YMofcF5/9llmzO78s+rFyRjOrwAwPVKGvGunLRDHqE0wxdokMICAHnCTD0pudLnTD05zmmGJyXlHPRIJV47ffff/JpY5N3hM+HDLkyclw4cpf4ctDWSCoNV3q68eW+ZT+eUD+ny2OI1vDhw9GwYUN9uGrRhKXM+Vy5SIw2as3R5tlaxOD07PsntRcCDhPgdCfFctbS82hyGhR3kH+OTVSutU+ibF6TXiW7Q6PSuA6Ml2ayoKy5za9IgQBgzX/ntUFHt1RrwtF/W8afYQxaGoH27rM852JUBKKuAXeU/Tf5TlZtZFqri+U+PozQjYz1ThiDZFmO2wwp4FQF9A7h22W+XNoSvCRhMdM+n7lyz6mHMNMqIIqEgBCII/D9jNlYsWIlWoyPxNrBcbud2uj+A/DOq531/IxOKZCTNAGGSCXlAsxZEuiJYq1v57k0WDm40r59ex1yRo83hlqdjQJKZTYf8pkoP9R2wl3Z/JqkvUYxONP+HkgNhECqE6hbrGfs1ClqipPjl7ek+vWtXZAJhAY2DLV2SPY5QWDj5vUpMrrJqhS473rEt8McybdmLCacU85aE/gAQMOPi2EEGmvLfdxmuv6EZY9dXYuwqJXIkAnoUe/HRMet6eJ0SoYwnp1vuROK8WDChxLOYWxMxxQWFqbizW/jojJy85qckPH8VeD4hTsoX97chCIJ2yafhYAQsJ/A3xu26KSLnVVuu9m97T/PsmQLlbOP2WmX7vjZcrdspwABjn4abtjs6+liS2nevLnux5nXpFq1anFXZp9O2ROhDM78cbtN2dB9+sUYqxmJTbmAhykRg9PDbphUVwiYRaBozpoY0OBfLNr7CjafmGqWWqf1nFTzfq4KG6GyyI50Woec+IBAeuW2pMIQU0QMvTQ4mXLeMOyMdUJj0dhvrI3jrsZRrwq7rmI4V+o2dmjdIXFbo8KBq8cAeslmVfOYWBibLMx6GA8nHPlkorRixYrpJGY9VJIxxl5bCqdLKF4wNntls3KWR1zf3n48VgcTj4mYTOCkCp4r9ojJSkWdLxDgtHIcFev6TCCyv75
JGZ0xdk+JNGk1MHSRH1o2rYf3nm+ls+W///77voAtZdt4al2s/kzKfSdfzXjXolst7xdjZI1RTBqZTGhpTXSfXiiPykgcibbVrZVwfp/Rp9epU8d5JV50phicXnQzpSlCwBkCz1b+GsVy1dKGpzPnm3nOn2GjUDF/W5TIXc9MtXG6IqK2Y9mBN/XndhUmgka3t0qTxs2w6tP5qnkPYhLNauvZ2JfCOuGO5YihWfpN03NwOhB6/wVG27VA0cB4qmlwGrE+TBDEWE1mp01K6tWurh5O1sNsg3P/aaBE0fw60VpS15djdhK4uBsIm6MWFTiX4yExOO3EJsUSE2AfN2/5Biyc/R1eGzQIv+y4iiZlgMoq7q+mSn2RhWHdSm6qCBXO3blbjZatPuCHVQezYdynI/H6oMGYOnWqTlLDRImcqkPEBQLLAmNPLtIcaBccTxFfgDIRUO3a9qdmr1e3jrpn6u2AycI+PV+e7DLCeZ+rGJwmf8FEnRDwRAINivdTxldsXOepqB1p2gS61varp4yDFJAbtyNx5GKw1sxtb5aaNWviXPg9RJ1TE40XMLel544CJcsUUQOGmcxVnMraOOJKl1rO08bMhfZIjYat8PPsULza/IY9xe0us3R3ZtRt0NTu8lLQCoHoM7EGJo3MMxseFKDBKSIEXCTwXOdeaNC0DSZ9+hF+3LgaofOPIir6DiqqZF/pVWjBnpO3kSt7RjSsWR7VGz6Cv2cOBEdIKUZSxo8//ljHtk+aNMnF2sjp1ggMGzbM2u4k99Wo1xTjxgZj05G7aKjyHpglS3f4oWmzQLPUebweMTg9/ha6VwOY1bFVq1Z6fk2mr3733XcTzWUXHh6u5/wsV64c6LZmKTyf7hCciN2azJkzBwEBAXGZxYwy9MNnNsnXXnvN2KXn92TweK9evVC6tPVe5K+//tIZfDl9i69Lidz10bvO7/jkb+VCdPdqmuE4cikYf4WPQ7OAt9OsDt5w4erVqyNjpvSI2HcXFUw2OC8o98/KlaqmOaZWZYKUC3aQ0/V44YUXHD53xIgRWPTzNxg45yQmd3L4dKsnfK7er4QeT4+QpSrYywTpnm86mqskJzvuu+maoNKNVcQ8MDLDl7hxPaVq3kCAhuO4z76KawpH07jQJZ9ze7PftSaW7vmfffYZONI5bdo0a0VlXyoT0H36nB+U+/MxrDPpsYN9+qp9Mdiz4BNTWlP4ysq4xFU5bh5WOgNN0ZuaStKl5sXkWt5N4IcfftAuaYzL4sjH6NGj8cknif/Z3nnnHX2MSUASCl1N+AbQljADGRN9JJS5c+diwIABOqOkcYxzFbEOhw/zn9O6nDt3Trs7REQoHxgRLN7bP02NTeMWcJTz3LX9xkdZO0GAMSs1a1XD4c1OnJzEKbdUev/I8JyoVrlWEqW8+9CoT77AFPVAYcTouNLa4xeB15Xn54xZc+0eZU3uegGZwhFYXrn7lUiupAcfj1BxmX/3A6b7K//FzoAYmx58Mz236ozP69u3r36xbcvYZOssDU5+/v7779GtWzduirgBgVEff4a/DgEjf3W9Mkaf/v23X6Ny5cquK1Qastw+rft09usZ0nBAwJXGiMHpCj05N47AyZMntbH56quv6qkEXn/9dT2q+OGHH4JzHhnCEUVOqMxR0KefftrYrdd79uzB+vXrsWXLFuzfn9jYYJrrrVu3Yt68eUg4+e+SJbFvtg8eVKngHJB27drpLGZDhw514CzvLLr6yAfYfXaxWzQuBjF6bk63qIwHV2LI4OHY8yewdal5jdjwM3/wcuGtt94yT6mHaWKcZ6fn2qLNZNd/QgMnpMPrfbvEZcL1MBSpW13GZYa8D8xVT12/tgT2qRGiW5dTtw5yNSHgBAHO4Ws55zgTlPFFecLnICdUyykmENB9esfnELTMdWWBakrxujUqoGcv9UJMJI6A67+Wcapkw5cJ0CWBQvdVCqcVmDx5st6mWy2FHeybb76ptydMUP+RCeS7776L2/Pzz+qpNoEcOHAgbs+qVavitjk6SSOVwrkHHRGOxHJkdNasWXE6HDnfW8ruP7ccfxz+n1s1Z9+5ZSp7rrgcuXJTOHn5i92ew18/uKLlwbnhW4F/5wNff/G9z09ePnvBMpxV82bmGeSHbU64ri7ZDmR73Q9Hzt3D5KmzHkCWrfgEos+qLCxTgF8eBhao6Qy2fghcVkMRIkLAwwhw/khL4Ut0hgLxBTxDiUTSlsDsuQt0BfyUnbhqn+N1+Vt1S/7qEfeIypuwcvV6xxV4+RlicHr5DU6N5jEWgcZikyZN4gLjeV1mfXz88cf1iCRHLn/88Uc9Qjlw4EBUrRo//os6vv32WwQGBmodzOhGA9VSdu9Wb7fvy8KFC41N/P7773HbISEhcdv2bhhZKWl0+qJcuRmBhXv7uGXT6Vp75aZK+yfiNIGfZi5Arhy5sTyxd7vDOucNA956e5BMXn6fHKdU6devL2orG4iuWNdvJY80Urkk95wBPKPCwAKbN4ubliX5M32sBBP//KFi+X8sBGwYqJIAyQOcj30DvK65zD+RUGho/vPPP/rZJzIyMuFh+ZzKBNinvzNY/cZNAt5Rj5lXbthXgeHKya7ZOJUMu0RpXLhwAfny5bPvRB8qJQanD93slGoq39BRmI46oRjxloxxoNtqzpw5YYyGWpZdtGgRoqKi8NJLL6Fz5844e/YsgoODLYtg+3Y1JHBfaBxGR0frT7/88ouxG5s3Ox6wxnn3KIxBtWey+riLecnGwj19lVF3yi1bc+POZXGtdfLOMJHWihUrwAQVbVq1xZ7VAA3G6CuOK1yv3sV80ib2vPGfJo6hdlyj95wxduLX+qXX7N2F8fCEbOg1U7FS78BW7gFOK9Zno4A1B4ApKuSwn+IYOCkrVh7MrT1Afvsj2HtAmNGSiLX34zLzqLjMTiou0z1c/M1omugQAkxemCFDhkQgbty4oV/Gt27dGpJPIhGeVN8xdtxE3acvPVwMzT/LmahPP3cVOt5z6l/AG+q9WOvPs2HMCmDw4MHYvT8MefPmTfU6e8IFE3/zPaHWUke3IrBr1y5dHyPtt2XlOIk5Dc2xY8fq3V9//bXVf0bup3C0kecwOdCMGTPQsqWK07kvhsH5+eefazfYNWvWoEWLFqDByfNoLDLLLdfWOnVDT8K10TnQ4D116lSi4P6E5b3p8x+H38f+88vduknbT/2MSgXaombhzm5dz7SoHJNeMW7ZWBj7TE8AZoK2fHkyc+ZM8H+s9ZMPY8rzO/D4W0C1+wZkUvWO2AesU57ux9W/eIeOz2L+3AeeBUmd52vH+KC4buN2MFRg/87NWL91Hw4sUq6gFlKlTGFUrFQZj3Wqh969e8dNl2BRxDc3LynPFY5mchFXWd/8DvhIq5k4iM8mln1zxowZdQgSkykyLj579uw+QsO9m6n79A2hsX367q1Yv2236tPVxJoWUu6h/KionlcrP1IXQye2i/e8alFMNu8TEINTvgouE+DDLSVhfILeqf4whpMGJ0c3jRhP4xjXfECmy+0zzzyjjdEGDRroyc/pgk
vjMleuXLo4pzgpX748XnzxRW1wclSUGXEpjFVjjCcNzn379qFaNRXrY6f4qfmzaCyfOHECTH6UMJucnWo8rtjus4uw+siHHlFvutZWyt8WmTPk9Ij6pnQl+aJl27ZtuHz5sr4UH1qYlp9vyhMKX94Y2RA3rNuOyV9MwBsDBmN/cDr4F7+HvCWAgmXUUlqNfip158OBc8di12Gb0uGhgGJYsmSKfqmTUHdaf74UHY5LN1RllZT2b56m1SlUqFDcizVW5NatW3raA64ZJ+7pc5aaCpdxmTQwj6jltLjKmspWlLktAT5nGKFC7A/u3bsHzpdMby7LhEJu2wAfq5i1Pn3x4sXa2+5///sfsmbN6mNEXGuuGJyu8ZOzFQEjY2zu3Lmt8jAMxgIFClgdeWR6cAoNTsZyUpg9lnGhNCp79OihjUGOQDIulNneOKLJbLeGPPHEE3EPdBwJdcTgpA7Ow0mD02iLoddb1xejj4KutJ4il2+cAI3OZyp/5VKV/bMG4NEyI7QObnuqDB8+PF4cJZNPcEkoQ4YMwdtvvx1v98DX3kLdWg3xw/TvsWHzWvy59Ei84/xQoEhu1K5TEx3faWHVBT7RCWm0IyRiOv4MG6mvPrZ1TOJalO+h3oQFxu7PXzPx8RTcwwfKTZs26T7Fl41miLUAADHYSURBVDP6xkNsjGSKq2w8LPLBNwjwZTb7ab58p+cXX9LzJTxfdHNechE7CbRVrveUzMr1PhWFfTrnXF29ejU4hZ+IYwTE4HSMl5S2QoBvgShGTKWVIjZ3GcmCWIDxm1wshcYoDU7DbbdGjRr6cKdOnbQrLY/TOKURahiZTBxkjOhY6kpq++pV5ZSvhEaxL8iivX1x/fYFj2rqphNfo2KBJ7V7rbMVp5HZqkyQs6e7zXnMavjss8/qEX1r2Q35w8j/EWvz4LIRjRs31gu3+SLHmLz8oYceQu3atVGmjBry9AbJGQBwSQPhy6s5c+boEY3z588jf/78aVALN7gk4zINQ/NWpBtUSKogBNKGAA3Ojz76SGfrN0bH6MVFV/yvvnLtZWratCiNrlo0MI0urOZLVqFezDHCZ1LjmTPNKuNhFxaD08NumDtWt3DhwrpahtHmSB2NZEHMZktXWUsZNmwY/v77bxw5cgQ7d+7Uh4x/cI50GtKxY0e9WbZsWb3euHGjccjutZEdjiOd3i4cKTx0QU3O6IGiXWtVPKdIbIIC/v8klCxZsuiM0fxhtEf4tp3ZobmImEeAc+wx4yHdnbn92muvmac8GU3HbgXoON7tx4HYV3TJnGD24UsqY5JhZF4+aLZ20ScEPJIAEyvSO8VSOFUcX7Qz4YzxDGN5XLbdhwCT8NHY5G8sXyYaz6OpUcMbGQtj3f2uNHvJHKlxSdOvERsAZ7paUehLBBhzSTl69KjDzTaSBdHlrGvXrvEWY7STc3Ju3aomAFRSpUoVveZDsmFoGsYng/Hr1aun59Nk3JQhzIrLyZUtF2bNNYQjs+xEmEHOMJ6NY9623n56DtaFf+qxzTp7bZ92rfXYBphUcWZ/pls5E0xwNNMQbvN7bM0QNcrIOnUIMFETY7ToQmev8W9WzaZf6IHA8cCgeWZptEPPjXPAns+BpU2B+VXVfJkfqCRAYmzaQU6K+AgBGioJhd5YjOO0Njd5wrLyOW0J0MjkC0TmSmD/nppyOtdjuk9nv341c+zgSmpe34xryQinGRR9XEfTpuoBQ4nh9movDmbUZLIgY4Ql4Xk0EEePHq2nKzGyzlpOvUKXQRqLdKc1pG7dutrgZAIhQxhHlVD4UG7I4cOH9SZjSL1Zzl07gEV7+nh8E2kwM2ttKf9mHt8WRxvARFr8n+CoP9+U86WMpfsr46WXLFkSl2jLUf1S3hwCfPm2bt26OGVbtmzB3r17Ubly5bh9XrMRpqxaJv85mni03WvaKA0RAilIgC/c2ZdzLaOcKQjaBdU0MmfPnh2XK4E5P/788088+uijLmj1rVNlhNO37neKtDZz5swYOHCgNh4vXbqU6BrMJEvXMs4LaCkVK1bU+69cuWI1mRBHK43zaEBymxllDWnbtq02Ro3PXH/55Ze6HF0dxo0bp7d5XsLFsi4MAKcYI6r6gxf+Wbi3D27ejY1V9fTmLVduwb4kdOXhix1+Rxm/eezYMZ3Mp1SpUnjvvff0KCez1NLYlOQTaf/N4Jtw9ouGGC5YxmePX0cEq/ky+6uAJjXf3OoXxNj0+BsqDUhLAjLKmZb07bu2EY9vlGb/zlAJEfsJiMFpPyspmQQBpojmSCX/KVNL6NqQL18+ly7H+bC++eYb9O/fH9WrV3dJlzufvHT/IBy99Lc7V9Ghuh2/vFlN6aJc9rxcODLGkXdmYWbCCXoRTJ48OZ7rN0c6c+TIgXnz5unYTS9H4hHNowutZTInvh1Pbbda00Fd2guEjADmVgR+bQHsU3Mn30z8gtH064pCIeADBBjLycRBli/DfaDZHtPEWbNm6RAJo8Ls33/66ad4+4xjsrZOQAxO61xkr4MEaPjxbU9QUFC8By0H1aR6cdaZcXDMHOetEhoxA+v/+8zrmvfH4f8hImqb17WLDaJLZp8+fVC/fn09rcbatWvBWOaqVVVsXAJhtkN+f5m1ViTtCdCF39Kl36jRf//9h2A1355HSby4TBU/v3WUist8EK7gUW2RygoBNyZA7xVm4ZdYTve7Sey7DU84y9oxpCs1B1ksr+2J22JweuJdc9M6M9MsDTi6r3qKlC9fXvvhW8aBekrd7a1nnaLd8W7To3iq4hRUyP+Yvad5RDlmrXVEIqK2Y2pIC71w292EU5QwOzNjjDlVCeea/eOPP5LNIPvKK6+4W1Pcoz6hQcA05YbPJSI4VerEPtBachC6YHnMw8kRFZe56jlgpsravf514PQ/qcJOLiIEfJkAYzgZFiSjnMl8C4w+fVlgMgXNOcx+2zJEwtDKEC+OcorYR0CSBtnHSUrZScDTplZgnKgvCOefbPzQAL3cuHMF+8/9iv3nl2PfueW4ceeyxyLg9C6Zitjfjd24HYkjF4N1e7ntTjJ+/HidEIgj7nStEiPSne6O/XVhYie60CYUumDRLcvIzJ3weJp/jgiOTf7D6Uy8zVU2Yo166SDv19P8O2ZnBUYUicH/lMe2Fh+6b5yFfHxxYGLvsvi884N8FfdJyCqNCCQMkTCqwcGV3377DadPn44X5mIcl3V8AvY/qcU/Tz4JASHgoQSyZMiFmkVe1AubcPD879injM/9yvi8GH3E41p1q+Qq5CkKREZ4XNV1hfljxsyzx48f15lnOcLJBEAiyROoW7QHyvgHJl8wlUosW7YMFy5csHm1a9eu6VhbY0onmwVdPNA8RzBGqOlqj11MRhHjMmlgMstspLe7ynqO500yd80nDj/ID+hb9+2tR4Ee04G3Ho1B6QI+cavdupEhISE6w3hSleQI6KBBg5Iq4vKxPNe36z6dirLcPu2yvrRQIAZnWlCXawoBNyJQPn8bcHmq4mQcv7xFj3zS+DxxJcSNaplEVfzuIrA3sESFl3mSLF++XBuaGzZsw
BtvvKFdaQsVKuRJTUjzunLknou7CDNy//rrr3HV4XypHNl899134/alxkZgzmAEtoOeKDzR4/qN87FGJg3N096TSCw1uMo1hEBKE+jeCJjwZ+zyeeeUvproT47AmTNn4vXpnH+Tyfs+/TR15zPPE70dQapPp2wXgzMWhPwVAkLAcwmUyF0PXFqVCcKF62FxxufBC3+4daPKNwGqtUnZKl69ehXbtm1D6LYt2PjvWuzcsQf7dx9FzboV0bBhQzSu/4hO+pBctuPNmzdrQ3Pp0qXo0qULpk2bhipVVEIWEY8n8OSTT8ZrA2Nwr1+/joT74xVKrQ9H5scamkcXptYV5TpCQAg4QeBNNcrZc7oa5WwFlM7vhAI5xTQCCftuviDmHJwJ95t2QS9WJCOcXnxzpWlCwBUC+bKVQZOHBuol+valuJhPxn7evBPliuoUOZejnFdvnUWOTCrRicmyePFiDHyzP67duIychW/AvxhQvBlQsztw4dh+bFbLyn9+wsX/0qNihQoIen8MmETLUpgIgq6z33//Pdq0aaMzljZv3tyyiGwLAVMJ5MkGFDg7Sc2XqRIA3UzOv9bUS4syISAEnCTQg6Ocq1QspxrpnNLJSSVymhBwMwJicLrZDZHqCAF3JJA1oz9qFemqF9Zv//nfdMwnjc9L0cfcospZcwHMWtux6nRT6/PmsF6YMu4HPPZWDKq0TKz6objpW2+rg7ex+usdet7M/q/1xZefT8WVK1f0tCWffPIJ6tSpgwULFuC555QBICIEUphAwZzK4LyinlrvXUvhK4l6ISAEzCTAWM6eMxjLCZSSUU4z0YquNCIgBmcagZfLCgFPJlAx/xPgAnyB/y5v0sYnEw9FXEnbeTE552ilAm1RrVAHl/HSfbZxk4YoVOEWXvkRyJ7XPpUtXwGKVlRJMcd9gzmz1TRB9/yQO3dunZ20X79+9imRUkLABAIHzwAH6v2KwBJqgzGb4YtN0CoqhIAQSGkCPRo/iOWUUc6Upi36U4OAGJypQVmuIQS8mMBDuRuCS+uyH+D89YN6qhUmHTp8cXWatHr5wSHa6MyQLotL169duzaadAOadHVcTaVAoFJgDGYOiEL6GwVx9OhRcM4uESGQJgTKvABwiT77ICvt6fVpUhW5qBAQAvYRYCznyzLKaR8sKeX2BGRiKre/RVJBIeA5BPJnK4+mJd9En7p/YkSLc9q9tXqh55EpvQomSyW5FB0OGp2uSL1G1ZFLJYx1xti0vG7n8fdw8vhpTJ/5reVu2RYCaUMgq4pvrvo60P4f4PldQO3/A3KXS5u6yFWFgBBIkkBPNcpZVU35xVhOESHg6QRkhNPT76DUXwi4KYFsGfOjTtHueolBTGzMpxr5pOvt5RvHU7TWG/77XI9yls8XP3Vtlox5UNo/NlEPt63JmDFjELJpFzp9au2oY/syZgbaDVNvqXv0RZ1aDZBcBlvHtEvpVWFB+DNspAYxtnWiCUCA8j2AIoGxoPLXjF3L31gC/lWBulw+ACLWPJgq5dZlISQEhICbEGCmWj3KqdYB+dykUu5QjepvA+W6uENNpA52EhCD005QUkwICAHnCfjBTxuAjK98Rqk5Frkh1vVWGZ+nonY4rziJM5lAqHyj+AZn0Zw10a9esM2zGLf54Ucj0aAj8CAZkM3idh2ge+3hjVBZbl9B8OoNdp0jhUwikDMA4CKSNIGij6jAY7U0nfrA8PSmeE//yiql9NCkGchRtyGwZPESMDM4ZcZM5VPqw9KzhYrl3DAcE/ZVxuT3nYjv8GZ2+eQloifdXjE4PeluSV2FgJcQKJmnMbg8Vu4jnL22Py7jbdjFtaa18FTUTqw8NFxdY7TdOjlHZvpM99C8l92n2FWwdnvgp7c2IjQ0VGeqteskKSQEnCRw7FYAwsPDsV05EtRwVEfCeE8mGzqz3lEt7lU+a2E1GvKSe9VJamOTwI7oo5i56b7BKfcNb757B7169cJbo6YhICDAJjc54L0EbmQsjHUHY9uXvWQOj2yoxHB65G2TSgsB7yFQMHtFNAsYjL511+D9wDN4vsr3Ksvsc3A16Q8JrT06Ro+m2ksrZOtG5CnB6U3MlfwlY/XR4BQRAilNYPqFHggcDwya58KVjHjPp1S8ZweJ93SBpJwqBFwi8PLLL6NKlSqYOHGiS3rkZM8lcDrXY7pPZ79+NXNZj2yIGJweeduk0kLAOwnkyFQQdYv1RNcaC/BByyh0r7kE9Yv3Qa7MKnOCk0LXWntl4+b1MIxDe8+xp1xm9UIyb3Fg0xb18C4iBDyNQN6qsbGeL6hX7G1V9ulKfYFMuT2tFVJfIeCxBN566y1MnjwZx44d89g2SMV9m4AYnL59/6X1QsBtCaTzy4DKBZ/Cc5Wn4b3mJ/FKvb8QWGooCudQD78OSHjkejXS+XGyZ1y9ehV7d4aliMHJi+d7CAgJ2ZJsPaSAEHBrAkasZ49LQMvZKpPJ025dXamcEPAGAhzlrFy5MiZMmOANzZE2+CABMTh98KZLk4WAJxIo5d8Uj5f7GG823oW3Gu/GE+XHopR/M7uasvLQMJy+ujvJsiEhIfp4vvvur0kWduJgwTLAru37nThTThEC7kjAT83t2QlorWLtup0GGn8GFGrsjhWVOgkBryAgo5xecRt9thFicPrsrZeGCwHPJVAoRxU0D3hHjXquQ9aQt7FShbYcUglg06fLaLNRvyUzN2fdunX1uRdSyGPpbBhQrWZFm/WTA0LAYwlkVZPWVh0IPKWSC3XYCdR6D8jlmXFGHnsPpOJeT4CJgzjKKbGcXn+rvbKBGbyyVdIoISAEfIaA3+0c2LkSerlz96qe53O/mu+TS9QtNfJyXw6cX4nVYaPUjKCx8zVyjlD/rAHGYeTIkQOVqpXG+WNH4vaZuXHhP/U8/lg9M1WKruQIhAYBoSNjS7VVGZCLBiZ3hhx3lUDeaipgWS31PgROqnjPIyrLLTPd3rriqmY5Xwj4PAGOcvbu3RtcP/SQitPwRZmmvCsoRZqria6D9ab8cX8CMsLp/vdIaigEhICdBNKny4SqBZ9Bhyrf4v8CT+k5N5sHDEHB7GoePiVrjo7GqrAgvVyKDtf7LP80athYGZyWe8zZvnkVuHgCaFjvYXMUihYh4AkEirVUc3tOA7pH3o/3fMoTai11FAJuS4CjnJUqVZJYTre9Q1IxWwRkhNMWGdkvBISAxxMo7d8cXJ4o/4mK4dyFDf9NweYT39hsV73aTTBvwRx1/I7NMs4cMIzYOnXqOHO6nGODQN2iPVDGP9DGUd/d3TxHMEa0BY5ddBMGfmpEgvGeXKLPxI54hqn/szMb3aSCUg0h4DkEOLrZp08f3x7l9JzbZUpN81zfrvt0Ksty+4HnlinKU0mJGJypBFouIwSEQNoSKJyjGmoWfjFJg7NBgwa4He2Hdd8BzXuZV9+tS4EmzetBDE7zmFITXaIt3aLN1e64toiICGzduhWh/25A6OZ/sPrvfxGjPLg7Pt0a9Rs/guo1a6NGjRooVEjFPKagBOYMRmA76InCYx3IU/Bijqo24j0Z83lR
ze9Jd1san1dUkLOIEBACyRKgSy2z1TKWM2E8Z1wfpOZ8Dt0YjNDtu8D3PfVrV0PdRs1RtXptVKtWDaVKlUr2OlIASMhz7T+bcefOHTz7ZAvUbdgMVWvUSRWeeaK3I0j16ZTtYnDGgpC/QkAICAFPJVCrVi28M+RdfPDBByilwi0fqu56S/YFA1w2bZriujLR4LYExn3yMYYMHabrV60YUEeFV33xApBBBa5sPLIKs79YhSEqjpfy9uDB+HTcuNgPvvw3YbynYXzejvJlKtJ2IZAsActRzhIlSujy41SfMmTIEL1drZif6oNi8NHjQMb0wJajwfj1h2D8L9wPd+/FoMuLnTB5yhfImzdvstfy1QLWeE553oLndAuenTti8udfCc8kviwywpkEHDkkBISA7xEYNWoU5syfid/GHcMrM11r/+2bwLIxwNhxo8DRUxHvI3DmzBn0ffExrFi3HVOUx2hb9ZIiIF/8dna9f+tPqKkr56rZd4ZOHI/F837El9/+iNatW8cv7KufGO+pYz6/fjDqeUy5BogIASGQiABHOcePH69HOt9991307doWS/8MwWfqJVf7GuyD4vs2dKkfq+LS9RisPQAMmj8P5Uovw+QvpqJLly6J9PvyDt2nd2uPpav+tZ/nvAWK56+K5zThaePLI0mDbICR3UJACPgugaWLV+KKCjVbP8s1BnPUy+Zs2bPincHvu6ZIznZLAt9//z0KFy6MjJHbEfYhMKBFYmPTsuLF/YHBrYCQ4SqRa/qzaNOmDTihu4gFAT/1WFK2M9DmF6DrKTW/5yQ1v2dDiwKyKQSEAAlwlHPSpEm6Dzp5KAR7g4CBjyTdB/lnA56tBez4v3uoU/QaunbtqhchGkvA6NNPHvzXMZ7vk+f1WJ5iwFv9OonBaRWL7BQCQsCXCVSsWBFBQUFY/6MakRoKXHMw+QpdaMerpC2n1Jvk69eiMWWKuNN62/dpxYoVYMbImT2BBf2AEg54ptVUHnD/DgMmdgR++OEHfPLJJ96Gx5z2ZCus5vd8Q80npJILddih5vdUlnqu0uboFi1CwMMJFC9eXLdgkHIO4EusSkXsbxANzz8GAV++CPz000/SByl0Rp/uMs+ffxaeVr6KYnBagSK7hIAQEAIjRozA4cOHkf1uWXzdzQ97VtvHZLXyCKQbbb16dVXCmBh8+umnGDhwoDZg7dMgpTyBQMcOT2O4io/q5sLgGx9spnZVLrZDh2Lnzp2e0Oy0q2Ne5atc7yOgUxjw5CqgYm8VTJUj7eojVxYCaUyAfVDdkrEvrpytSv/m0gcZ7Dp2eEZ4GjBSYJ0hBXSKSiEgBISAVxAoU6YM9uw8hJEjR8aOeE7PhFxFbsFfJYUpEADkV4n+LhyDnruTU59EHs+Mu7f8MHr0/zBsmBrCUvL2228jX7582nXywoULMtqpqXj2n47tH0EN9T346GnX29G3KfD3IeDV3l3wz7+7XFfoCxqKPariPdXSdGpshlsmG5J4T1+489LG+wQ6Pt0GfvduIXiw60ji+qCXO+GfkL2uK/RADR2feULxvGkyz46K534PpJEyVRaDM2W4ilYhIATckECWjHn0vJysGrftFY52NmnSBAcPHsTe/TuxZ/92HFx5DH8eOY0y5YujQuVyeKJ9Q5QrWwHVq1cHs91aSs+ePbXR+fzzz4NG58/K5UbEdQKrwoLwZ9hIrWhs6/hJMvTOIoEqXez96+QMuL/h2oousGvXrcda5Y5mlox5Bnh4wmGdYZIj4iJ2EtDxnsonsKxarqu56Ti9Co3Ps5vsVCDFhIDnEWAfNP+XP/DdS0D2zObUX/dB4494Rh9UZ0Rso6306efPn0f+/PkdgqJ5LlmRAjyPegZPh2g5X1gMTufZyZlCQAh4GIGiOWuiX71gp2r96KOPgouz0r59e6xZswY0OpksZv78+ciVK5ez6uQ8ewgUDQS4mChrf/8F77S6hapqlNssYTKhVx++ge+XLNAu2Gbp9Sk9jPespt4CcLmg4j1peB5Ry5UjPoVBGuv9BNau+g3PqxdpLzcxr626D2p6E98vnuv+fVCdIJsNX7JkCZYvX47OnTujY0cVJG+HrF39B56umRI8b+GrOT+6P087GJlRRGI4zaAoOoSAEBACdhDgKCmNzpMnT+KRRx7B0aNH7ThLirgTga0hm1G1qPk1qqjspQOHwxEVFWW+cl/TmE/NC1F/dGy85xN/qERDyvddRAh4CYGtWzagUQrkztJ9UNhxj++DfvvtN7zwwgvImTMn+vfvj+Dg4CTv/NZ//0mxPj38xBntGZVkBXzkoBicPnKjpZlCQAi4BwFmwF27di2yZ8+ujc6QkBD3qJjUIlkCnJ9tT9hpU0c3jYtWj004acrDychTQfBTmXMDxxvafXhdXM1D0+xbHwYgTfcmAroPOhyBhilgcJrZB7kD86tXr+os4C1atEBAQAAYGrN3b/wYVc3z0IkU7dNDQ0NdxhGer4fu09mvR2ZTw7EeKOJS64E3TaosBISAZxMoUKBAnHtty5YtMW/ePO1m69mt8v7ab9++HQVyZ0AJ/zumNzYgn0pClTO9Njjr1FH+ciJCQAgIgQQE2AdlzphOjXDeS3DE9Y/sg/Ll8AOvwYR5nijR0dHxqn3z5k39+dixY3qqklGjRqF+/fp46aWX9Cgo20qpkgJeK7pPV4m0aXDSxdfXRQxOX/8GSPuFgBBIEwLp06fHokWL0KdPHzz22GN6LrQXX1TJT0RShQCnIaGxnyVLFmTLlk0vOXLkABe6YnEE2tjPNT/TNSt/jpRzDKpaPIMpI5ypAlAuIgSEQKoT2LRpExqVz66umzKu91WK+KF37956SfXGmXTBjBkzWtV048YNvf/ff//Ftm3bMGDAAFSqVEnvS4kwCSqmXjNGOHUlPfyPGJwefgOl+kJACHg2gW+++UZnsO3SpQsuXryofwQ9u0WeUXtmE86dOzfCwtS8jlYkXbp04EsBPz8/vdAVum/fvhi18U8rpc3ZtffkHfQuW9YcZaJFCAgBryNQr149fDLmeoq1a9/pGPzvf/9D69atU+waKal42bJlmDBhgtVLsD+/e/eu7ve7deumRzivXLmCJ598EvtOAZWKWD3NpZ17T/nhxSc80wXWpYZbOVkMTitQZJcQEAJCIDUJfPzxx9rofP311/W0KYw1EUl5AsxiOHHiRBhvvi2veO/ePXCh8I0/XwycOHECr732Gk5fAQqbnGA4IlLN5nHlLsqKwWl5G2RbCAgBCwJ8UXb95l1s/Q+o/ZDFARM22Qedi4rBE088gQYNGpigMfVV7Nu3T78gtLxyhgwZcOfOHTzzzDPatfXZZ5+NO8w+nbI3BQxO3acrngmnSYu7uI9tiMHpYzdcmisEfJnApehwhEbM0AjqFO0O/6wBboNjyJAh2ujs1auXNjonT57sNnXz2IocnA4cjL3faDRRBSjFf9PMZBJjxoxJsnm8D3wRQClevDhKF/NXb8MvmW5w7joZW41y5crFbshfISAEhEACAroPKp4P/4ZfMN3g9Jg+6NcWsVTyqmzUjSclIBT
7kaESfJHYqFGjuHhNf381/1QCieVZAPtOn0twxPWPBs+aNeP/7riu2TM1iMHpmfdNai0EhIATBGhwrgoL0meW9m/uVgYnK/Xyyy9ro5NzdV64cEHHderKyh/nCESFAxHBsefejNRrTklDt6ulS5dixYoVyJQpE27duhVb5v5fPqxw8vBZs2ahefPm8Y7xbfXeU2vQokK83S5/OHgWKFm8MPLmzeuyLlEgBISA9xKoVbsu/j36O15pZm4bdR9UrKD790ERwbENj4mxCoBGZPfu3bXLbIUKyXfUterWw76I36zqcmUneRbImwsclRYBUi77gdAVAkJACAgBhwk89dRTWL16tc5iy2RCMi+jbYRl/APxaJkRerFdKvbI/PnzERgYqEcphw8fDmYKXrBgAfr164fMmTPHO71NmzbYsWNHImOTharVbYZZoTnjlTfjw+LdOVG7XiMzVKFkpnA0Lw/ULGGKOlEiBISAGxGoVrsR5oamx1+HzK3U4l1ZUbt+E3OVprI2GpiMy2ccqj3GJqtXrWZ9LNmRAjx3ZsLDzVuaQiDL7dO6T2e/nuHeVVN0prYSMThTm7hcTwgIASGQDIGmTZtqo/P48eN6rs7w8PBkzvDNw6XzBqJVmSC9JCRAg/Gvv/6K2z1v/jydkZCTgjM504wZM/Dcc8/h6aefhpE6n4UZP7tkyRKbb/l5PCZbYfSYHqfa5Y0xK4HNh2/i008/dVkXFfTINx3Bg4FJHU1RJ0qEgBBwIwLsg6pVLodBc82rlO6Dwu6a1geZVzPHNPG301HRPKtUMJ/nkRjTeBa+slL36ezXc9w47GgT3aK8GJxucRukEkJACAiB+AQqV66sRzmzZs2qp++Q1Orx+Vj7tHHjRnD0smrVqmDcDI1OQ+bPm4+vvvoKjz/+uLFLrx955BFtXHI6lIULFyIoKCjecWsfPvv6R8zYCCzcau2oY/tCjgHDFwOzfp7jsXPfOdZiKS0EhICrBD77ajq2HQe6T3dVEyB9EPDZl9+nAM+50qdbfD3F4LSAIZtCQAgIAXciUKhQIW10MgaEc0b+8ccf7lQ9t6gL3Y8HDRqE0qVLo3HjxuAIJrMRcq41I9lPchXlSOfWrVthmb0wqXOYwZGGaYepwHkXvJtU3g/UGw10eqGjrnNS15RjQkAICAGDgNEHzVQvvn7fY+x1fH3i0v0+qOOzPt0Hmc6zw1M+zdPaN1EMTmtUZJ8QEAJCwE0IMKX74sWL0aFDBzC2cPbs2W5Ss7SrBpP+cE7MwoUL49FHH9XGZZ8+fbB7925s374dH3zwAThfnb0ybdo0OJodlm5Y7ds9iQLKxen79fZe6UG5Pj8CpYYDBQvkx+w5JvrGPbiEbAkBIeDFBHQf1L49HlMJzd9b4nhDhywESrwL5PXPg9lz1QcfF1N5znfihng5fzE4vfwGS/OEgBDwDgLffvst3nnnHbz44ov44osvvKNRdrYiOjoac+fORdeuXZErVy60Vw9ZR44cwbBhw/R6w4YNertKlSp2ajSn2C9Lf8Xvv/+OXjOBp74E1oclr/fPfUCdj4Bv/wEGDhyIM2fNT8effC2khBAQAt5A4JdfftF90OgVQLNxsCuR0GGVPbXt58A45TDTpUsXXLiohjlFNIH4PP0c5/liJ+Fp47sk06LYACO7hYAQEALuRmDs2LE63nDAgAF62hRm4vNW4bQwnLrEmMLk7t27aNeuHcaPH68NTrobu4O0bt0ap0+fxpCBvfHwJ7+iQuF0qFDoHioXAWoUB9Kr17qcVHxPBNfp1PoeWjatjw1zJuk54tyhDVIHISAEPJdAXB80qD+aj1uMCkUyokLB23F9UIb0wP7TalH90L4z6bBfLaVKllDTPn2gDU7PbXnK1DyO55uvKZ4LUaGo4lnAFk8/xTa94llc8fxQeCZxS8TgTAKOHBICQkAIuBuBoUOH6rk66UJKo+yzzz5ztyo6XR9m5TWMTI4cMmESRzOZUZbrnDnNn47E6cpanEjjd+bcZejQZSn279+PAzv+wd9q/e2mCPj5+aFKueKoXL0KenVpgrJly2rD2eJ02RQCQkAIuERA90GzF6FD5/t90K7N+HvfHtUHnQD8gIqli6JSxYro+ExjlC5TRucEkDl/bSPXPH9egA6dDJ7/Kp67E/CsoHg2UjzLCU/bKOOOiMEZh0I2hIAQEAKeQaB3797a6OzYsaM2OmfNmuUZFbdSywMHDsQZmX///bduF43LRYsWaSMzfXr1et6GhERMR2jEDH20X921iUsVCVT+q/d35wxIfNzkPaw3F+AdkzWLOiEgBIRA8gQe9EHJl/XYEnVGxFY9Vft0j6XlNhUXg9NtboVURAgIgZQmwHkbx7aOSenLpIp+ZmJlhtbnn39eT/Uxf/58cGqPlJDIyEgcPHgQBQoUQKlSpVy+xLZt2+KMTE73UqJECW2ovffeezoxkr0XuBQdjiMXg20XLxoIcBERAkJACAgB7yBQJ8g72uFjrRCD08duuDRXCAgB7yHQrFkzPW0KjU5OmzJv3jyULFkyXgOHDBni8OTTTMLDbLjbdm/EoYOHcTbicpzOLFkzokyFEqhaqSYqVagOZvazR9avXx9nZO7btw8VKlTQRubEiRPhzGTd9lxTyggBISAEhIAQEAJpT0Cy1Kb9PZAaCAEhIAScJsDMrGvWrEGmTJm00cn5JA0ZM2YMxo0bh5UrVxq7kl1//fXXaNKkCVZu+hbRuUJR/ZnLeHECMEhlee+nvFeffPc28tU+gl1nFuHjTz5Epaplwcx+1mTVqlV6LsyAgAA8/PDD4Ge6AYeEhOhYx08++USMTWvgXNw38lQQ/PoBgeNdVCSnCwEhIASEQJoTCM/XQ/fp7Ncjs9VM8/o4UwEZ4XSGmpwjBISAEHAjApyPkkan5Ujn0aNHMXz4cJ20ZtSoUXjssceSrPHVq1fxWLtArA8OxeNvAdXa3EhUPlNWIHdhoEzD2EMR++5g3XdhePrpp9GpUyf89NNPcVllmV323Llz2nh99dVXdaKcSpUqJdIpO4SAEBACQkAICAHvJiAGp3ffX2mdEBACPkIgY8aMWLJkCXr16gWmdTckJiYGGzdu1O62HF20JseOHQNHIQNqA6/PB7LmslYq8b6iyn7srOZ+Wz8LmPPjHMyZM0cXatWqFThlC6cxSejim1iL7BECQkAICAEhIAS8mYC41Hrz3ZW2CQEh4HMEunXrZrXNQUFBVvdzZxM1L2SlQKDjGPuNTUtlTbqqvKy/x+4ZNmwY/vjjD3CuUDE2LSnJthAQAkJACAgB3yQgBqdv3ndptRAQAl5IYOfOnXjqqaestoyJer799ttExwa80Rcnj59Fu2GJDjm849kggHGjlnGkDiuRE4SAEBACQkAICAGvIiAGp1fdTmmMEBACSRGIvhOJI5fW6YXb3iQnT57UWV+vX79us1kjR46Md4zzXn4x+Rs0sT4oGq+sPR/KNmLsJ/DmO/3tKZ7yZQ5OB35tEbtc2J7y15MrCAEhIASEQMoSMPr0DYNS9jqi3VQCYnCailOUCQEh4M4ETl3Zjq
lbAvXCbW+Sixcv4rnnnkPWrFl1oiBrbTt16hQmTFApZ+/L6LEjUaK6cqlVLrFmSQMVJro1dDsSGrdm6XdIT1Q4EBEcu9z0rhcMDnGQwkJACAgBbyEQERzbp8tLRI+6o5I0yKNul1RWCAgBIWCdQLVq1TB+/Hi9zJgxA1OnTtXJgrJkyYIbN2Izzt69exfMWNu/f39tmG7auAkNu1vX5+zevMWBEjVvYdfebc6qsPu8Mv6BKmWuncX5VtyatF0LFA20diR23zQ/28csj9RR85EmNSH5skDg1DrLM6xvm6BnRBEguLxaDlq/hOwVAkJACHg8Afan1vrnIs2BdsG2mxcaBISOtH3c8kjfGMtP8bcjgmO9Z+Lvtf4pKT08w1o7LDQFqu3gwZ491ZWMcFrcUNkUAkJACHgDge7du2PDhg0IDQ1Fnz59kC1bNqRLF9vdR0dHY+zYsWC8Z+TFa2CmWbMlbwlgz96UH0EunTcQrcoE6cVqG3IGWN3tCzvXqoeTSdaTEvtC86WNQkAIeCuBfDW8tWVJtqu5eokYMxXIcO9qkuXc9aCMcLrrnZF6CQEhIARcJFC7dm1w4cinMeoZEhKC0aNHI3/+/MhVIB3yl7zn4lUSn56/JBAyPyLxgdTeE/A00Ei50t5Siy1JzijliKM9UiQw6VIVeiQ9kmqcbYKedevWITg4GJNWA4vdJJzWaJ6shYAQEAIuEWg0SXmLBNtWkVyfzj62ju3T7T7C69j7+5Cc0mT0hIeH69/w7ceBNybmSE6bWx4Xg9Mtb4tUSggIASFgHgHO0dm7d2+9bN68Wf9wTftmKopUMt/YZK0LKjfXmzduIywsDGXK2Ovzal574zRlyqOyGA2K++jURlJuso4oLN/DkdK2y9qhJ/jXkQhaFmxbhxwRAkJACHgqAYZAJBUGkVy7XD3f0K8NziDjk2vrZH5nwqOCVZ8+Q1/jDdeulGZni0ttmqGXCwsBISAEUp9AgwYN8OWXX6JYsSLImCVlrp/dP1bvhQsXUuYColUICAEhIASEgBDwGAJicHrMrZKKCgEhIATMI9C82SO4qNxzUkLOHonVWr68CjoREQJCQAgIASEgBHyagBicPn37pfFCQAj4KgHGdkbsU66vtqftdBrN2TCgUFF/5MmjXFpFhIAQEAJCQAgIAZ8mIAanT99+abwQEAK+SqBGjdhMfzQ6zRaOnJavIKObZnMVfUJACAgBISAEPJGAGJyeeNekzkJACAgBFwkUKlQIFaqU0qOcLqpKdPrVU9lRo0q9RPvN3nHkYjD+DBupF7N1iz4hIASEgBAQAkLAHAJicJrDUbQIASHgAQQ4b+PY1jF64bavS5+er2H9j0DEfvNI7PodOLjlGjp37myeUhuawi4FY1VYkF5sFJHdQkAICAEhIASEQBoTEIMzjW+AXF4ICAEhkFYEBg8ejIZNq2Pdd+bUIPoKsGIC8NVXX6Fx48bmKBUtQkAICAEhIASEgEcTEIPTo2+fVF4ICAEh4BqB5UvW4vhO4PAm1/Tw7GVjgIcD6+GVV15xXZloEAJCQAgIASEgBLyCgBicXnEbpRFCQAgIAecI5M2bF7NmzcKiEcDpQ87p4Fk0NsO3qhHOZWucVyJnCgEhIASEgBAQAl5HIIPXtUgaJASEgBAQAg4R6NKlCwoWLIjWrVujxhNAgxeAPIXtU7F+FnQcaLESBREe/i9y5Mhh34lSSggIASEgBISAEPAJAjLC6RO3WRopBISAEEiaQKtWrRAVFYUKBdpgWndlRCpDMky52d68mvi888eAnSuB2UNijc3XBvbBif/OoGTJkokLyx4hIASEgBAQAkLApwnICKdP335pvBAQAkLgAQGOTs6dvhItGn6NCZM/ViOXyrJUUrgcULwqEHUBOLXPD1fOxSB33mxo2KgBfvp8JJo2bfpAiWwJASEgBISAEBACQsCCgBicFjBkUwgIAe8mEH0nEqeiduhGFslZA1kz5PHuBjvZOib94RIREYGtW7ciJDQEW7ZsRp0qJdCkd1PUqFED1atXd1K7nCYEhIAQEAJCQAj4EgExOH3pbktbhYCPEzh1ZTumhrTQFPrVXQuZizPpL0TRokXBpW3btkkXlKNCQAgIASEgBISAELBBQAxOG2BktxAQAkJACLg3Af+sASjt39y9Kym1EwJCQAgIASHg4wTE4PTxL4A0XwgIASHgqQTqFu0BLiJCQAgIASEgBISA+xKQLLXue2+kZkJACAgBISAEhIAQEAJCQAgIAY8mIAanR98+qbwQEAJCQAgIASEgBISAEBACQsB9CYhLrfveG6mZEBACKUhg2YE3kSVjHhTNUQPtKk6yeaWQiOkIjZhh87jlASYisiURUdvBa9ojZulpV2EiiuasafOSRgIlmwXuHzBLT52i3ZN0gV22fxAirsZmEU6qTsnpSepcOSYEhIAQEAJCQAikLgExOFOXt1xNCAgBNyFAA1BLTEySNboUHY4jF4OTLGPPwRu3I1NdD6+ZlNjbLrP0JJfgh/fkyKV1SVVZH0tOT7IKpIAQEAJCQAgIASGQagTE4Ew11HIhISAE0poAp0F5+KE3EGdsqgolNQLI+pqVCZWjqWYYSo7oYdmkxN76mKWHLJOS5O6FcW5yeoxyshYCQkAICAEhIATSnoAYnGl/D6QGQkAIpCKBpNxnrVXDrEyoNKb61Qu2dgmH9pmlhxc1oz5m6nH03jgETgoLASEgBISAEBACaUJAkgalCXa5qBAQAkJACAgBISAEhIAQEAJCwPsJiMHp/fdYWigEhIAQEAJCQAgIASEgBISAEEgTAmJwpgl2uagQEAJCQAgIASEgBISAEBACQsD7CYjB6f33WFooBISAEBACQkAICAEhIASEgBBIEwJicKYJdrmoEBACQkAICAEhIASEgBAQAkLA+wmIwen991haKASEgBAQAkJACAgBISAEhIAQSBMCYnCmCXa5qBAQAkJACAgBISAEhIAQEAJCwPsJyDyc3n+PpYVCwGcItGjRwmfaKg0VAkJACHg7AenTvf0OS/t8hYAYnL5yp6WdQsAHCAQHB/tAK6WJQkAICAHfICB9um/cZ2ml9xMQg9P777G0UAh4NYH69evjiSeewPXr1726ndI4IeAogfLlyyMwMNDR06S8EEhTAiNGjMD27dsRGRmZpvWQiwsBdyNQoEABVK9e3d2qZVd9/GKU2FVSCgkBISAEhIAQEAJCQAgIASEgBISAEHCAgCQNcgCWFBUCQkAICAEhIASEgBAQAkJACAgB+wmIwWk/KykpBISAEBACQkAICAEhIASEgBAQAg4QEIPTAVhSVAgIASEgBISAEBACQkAICAEhIATsJyAGp/2spKQQEAJCQAgIASEgBISAEBACQkAIOEBADE4HYElRISAEhIAQEAJCQAgIASEgBISAELCfgBic9rOSkkJACAgBISAEhIAQEAJCQAgIASHgAIH/B6DdOKUBt9vbAAAAAElFTkSuQmCC" + } + }, + "cell_type": "markdown", + "metadata": {}, + 
"source": [ + "### Introduction to Neural Graphs (NGs) \n", + "\n", + "The Neural Graph is a high-level abstract concept empowering the users to build graphs consisting of many, interconnected Neural Modules. A user in his/her application can build any number of graphs, potentially spanning over the same modules. Once defined, graphs can be trained, exported/saved and imported/restored in other application(s).\n", + "\n", + "![neural_graphs_general.png](attachment:neural_graphs_general.png)\n", + "\n", + "The import/export/save/restore options combined with the lightweight API make Neural Graphs a perfect tool for rapid prototyping and experimentation.\n", + "\n", + "\n" + ] + }, + { + "attachments": { + "neural_graphs_nesting.png": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAq4AAAHiCAYAAAApjU6MAAAKx2lDQ1BJQ0MgUHJvZmlsZQAASImVlwdYU1kWgO97L73QAhGQEnpHepUSeiiCdLARkkBCiTEhqNgQEUdwLKiIoCLgoICCYwFkLIgF2yCoiH2CDCrqOFjAgso+YAkzu9/ufnved3L/d3LuOefe7958JwBQCWyRKANWAiBTmCWODPRhxCckMvC/AwSQgAogADybIxExIyJCASpT499l5C6AxsfbVuOx/v37/yrKXJ6EAwAUgXIyV8LJRPkEqm84InEWAMgB1G6wNEs0zldQVhWjBaL8aJxTJ3lonJMnGIOZ8ImO9EVZHQAChc0WpwJAMUTtjGxOKhqH4oeyjZArEKKMvgNPDp/NRRnNCywzMxePswxl0+S/xEn9W8xkeUw2O1XOk2uZEIKfQCLKYC//P7fjf0tmhnQqhzGqFL44KBId6eie3UtfHCJnYfKc8CkWcCf8J5gvDYqZYo7EN3GKuWy/EPncjDmhU5wiCGDJ42SxoqeYJ/GPmmLx4kh5rhSxL3OK2eLpvNL0GLmdz2PJ4+fwo+OmOFsQO2eKJelRIdM+vnK7WBopr58nDPSZzhsgX3um5C/rFbDkc7P40UHytbOn6+cJmdMxJfHy2rg8P/9pnxi5vyjLR55LlBEh9+dlBMrtkuwo+dws9EBOz42Q72EaOzhiioEf8Aeh6MMAMcAOOABb9DMMgCzesvEzCnwXi5aLBan8LAYTvWU8BkvIsbZk2NnY2QAwfmcnj8T7exN3EaITpm0iNL4reuaR6mlbsiYAzeg50iBO2wwPAqAYD0BTLkcqzp60jV8ngEV/CxSBKtAAOsAAmAIrtDIn4A680YqDQTiIBglgIeAAPsgEYrAUrARrQQEoAlvBTlAGKkA1OASOgGOgGZwG58FlcB10gR7wEMjAAHgFhsAIGIUgCA9RIRqkAelCRpAFZAe5QJ6QPxQKRUIJUBKUCgkhKbQSWgcVQcVQGVQJ1UI/Q6eg89BVqBu6D/VBg9A76AuMwBRYFdaGjeFZsAvMhEPgaHgBnAovgXPgfHgzXApXwYfhJvg8fB3ugWXwK3gYAQgZoSN6iBXigvgi4UgikoKIkdVIIVKCVCENSCvSgdxGZMhr5DMGh6FhGBgrjDsmCBOD4WCWYFZjNmHKMIcwTZiLmNuYPswQ5juWitXCWmDdsCxsPDYVuxRbgC3B1mBPYi9he7AD2BEcDkfHmeCccUG4BFwabgVuE24vrhHXhuvG9eOG8Xi8Bt4C74EPx7PxWfgC/G78Yfw5/C38AP4TgUzQJdgRAgiJBCEhj1BCqCOcJdwiPCeMEpWIRkQ3YjiRS1xO3EI8QGwl3iQOEEdJyiQTkgcpmpRGWksqJTWQLpEekd6TyWR9sit5LllAziWXko+Sr5D7yJ8pKhRzii9lPkVK2Uw5SGmj3Ke8p1KpxlRvaiI1i7qZWku9QH1C/aRAU7BWYClwFdYolCs0KdxSeKNIVDRSZCouVMxRLFE8rnhT8bUSUclYyVeJrbRaqVzplFKv0rAyTdlWOVw5U3mTcp3yVeUXKngVYxV/Fa5Kvkq1ygWVfhpCM6D50ji0dbQDtEu0AVWcqokqSzVNtUj1iGqn6pCaipqDWqzaMrVytTNqMjpCN6az6Bn0LfRj9Lv0LzO0ZzBn8GZsnNEw49aMj+oz1b3VeeqF6o3qPepfNBga/hrpGts0mjUea2I0zTXnai7V3Kd5SfP1TNWZ7jM5MwtnHpv5QAvWMteK1FqhVa11Q2tYW0c7UFukvVv7gvZrHbqOt06azg6dszqDujRdT12B7g7dc7ovGWoMJiODUcq4yBjS09IL0pPqVep16o3qm+jH6OfpN+o/NiAZuBikGOwwaDcYMtQ1DDNcaVhv+MCIaORixDfaZdRh9NHYxDjOeINxs/ELE3UTlkmOSb3JI1OqqZfpEtMq0ztmODMXs3SzvWZd5rC5oznfvNz8pgVs4WQhsNhr0W2JtXS1FFpWWfZaUayYVtlW9VZ91nTrUOs862brN7MMZyXO2jarY9Z3G0ebDJsDNg9tVWyDbfNsW23f2ZnbcezK7e7YU+0D7NfYt9i/dbBw4Dnsc7jnSHMMc9zg2O74zcnZSezU4DTobOic5LzHuddF1SXCZZPLFVesq4/rGtfTrp/dnNyy3I65/elu5Z7uXuf+YrbJbN7sA7P7PfQ92B6VHjJPhmeS535PmZeeF9uryuupt4E317vG+znTjJnGPMx842PjI/Y56fPR1813lW+bH+IX6Ffo1+mv4h/jX+b/JEA/IDWgPmAo0DFwRWBbEDYoJGhbUC9Lm8Vh1bKGgp2DVwVfDKGERIWUhTwNNQ8Vh7aGwWHBYdvDHs0xmiOc0xwOwlnh28MfR5hELIn4ZS5ubsTc8rnPIm0jV0Z2RNGiFkXVRY1E+0RviX4YYxojjWmPVYydH1sb+zHOL644ThY/K35V/PUEzQRBQksiPjE2sSZxeJ7/vJ3zBuY7zi+Yf3eByYJlC64u1FyYsfDMIsVF7EXHk7BJcUl1SV/Z4ewq9nAyK3lP8hDHl7OL84rrzd3BHeR58Ip5z1M8UopTXqR6pG5PHeR78Uv4rwW+gjLB27SgtIq0j+nh6QfTxzLiMhozCZlJmaeEKsJ04cXFOouXLe4WWYgKRLIlbkt2LhkSh4hrJJBkgaQlSxVtjm5ITaXrpX3Zntnl2Z+Wxi49vkx5mXDZjeXmyzcuf54TkPPTCswKzor2lXor167sW8VcVbkaWp28un2NwZr8NQ
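The markdown cell above outlines the workflow this tutorial introduces: build a graph over existing Neural Modules, train it, save/export it, and restore/import it elsewhere. The sketch below illustrates that workflow under stated assumptions: it uses NeMo 0.x-era APIs with NeMo's toy tutorial modules (TaylorNet, MSELoss, RealFunctionDataLayer), and the NeuralGraph/OperationMode context manager and save_to/restore_from calls are assumptions based on this PR's description, not lines quoted from the diff; the file name is hypothetical.

# A minimal sketch of the Neural Graph workflow described above.
# Assumes NeMo 0.x-era APIs; module and file names are illustrative only.
import nemo
import nemo.backends.pytorch.tutorials as t
from nemo.core import NeuralGraph, OperationMode

nf = nemo.core.NeuralModuleFactory()

# Instantiate a few Neural Modules; the same instances can be shared by several graphs.
dl = t.RealFunctionDataLayer(n=10000, batch_size=128)
fx = t.TaylorNet(dim=4)
mse = t.MSELoss()

# Build a training graph spanning the modules above.
with NeuralGraph(operation_mode=OperationMode.training) as training_graph:
    x, y = dl()
    p = fx(x=x)
    lss = mse(predictions=p, target=y)

# Train the graph, then save it so it can be restored in another application.
nf.train([lss], optimizer="sgd", optimization_params={"lr": 0.0003, "num_epochs": 1})
training_graph.save_to("my_graph.chkpt")       # assumed checkpoint API (*.chkpt files, cf. the .gitignore change)
training_graph.restore_from("my_graph.chkpt")  # assumed restore API

Because graphs can span the same module instances, weights trained through one graph are immediately available to any other graph that reuses those modules, which is what makes the export/import options useful for rapid prototyping.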
lN6nTx9rV8c+jEVfClKYDo888ojDmw/DFYv4EKIAAxbZBnDMeaEe+j777LNaxYgRI3SBC7xp166dHDlyRMdjIxcxnh7A4wqvLRb8GUEZYSNmQSCMVRM7bc6F83XX0dVpWQCUV3XrwbnhHNrlWJUKXSEo/zpn2xtyJCXtc+GyYQgOJsbXvxAacHGKPQyHzzFCRHCvPf2wDcHUqNIHAs0btZHXXp8nDbqek2IVfejgY5Nti3LLLZ3/WyzsYzc2I4GwEaDhGjbUHCgzEUApXRitWKEPr6NV8uXLp99aMziY8ygLbLyx8HS6M1xhKELwqB4GhhEYuwhVMDrMcesrwkQQbvLAAw/odp7SZKGsMARlgBGXa2Tt2jRXD+aBVF6Q7t27m9NR9bpx/3SHsbr3+PqIzg2phKoV6aSN1arqNU/2Qno++09s1t5efyZ3prjIwVvSepyq609PkSal7tZGaxyqDLgR87mC15WGqxtIETyMBbnf/vCZzB+7Ra5OW5sb9GyWqwihxCKVdbnwoJW5UBCflCQlVRw9hQSCIUDDNRh67EsCHgjgUT5ytCInKx6XG0E4CIxLxIwilMC6GA8LoHbs2KHTn5n2rl4PHFC1OJV4K0jhqi+OYdEUxJXxrE+of7BYy8S1mvbmHOKoscFwNiEG0bKY4+SZg2lZAJRXFVkA8D6SUiRPFW2owmCtXDj9jxgzr6pFVAiFClPwR87CcFXlOf2Vq2t8IM1Kpy3u9NTXhHIgzhVPByjRR+DjD77UTzC2LhUpn5bqO+BJHtmrKi++J7Jp03cB6/DWsWqvXoKNQgLBEKDhGgw99iUBDwSw6ApGHQw64yE1zVu1aqVTZg0fPtzxON6c8+UVMc4QswDP9HEuSGGOO7+aXLOmGIXzebzHin+TjQJZJZyzCJg+ScqLApk3b54ju4U+4OKfU6dOuTga/CF4UrGoCtsm5WGNtCQVbJXmWVXGasl47+5QhAtkz5pbzpw7GbKpl8rfQHtZS+f3zcJBDD0WgDLONWS3JGjFCPl5ZsAT8uYrw6TPhPNB6Rv/ZFYZOWaYYOEXhQSimYD750TRPGvOjQRihADS1iBzgLOYvLtINYbE/FbZv3+/9a3LfV8LUpjOSHFm5MyZM45MFaZghTnn/GoWj8HAtop10ZkpL4xYXqTEMgIj1RRRMI+aYdzaJVsPzpHJG5+WNxfUkdfnV5Of1j8aMaM1e5bcUqv4NWqR0yh57rJkeaDxXGlTvr9PRit4xEkWbejaxcZZT5PS90ifZkvFV6PV9Ee4AA1XQyM6X196YYhKY9dG3umRRfZs8n+OGxeIvNk1TurWbCb33PaI/wrYgwTCTIAe1zAD53CZjwDy7qK4A1bdG4HBiFRWCCdA9gE8dodnFgUMUKbVmyADhS8FKYwe5P3966+/BP1gYCIGFseqV69umrh8RWqt77//XmcLQPqrm266SRCmgBRcyB+MuSMU4r333tOFMi699FKdPg2puhDfe+edd8rgwYOlYMGCOs0WyhqjvDFiccuUKSO33Xaby3FdHTx97oQjVhU5Vo+prACRlIK5ywniVHXMqnr1FC/qyzyha82eCb409atNjxofStPS9/nVxzSm4WpIRPfr5J+my8+/fi+dO14tLVX4SJPrRLLn9DznlGMiv40QWTNVZMgbL8gT/Z7z2OG+xrM8nudJEggXARqu4SLNcTI8AZMhwBR5MBeM42PGjNHGmjmG1yFDhugYVXhdscIfGwTxpCg2geponsSXghRGHwxlU2UN8bUwJlFlzSpWb6k5XqpUKV1yGEYu5mcqviFHrQlXgDGMVFt9+/bVuV1R6Q2CMANsRjAmDPOvvvpKH0I6LW+G64GTW9OMVZ1jdbLql2rUReQVyflhqCIm1STqt2si0Gun+Bsa4GrscORydTUuj/lP4KoO3bV3/J4+PWXCE8skIemoFC4jUrS8+k5RT/9V5kPZt01k7/a0130b80pSmcqyceM3UqlSJf8HZA8SiBCBuFQlnsbGH5tBgwbpJl6aelLDcySQ6QkgrhQeLF9x8OoAAEAASURBVGthAAMFj9SxKMtbTl3T3vnVXUEKLL7CIisYwxgDj/iRk9fVHBCyAA9qL7V4At5UZ0FqJHhb0d8Y6c5tkKMW1bngYbUuOrO2Q7ovGM8mfMB6DvuoVIWKVfCqooJVJAVeVONRhUcUXtZQyodLLrUlTRdCA1BQAEEIwQgyT+zcuVN++umnYNS47asrZ6kiBBB69Nxi8vsEspPMXzRDfl88V/5atVmOHEqLnU4olEfqNqoiTRu1kuaNLte5mP1Wzg4kECABhJ6ZJ4/B2JP0uAZ4A9iNBPwlYDyUrvohRVaNGjVcnfLpmDvd1iIVGMOk4rIqhWGCSmsvvfSSPvzQQw9ZTzv28+bNK9g8CRZwmcVa7trBi2uV86ln07IAaK/qL3Lo1E7r6bDvx+csecFY7ShVsWhKxa+GS+DJDTa/LMq2Ni19ry1Txg8tk+7MFoVOSrpUe8vpCN/aQQA/QLEZufvuu+WWW25x5HI2x/lKArFIgIZrLN41zpkEbCRQs2ZN/QgfKuFhM8UCbBziIlVHUpK1RxVZAOBdPXs+5aI24TyAlf/as6oM1aQCrcI5dLqxMIdfN6YP4UjXwPImx2aRwh+mHTimsmzlv7ah9rKWyt/Q0iq43UiFCvhaBhb3LXe2Am4v0lc9BXOVU970JLd6UJr21NmLq+A5dwiXHpQkPnhKPfP3Isgb/NeifTpu3lVYjj96EuPreRnN++lkFee/60Ju6IYqDy2FBAIhQMM1EGrsQwIZiMDLL7+sH+tjcVgwXl9vSJKPrnAYq9sPqaXMEZbKhdtdMFY7CnKtRoPAEMNc9p3Y4HU6WY+L5F6d1qxwq4Zyq8oaYLdEanHWiCWtfbqU+xrNlAqF3Lf1Vc+VFQdK24qD3I6JkAZfjOBw6VmaPFqmbx7sdr7mRIWCl8mwB5N1KjuE8DiLP3rsCOWA4bpMhR9CaLg63w2+95UADVdfSbEdCWRQAr179w7ZlW3YN8WRX3X/iQBy9dg4M1Sp0lkAlFcVnk14o6JRkNN13nbvhqt17tWLdra+tW0fhivilvfu3StFixa1TS8VhYfAhHe2yPbte9waruGZBUchAXsJ0HC1lye1kUCmJnD8zD5ZfyFWFWEAKWePRpRHsbzVtZFatWhHqVToiojOxdfBYVTP2/6W1+ZpXmL/DFyvSp0aWKtnhdNwhefSF/H0eB/9fdVTsWBrj8M1LNXLo2fXdA6XHj1ORTOq69fl87fItLGf65MoJuLK4+qLHijwxtn1DHiUBEJDgIZraLhSKwlkGgJ7jv3lyK+6+cDMiF83Ho/Cawnvaol8tSI+H38ngLKwuVTc5qmzh9x2RV7WZjl6yiRp47aNHSfgcYWgCAFy74ZLPD2292cOdulplNjLn2HdtrVLD8IjPIVIYAIPdfwv/AWZRZC1xFl80ePch+9JINIEMo3huuXALJ9ilAqodDeevlx81YMb6+nXPoLilyWP8en+R5uehom3e/wF7kvsFS7cmx6f4LBRRAjAQDU
Lq2C4RlJyZM3niFWFsZovR7FITseWsaspD/HKXeNc6IrTC7CQ7ir531kuztt7CCVfkZmC1bPs5RpqbShusn379nTDbN26Nd17viGBWCWQaQzXzQdn+RzM7slw9VUPPhDeDM5pmwf59LmJNj3waHl6dOTrdXnT4xMcNgoLgZSzR5ShOtmRtgohAZGUQrkraK8qHqsjhVRGE1yXs+GKcq1IdYXCAuGUSC3QCuc1ZqSxpk2bpqvqOV8T8idTSCAjEMg0hmtGuFm8BhIIJwEspoJXFRsWWUVayhZorjyrHbV31c6UT5G+Llfjw3NslWYqNOBqVbo1EhKplFiRuNaMMKa7PMz79+/XC+2Qa5lCArFMINN8ghHrZEe8k116EFv0WrvUoD870aYHFxTMdZkwA28hG0GDowKXBJCmShuraoEV0ldFUrLGZdcFAOB9xJaQ67/ysZGcVzjGRgYELCZDSEYPZbAiNCBSQo9rpMj7P66rEAGrFizQ8lYgxNqe+yQQjQQyjeEajfA5p4sJmDADhBF4Ctm4uCePBEIAif91eVV4VpWxisIAkRQYp/Cq6rRVyljNmiVHJKcT0bGxwKxTlSFhDw1wvmgYrn/++afzYb6PMgIIERg2bJiglGauXLnk1KlTF82QhutFSHggBgnQcI3Bm8Ypk0AwBFBSVS+supC2CiVXIymI2UyLVe0k5VQ4ACWNwCXlHvWIIj4pyZHEPbF1a49tgzmJUIEZM2YEo4J9w0Cgbdu2kpKSInPnztXbqFGjBHGt58+f14Ys8vG6SokVhqk5hsBntuRllznec4cEAiGQKQxXVE9B1RN48eyo/hEIaPYhgUgS+PvIUu1RhXd1x+FFkZyKHhsLqoxntXAeLwkpIz7b6JwAjIBGgwaFfHLwuO7Zsyfk43CA4Alkz55dLr/8cr1Nnz5drr32WunRo4c2ZGfOnBlxw7Vqr16CjUICwRDIFIZrMIDYlwRik0Cq8qqqLADKqwpj9cDJyKbCyZujqPaqas+qegSeU6WwosQGARiuqJwFjx0X9sTGPUOYwLx58+TRRx+VVq1a6a1///6xMXnOkgS8EKDh6gUQT5NArBA4dnqPNlR1GIAyVk+fOxHRqRdXyf91FgBlqFbwUpkoohPl4B4JwHCFwOtaqlQpj215MjoIwGiFwGilkEBGI0DDNaPdUV5PpiKw+9gah7G69eCciF97xUKXp3lWlbGKcquU2Cdgyr6iCAEN19i4n4hzrVu3roSzTG9skOEsMwIBGq4Z4S7yGjIVgU37p6elrFJe1b3H10f02nNlS0iLVVWGKsIA8mQvHNH5cHD7CRiPK6tn2c82VBrhcaW3NVR0qTfSBGi4RvoOcHwS8ELg5JmDOl7VpK3C+0hKkTyVtZFaVRmrVQq3i+RUOHYYCGTJkkXgdaXhGgbYNgyBLALwuN577702aKMKEog+AjRco++ecEYkoD2pJlZ1o/KwRlqSCrTUJVaRXzUxvl6kp8Pxw0yA1bPCDDyI4eBtPXPmDD2uQTBk1+gmQMM1uu9PppsdUpZBMqNxtPXg3LT8qioEYNfR1RG999my5HLEqsJYzZ+zZETnw8EvJrB/5UpZ0K+fPlHl9ttDmmYI4QL0uF58D6LxCLyt1atXj8p45ORZs2TX7NkaW8OBA6MRH+cUAwRouAZwkzZs2CDLli2TpUuXyJJli6VFsxbStGlzqVOnjlSsyJyUASB1dMlMeXbPqFX/uryqMlTXq7RVR1VWgEhKgVzllFcV+VU76cpVWeKyRnI6HNsLgZRDhwSGAMTupO6oa4/vufXr18u6P/6QtWr7c/VqScidW2rUqyeVKlXS33WFChXS4/Of6CEQzfGt+LwuGzxYw6LhGj2fmVibCQ1XP+7YE088Ll9+PUaSd+6TnHmySLFK56VsXZHx0+fLW8PjJOV4qpQqV1S6XXWNvPrqEImPj/dDO5tmBgIHVT5VnV/1grGaKqkRvezSCY3TPKvKWC2T0CSic+Hg0UHg9ddfF9S8h8RnzSrFzp2T8mo/Tm3jhwyR3Srm9YiKo4Q8/vjjMnToUL3Pf6KDAAzX999/Pzomw1mQQAgIZArDtWGpXlKhUGspmDspIISrlafhlru6y96DO6V6x7NyaWWRsnXSvrj/U5gqyWtFbXvly+9GyvgJX8rLL74u99xzz39NuJcpCaBSlV5YpbyqqGAVSYmTLNqrisf/8KwG+n8iktfAsUNDAHlab1NVlmb8/rv0VEMgmVlxZbReJMpo/VcdXKW2N4YNk2+++EJGjh4t7dq1u6gpD4SXwMKFC+XYsWOMbw0vdo4WZgKZwnBtlNgrYKz/e3WQDOg/WNrcK9Kuq0jW7O5VJapvemx1O52XacOP6FWdY78cLbNnznffiWcyHIHzqWfTsgAoQxWhAIdO7YjoNcbnLOF4/A9jNXvW3BGdDwePPgKffPKJ3HXXXdJATe1FtXkLACim2rRVW/XUVBm7a5e0b99e7rjjDoEeSuQIwNuKcLXy5eEjp5BAxiSQKQzXQG/dDTddI7+vmCx9xovkTvBdS/ZcIp0eF2l5i8gPg5dKhYpJsmXzNt8VsGXMETiSkqyNVMSqwlg9ez4lotdQMr6Ow1gtX/CSiM6Fg0c3gTlz5mijtZeaZjM/p1patX9abb+p7dNPP5X69etLnz59/NTC5nYRwMIs5m+1iyb1RCsBGq5u7swX6vHX+HHfyZNT3DTw4XBCCZHbPjgtQ9pvl6/Hfy03XH+DD73YJFYIJB9doapWTdZhANsORd6rXrlwW4exWjRvlVjByHlGmECPLl20weqv0Wqd9uXqDaK1H374YenWrZuULVvWepr7YSIAj+trr70WptE4DAlEhgANVxfcDxw4IA/0vkcaXe3iZACHrn9FpOcNPaVVy1ZRmaIkgEvKtF027J+iS6wiZnXfiU0R5ZA7e0FtqOosAKoYQO5sBSI6Hw4eewTuuvFGOXHkiPSyYepXKB2Ie+2nEt9P+PVXGzRShT8Eli9fLvjbdcklfMLiDze2jT0CNFxd3LP7H7pLchc+KZff7+JkAIeSVOBYAxUf+3j/3jLus4kBaMg8XaZvTkuVUiB3OQkmNtkuYsfP7FOpqibrx/8IAUg5e8Qu1QHpKZq3WpqxqgzVSoVgKlBIIDACU6dOlU+++kquD6y7y17XqaMvTZkiw4cPl969e7tsw4OhIQBva5kyZaRq1aqhGYBaSSBKCNBwdboRmzdvlm+++l6utjk3chP11+HTu3+SDc9tkCpV+BjXCbvj7bTNg/Q+ChFEynDdc+yvtCwAylDdfGCmY26R2ilf8FKHsVoiX+1ITYPjZjAC34wdKzXUNeExv11SRinqrLaPVDomGq52UfVND+NbfePEVrFPgIar0z1E6qts2eOkbD1782vmL6pW6paJ04ULaLg6QY+Ct5sPzHIYq3uO/RnRGeXImjctVlV5VREGkC8H1nBTSCA9gZwFCjgKD8QnJaU/6cO7JfPnSwUf2vnbpLjqMH3TJjmnUmllVXlgKeEhAI/rgAEDwjNYgKPgc2p3sYwAp8JuMUwgUxiuk9
b1leRjqyQxX13pUu0tj7dr6YoFUrJ6qiow4LFZQCcLlDojc36fIjequDJKZAmknD2algVAeVURAnD89L6ITqhQ7vLpjNWIToaDxwSBwqqCVddZswKaK3J9rtqyRS4LqLfnTqXU6VNnzujKWyg9Sgk9gT9UZTOU5I32+NaqvXqFtDRx6ElzhGggkCkM1+SjK2XLwdlq2at3L+q8+XOkVIi+a4uUE+1xjYYbnxnnsF8tpkLVKiysWr8v8otHyiY0U8UA0sqrls7fMDPeEl5zhAjgyRIkFB7XRKU3p6quhXKxNFw15pD/A29rsWLFpHZthhKFHDYHiDiBTGG4+kN5+ZI/pN1j/vTwvW2xiiKTv43sSnTfZ5sxWm4/tMCxsCr5yIqIXlSWuGyOWFVUriqQCxGBFBIIP4FFixZJ+Tx5JNeJEyEZvIQKEUBBgw8++EAqV66sN4RIYb9SpUohGTMzKU1OThZkEVii7uMilYd3jnpNVY6Zbqp6WcvLL5d6DRpI3bp1pXhxBG5QSCBjEaDh6nQ/SyQWkaP7djgdteft8YMiRYszZZE9NF1rQeL/9cqrisf/61QxgCMp/7huGKaj+XOW0l5VxKpiy5olR5hG5jAk4J5A6dKl5YB6nB8qOaTiWzupalqFCxeWjRs3yi+//CJbt27Vw2XLli2dIWs1bEuVQqABxROBIa++Kk/176+bgBYy5iIzRBa1bZk2Td5Tm/kL1l+1e/nll9UZCglkHAI0XJ3uZeOmDWX12h1Sv4vTCRve/rtZpHa9ajZoogorgUOndqYtrLpQtQolVyMpifnrp3lWlaFarkCLSE6FY5OASwINlEfusDJcd6mzJV22CPzgMdX18Pnz8sADD6SLuUxJSdFGLAzZDRs26P3FixcLir3sUmVjIfHx8Rd5aI1hCyM4M8uePXvkJuVRna3CPHoqEAgKcCbS9AIg5SORpWp77ZVX5Icvv5Qfpk+np/sCG77EPgEark738JLmbeXXF79XR73Hwzp19fr2wN8iPa6/xGs7NvBO4O8jS9M8q8pY3XF4ofcOIW5RtUgHweN/eFUL51ExIRQSiGICqGdfPH9+2aKKD9htuKqvOS3O+URz5swptWrV0pszmkOHDjmMWmPY/qqKGLz77ruCcxDEcBoj1rya8IM8KuwhI8snn3yiQy9USnB5UW2FvFxsQXW+rdqwXGPs9u2a2zvvvMNyvF648XRsEKDh6nSf6tapKweTU2XfdhEsprJTDv2TXVo2vdxOlZlKFx7/r7/gVT1wMu2xY6QA5M1RRBupxljNmS0+UlPhuCQQEIGGKivBFhUf2TKg3u477VanEosW1Yam+1bpzxRQqb0aN26st/RnRK+WNx5aGLXYxo0bp1/hxYWgxCyMWWPIWg3buLg4Z5Ux9X7y5MnaaO2lZu1vWd7Sqs/TavtNbSjHe9lll0mdOnXUOwoJxC4BGq5O9w7/qYsnFpbVk/fbVjkLQ6ydpVbaZssnDRty9bgTcrdvj53ek5YF4IKxevrccbdtw3GieL6aDmO1YqHW4RiSY5CAWwJHt22TDWPG6PPIjZnYurXbtq5ONFGLeF5TuVwvUfGoSa4aBHhsTe7c0rRVqwB7X9ytRIkSgu3SSy+96CTiZo2H1hi101SM5yaVRxaSRWU3MEass2ELYzcW5Jpu3aSjmqi/Rqv12uAuQQDVZc2by8HjkfsexWf2mPIAQ5jPVWPgPwEQoOHqBC1fvnzyzFPPyyOPPCJV1Hdv6VpODQJ4e0Y5BWaNyCKvv/aKjuEKQEWm6dKvxWq9qAre1RdnlYj4dVcs1MZhrBbPhzpDFBKIDgIwApYOGqQn03DgQL8N14Gqz0RVPet7ZeT1temSflV6NitD+IehQ23S6FlN+fLlBVs7FftplTMqftcYssawXbFihYwfP17++SdtwSbCC1x5aGHgFlUeY7ulU6dO8swzz0grP4z6zpdcIonqWrrZMBkQWq6ySDx0223y3mef2aDRfxXrR4+WZYMH6473+ZCe0v8R2CMzEKDh6uIu45HKF+M/lunvr5Fe77to4OehmSNEqlarKvfefZ+fPTNX8zPnTshbC+qq6GL744t9JZkzW35tqOosACrHap7szssffNXEdiQQ/QRGKMO1WbNmMkdN9WJ/pn/zhx8NqwO+++orQQxtJCV79uxSo0YNvTnP4+jRo47FYca4nTFjhnz44Ydy4MAB3RwLwZw9tOY9nBuByMqVK/VitVtuuUWw2h/z8ySffvqpzFVpruz6UYGxeqjtjc8/l2Zt28qtt97qaXieI4GoJZApDNeGpXpJBfVot2DuJJ9vxOQfZ+lULhvmifa8+tzRqeGWJSIrfxZZsiQyv3CdphPVb7NnzSNVlbG4bq8CFkYpnKdSmrGqxq5SuH0YR+ZQJBBZAk2bNpXnn3tOXvjf/wQLfwIzyUT2q76vqO267t3l6quvjuxFeRkdmQsQsuUqbOvff/+9KPTg22+/1YbuyZMntWakEoMRawxZs49XpPpyJehrMid88803Mlb9YICDBAYswiBcyS/KO9xWeVtR0MEuqaIU1VfbJHVNNFztoko94Sbg+n9ZuGcR4vEaJfbye4RChQrJe++9Jw899JA0v1HkEv9VyET1RGTjgrShsfAAj4rMhsdblIsJwNMZDsM1qUDLtCwAylhNjK938UR4hAQyCYHBL74oy5culcfVKv7b1DX7m8Dtc9VnvtqKKi/l+IkTY5oaMhdga9ny4iVr21VspvHQmsVis2bN0seQ/B9iNWKthu0Rlb3BiFlQNnLkSF2g4dlnn9UGbI4cOUwT/bp82TK5Kt0Re95gzfGShZHPxGLP1VBLZiSQKQzXQG/sgw8+qGOnul7TVkbfv1Na36sWMcAt4UX+mikye1ScpJ7KJZ9++r5cd911OgH3zz//LINVfE/v3r2lUaNG2oi96qqrpEmTJl40Zp7TMFxDIdmy5EyLVVWGKsbIn9NOP0YoZkydJBA+ApPUyvWpU6dKe1U0YJUatp3avD3sX6vawEzdobbeKmfru+/bEFeldEWrlCtXTrBdeeWV6aZ4XuWsNYasMWzXrFkj3333nezcuVO3RegCshsYAxcHT506pc+hQADSfiHmuE+fPvoYcrZu2bvXVm+rVqz+qaC275VnGdW3EhP5PWi48DV2CNBw9XKvUJ7wr1Vb5c13hkr/p5+V8g2ySP7EFJ0qq5j6BkAZ133b1KYCvPaq14M7ssuG38/KvfffIR8MH+XQDuMVGwS/0lFJZsKECfLCCy/oL0PjiYUhG+vpWxwXHcAOwjmK5Kki+05sCKB3+i4FcpVNZ6xmicuavgHfkQAJOAhggdPu3bulzx13yFBlyJZThlZZ5UmEoVNFbVnUtkVtWy+8Yr+lSqn1lTJYm6vV6plVkLmgWrVqenNmcFyt4IcxO1QtVsP3vfG2WtudPn1a9u/frxcEI9fqoEGDpEiRIpJflc0tqBa62S24l9nUvUXJWBqudtOlvnAQoOHqI+V+Dz8hl1/WTubOnSuzFvwsKyatkl/W73L0rlyzlDRu0kBuu7mTNHm7i
aAyjTtp3bq1YBsyZIj89ddfAk8sDFnU9c6tUsnAiIUBi9fMWGsai7QClaJ5q0ndEtfrMICyCaaOTKDa2I8EMhcBfN+Mv/CjetmSJbJALVr6Ye1aOXQhhVLBvHmloVpUdFObNtJALeqK9njWSN+9vIpXPWXc58qVS86eRUIq14Lz8MAijdfTTz8t+VVxiARlEEsIDFfMoIrKqLBMhSJ07tzZ9YR4lASimAANVz9uTt26dQUbHvVDsDoV//nx2D/QlaZm5esTTzwhe9WjIRiw2BC4f+edd+rchcYbW7s2ivxlbPlhbR85nPJ3wBe59/g6OXZ6r/LaVgpYBzuSQGYncM011wg2I3gUnlV5ACOdLcDMJ9ZeETpw7oIRaoxUXAM8qwgVwyI5/B3BgjH8eJgyZYpcrxwXoZINKi2Wq8VpoRqPeknATgI0XIOgidWp8JzaJcgdePvtt+sNOo0RO2LECP0rHI+jjCf2cpU8PKMJCg4s2Dk86MtauPMDWbX7K2lX6UVpUeahoPVRAQlkdgLId0oJnAA8qQkJCdpYRFiFyWrgrggCvLRHVOzsQTVkwcCHddkTQVhnVQiIp6eCLjvyIAlECQEarlFyI1xNw3hacW6pWvVrQgqGDRumU3WZ8zBmYUTHuuTLUdy2Szh55qB6xNlbVu1KM2BZ6co2tFQUJQRyqjKppvpQfFJSlMyK03AmcPDgQVmk8rEiy4CvAq9reZWlIVnFvtptuCI2uZxykjC+1de7wXbRRiBOrXJMy+PhZmZYBY9gcYiXprpNNP4zaV1fST62ShLz1ZUu1d6Kxin6NSekZTHeWLxiVWuHDh0cqbb8eZz3ww8/yBVXXBFwqINfE/eh8VNT43SrfDmKqUf+//rQw7cmTUvfJ+2VBzZvDvsr4vg2A7YiARIgAd8JXKuKBMj06aL+tVVUPRypoJwd3/70k616qYwEvBFoo2LjsTgdEow9qaK/M74kH10pWw7MErxmBEFKlgdU+plJkybJsWPHBAmtS5YsKS+99JIgCwIeQw0YMEAW+pCrD4ZvrVq1ZPbs2VGFBousnmy1US206mnLvBb9PUKGzKss83e8a4s+KiEBEiCBUBLo2LOn/JYtmy7uYNc48LauUFvX66+3SyX1kEDYCWQKwzXsVMM4ILIQXHvttfLJJ5/oVDYwQNuqX+rwpCKWCjFU999/v/z4448uV7VOVAnD4cFFrO4bb7wRxpl7HipO4gQVrW6qM05ur/e9lMgX/MK0U2cPy4/rHpb3F7eUTQd+8zwBniUBEiCBCBK46667pJ5akDvOxjn8qnTd1qWL3HbbbTZqpSoSCC8BGq7h5R3y0S699FJ59dVXZfXq1TrV1iOPPKLzCHbr1k2HA/To0UNGjRqlyw8uWLBAZzIwk3rsscfk5ptv1l5ccywaXmsU6yb9WqyWDpVfUjlug//Ibj+0QD5aeoVM+OteOaoWhFFIgARIIBoJjFWP8/9QE/vdhsmtVjqwvTVmjA3aqIIEIkcgeCsgcnPnyF4IVK9eXWCMzlC5GPft2ycfffSR5MyZUx599FEdmN9TPYrCe6ug2gvSbs2ZM8d62O0+UrysVXkev//+e52XdvTo0TpEAam97JY25Z/R4QP1St5ki+rFf38kQ1X4wLztb9uij0pIgARIwE4CWED1+uuvC0zNY0Eo3q/6oq4ZwssKFrR7uVcQE2NXEgiAAA3XAKDFYpfCaoXqrbfeKuPGjZPDhw/LZFUZB1VdnCu5IAn2tm3b5LLLLpM333zT7aV++OGHUqdeDckbn1uQi/b6nj3krVH9pV//O3WIAup9J5YtKl27dtZFG9wq8vNEodwV5MbaX0iv+j9Kyfi6fva+uHnK2aMyaX1feW9Rc9m4f/rFDXiEBEiABCJIAM6HLmox1eNqDgsCmMfnqs+zauvYsSMLDgTAj12ijwDTYUXfPQnLjLCI68CBAx7HgmcWBRaQRxYVYCBYDPZg317y+agJ0vJWVdNc5cgumqQSaSchOUVagopTR9PK3+7btk9WzvpVFVH4Wfr162drDG31ol0E26ytr8qUTc/L+dQzmF7AsuPwQvl4WVtpXOounf81f86SAetiRxIgARKwk8CPKmRg6tSp0r59e1mlFLdTm6o27lHWqrMT1bZDbSgl26dPH4/teZIEYoUADddYuVM2zxPZBHLkyCGok+1JUF8bsbBjVFwUvLGoZ14wUeRmtY6rVE3XPXOplLJl1FoqbPW7nJPp6hkVvLffTPhK5s35XZAVwS5pXf5pnXlg6qYBsnzX2KDVLvlnlKN4wSXl+gWtjwpIIFQEjqonIxsuxCsin2ti69ahGop6o4AAvnt3794tfdVi26EqNKtk9uxS7MwZKaHmVkZtSCS468K2W537R51rqz4XMz7+WGebUaciLvjMHlOLgSEmB3HEJ8UJxBwBGq4xd8vsmTCyCTgbrShFmEfVsEYxA2wFVIJzbIiJevHFF2XatGlSW/3U7/iYf3O48kGROu1Fxj+zS5KSknSpXHclcl9r5zGtsMuBC+ZOkhtqf55mwG4eIP8cQcKXwOX0uePy0/pHHQZslcLwb1BIILoIwAhYOmiQnlTDgQNpuEbX7QnJbFCYYJz67r5RZYlZt26drFmyRNb++acs2rlTssTFSWWVRQaZCGqqMrJ4qtZFZRCIJlmv1kAsU7nhIfd5TiEfTdPmXKKMAA3XKLsh4ZgOYlyR8xXGaf78+R0bapG7EoQUIEa2fCP/jVajr5h6rtX7a7VA4KY4uaprO5n9WyDRWkab69dqRa8SbLO3DZGpKnzg7PkU1w19PLrz8GIZtay9NCp1h7Sr+IIk5CrtY082IwESIIHQEejatataP9A1dANQMwlEMYFMsTgrMb6eVCioHqWpV4romtktWrTQ2QPw2B4eVXdGK3h1ubqd5FULUa97KXh6PYekypyZvwsWd4VKLkt6Up5otUEaJtqTq3DpP5+q7ANVZM62YaGaMvWSAAmQAAmQAAn4QCBTGK4o83pf41kZotyrD/fU1ibDhg2TBXOWyeX326O2kHJatu0tuvIXYmdDJQVylZXra42ROxv8IqXzK1dxkHLm/En5ecPj8u7CxrJ+H9J4U0iABEiABEiABMJNIFMYruGGmpHGGzX6A509oHpr+66qvgq7qto4Xr766iv7lLrRVLVIR+nTbIlcVWWoZM+S200r3w//fWSpfLK8o4z/o5ccOrXD945sSQIkQAIkQAIkEDQBGq5BI8y4Cvbv3y9r/9gsJavYf415ShyVNWuX2a/YjcZLkx7X4QONEnu5aeHf4WXJY3T4wOxtQ/3ryNYkQAIkQAIkQAIBE6DhGjC6jN9xzZo1+iJLVrX/WguWEtmwYYP9ij1oxOKq62p9Knc1/FXKJDTx0NK3U1j89cuGJ+WdhQ1l3b6ffevEViRAAiRAAiRAAgETYFaBgNFl/I6rVq2S4hWzSO6E87ZfLIoW/LZjn5w8eVJy5/7vEf6Wg7P1WLmyJYRsMV2Vwu0F29ztb+jsA0h/FYz8c2S5fLq8szQoeasqXvCCID0XhQSihcDpQ4dk
v/q/7It4yq3pj57CdetKDpVKz53smp32/9zdeXPcLj351CLUeJWKz53sX7lSTqtsK97ELj05EhKkcD33i4Wt+U49zSna9GCunj5Dnq6F50jAVwI0XH0llQnbLVq8UEpUsd9oBcqi5dOAbty4UerUqeOgO2JJa72PLBBYUBdKuaTcoxeKFzwvKDwQrCzf9bms3P2VtFfGKwojUEggGgjsU0bZpDZtfJqKp9ya/ujpMnOmx7yyP7Zu7dN87NKDPLeNLuS8dTXw/L59xRdj2i49MO66zprlair6mDXfqdtG6kS06cFcPX2GPF0Lz5GArwQYKuArqUzYLktcFglVjmijN04lzY6k5M+ZKNfW/FjubjhVyiY0C3oqKD07eWN/efv3+rJ276Sg9VEBCbgjUMTisfNklLnrz+MkEG4CprpbvI3VE8N9DRwv8gQyhcd1afJoOXRyuxTIXU7sWpwT+VsX+hm0aNFSpg5VVQPknO2D/bs5TWXVqiEIoA1gtpULtxVs87a/JVM3Py8pZ48GoOW/LslHV8roFV2lfsmbVPjAi1Iod4X/TnKPBGwggEfx165YIVtV+U9Pgkfk8BQGK/7o8fRYHvPwdT526TEGkzsGVXv18ughNv3s0uPturyNY+YTbXrMvNy9Yr4tVPnvpO7d3TXhcRLwSiAuVYmnVoNVebZBFx6xeGnqSU1Ez+HxM2Inw/H4OaIXavPgyLPasmVLeWCsSHxRe5Uv/kZk68ySsm1TcjrFT01N88BG8l4dTdmtjdfFf3+Ubm6BvolTnuv2ynhtU/6ZQFWwHwmQAAmQAAnENIE2KmRp1oUQmWDsSYYKxPTHILSTR+xp9hxZJXmt/ePs3ylSo3ot+xXboDE+Zwm5psZIuafRdClXoEXQGlNTz8uvG5+VNxfUkb/+/SFofVRAAiRAAiRAApmVAA3XzHrnfbjufPnySb36tWXj7z409qPJ6RMih7bFS+0a9f3oFf6mlQpdIQ82mS9dq70jyHIQrOw+tkbGrOwuX66+Ufaf2BSsOvYnARIgARIggUxHgIZrprvl/l3wE489I3/9JvL7/9s7D/goiv6NP5FeQi8CUkOX3kGQoBQLqC9/C+iLoqgoIgL2ggRFfO1iB1HBitgFRYoaeg0dpHcCSO8Q2n+fjRsvx93lyl6uPeNns3s7M7+Z+c7hPTv7m5kvfcvnKfVsw1bOs4UwcOBAT8nCJu6yCg+amxc0v6S3LXVaaqw88PLMavhj41Bb7MmICIiACIiACMQKAQnXWOlpP9t500034bbbb8KMMcChXX4acci2eRFA/9YP3v0YpUuXdogJ78uCuUuha+0PcG+TP1CpSGtbKjtp/SC8PrsOVv79gy32ZEQEREAEREAEop2AhGu097AN7ft8zDiUKFUEX9gwQDruSWDgI/3RsWNHG2qW/SYSirXD/c1m4Ppa7yBfrqIBV2D30ZX4dElXfLHsFuw9nr07iQVceRkQAREQAREQgWwmIOGazcAjtbg5sxbg6D7gvVuB/dt9b8UsY2WClzul53vtlTd8NxBmOVqVf8B0H2hR/n5barZs1zi8MrMGpm54zhZ7MiICIiACIiAC0UhAwjUaezUIbapatSqOHDmCenUaYlQvYLGXa+tzRYKvHgFmfQbcdMv/GRsaeFx9Db2b/GkeXWq+GYRW2GuyQK4S+E+t94wdvqahctHLbTE+ZcNgvDarNlbs/s4WezIiAiIgAiIgAtFEIGc0NUZtCS4BrjIwK3kR3n//XfTp0xcb5+ZG4bJpKFYeKJVgHFWAE8Z233s3A3u2pJ83zM2BipUuwY8/Dsf111+fZQWrFEvMMk24JahiiNb7DPE6Z9v7mGz4rR4/bQxNBxD+PvYXPlt6I+qWvtFc/7VkgZoBWFNWERABERABEYgeAjEhXMvGNzB7zDpHT/eFpiX33/8A6tdviDFjxmDWvD8x9ed1F1SkVJkiaNKkMW5+rA0G27BrzwUFhOGNlobbQIOLb8Gk9c8aIvbdgGu4fPe34NE+YTA6JCQFbE8GREAEREAERCDSCcTEzlmR3knhXn+6EKSkpJhHhQoV0KhRIyQkGEOwMRw2H5iJSRsGYeP+ZFsolCxQw9w6tl7pm2yxJyMiIAIiIAIikJ0E7No5KyZGXLOzY2KxrPj4eCQmJppHLLbfVZsrFW1t+urO2z7CGIEdhGNpe1wl8/renmNr8MXSm7G0dFd0THgepQvW9jqvEoqACIiACIhAtBCIWeHKkbARC9t51Y8vdXQ/ocgXO5x45MmH8/HJcV7Vxy47Wb2CHrEgERsPTMuyTnbZqVK0rTHRKTnL8iIpATctqH9xN9P3ddbWtwOu+ord3xsTt77HlVUGGSOwWoEgYKAyIAIiIAIiEFEEtKpARHWXKhuJBLhdLLeN5faxCcWusKUJv2983lg+qzqW7vraFnsyIgIiIAIiIAKRQCBmR1yL5qtkTnoJtJN8scO0ngJHLr0JdtlJKJrosbjG5Xp6HCG2MttlJ6t2WeVF6rlikVbGzlu/Y/72D033gaNpuwNqyt7j6/Dlsm6GeB1r+r9eXLBOQPaUWQREQAREQATCnYAmZ4V7D6l+UUng1JkjpvvAzK3DbWvfFVWeNgVsHLxzObGtYBkSAREQAREQgSwI2DU5S64CWYBWtAgEg0CenPHgJgsPNJ+DasXb21LEHxtfMNwHqmHJrq9ssScjIiACIiACIhBuBCRcw61HVJ+YIlChcAvc3XgKbrx0FOLzlAm47fuOb8BXy27FmMXXY+eRZQHbkwEREAEREAERCCcCEq7h1BuqS8wSaFquFx5rvQ5tKg6whcGqPT/jzTn18du6p3Du/FlbbMqICIiACIiACISagIRrqHtA5YvAPwRy5yiAzjVeR9/m81C9eEdbuPy56UXTfWDxzi9ssScjIiACIiACIhBKAhKuoaSvskXABYHyhZuhV+NJuOnSj1E4TzkXKXy7tf/EJoxd/l+MXtwFqUeW+JZZqUVABERABEQgjAhIuIZRZ6gqIuBIoEm5O/Go4T5weaWHHW/7ff3XngkYPqchJq57AmfPn/bbjjKKgAiIgAiIQKgISLiGirzKFQEvCOTKkQ/XVn8VD7ZYgBolrvIiR9ZJkje9hFdmVMOi1M+yTqwUIiACIiACIhBGBCRcw6gzVBURcEfgkkJNcFejibi5zmgUyVveXTKv7x84uQVfr7gdnyy6FjsOL/I6nxKKgAiIgAiIQCgJSLiGkr7KFgEfCTQue4fpPtC20qM+5nSdfPXeX/HW3Mb4de1jOHPulOtEuisCIiACIiACYUJAwjVMOkLVEAFvCeS8KA+uqf4y+rVIQc0S13ibzWO6aZtfMVcfSEkd4zGdIkVABERABEQglAQkXENJX2WLQAAEyhVqhDsb/YJb6n6KovkqBmApPevBk9swbkVPfLzoamw/vDBgezIgAiIgAiIgAnYTkHC1m6jsiUA2E2hUpofpPpBY+XFbSl6z9ze8Pbcpfln7CE6fPWGLTRkRAREQAREQATsISLjaQVE2RCDEBHLE5cLV1f6Hh1ouRq2SnW2pzfT
Nr+HlmdWwcMcnttiTEREQAREQAREIlICEa6AElV8EwohA2fgG6NlwPLrV/RzF8lUOuGaHT+3ANyvvwqiUTth2aH7A9mRABERABERABAIhIOEaCD3lFYEwJdCwzG2m+0C7yk/aUsN1+ybjnXnNMWHNQKSdPWaLTRkRAREQAREQAV8JSLj6SkzpRSBCCFwUlwNXVRuG/i2Xonap62yp9Ywtb5juAwt2fGSLPRkRAREQAREQAV8ISLj6QktpRSACCZSJr4c7GvyE7vW+RPH8CQG34Mipnfh25d34MKUDth6aG7A9GRABERABERABbwlIuHpLSulEIMIJNLi4Ox5rvQ5XVHnalpas3zcV785riZ9X98epM0dssSkjIiACIiACIuCJgISrJzqKE4GoIxCHTlWHYkCr5bi01A22tG7W1uGm+8D87R/aYk9GREAEREAERMAdAQlXd2R0XwSimMDFBevg9gY/4NZ6Y1Eif7WAW3o0bTe+W3UvRi68ElsOzg7YngyIgAiIgAiIgCsCEq6uqOieCMQIgfoX32KsPrAWV1Z51pYWb9j/B96bfxl+Wt0PJ88cssWmjIiACIiACIiARUDC1SKhswjEMIGOVYfg4ctWok7p/7OFwuytb5vuA3O3j7DFnoyIgAiIgAiIAAlIuOp7IAIiYBIoVaA2etT/Fv+tPw4lC9QImMqxtD34YdV9GLGgHTYfmBmwPRkQAREQAREQAQlXfQdEQAQyEahb+iY8ctlqdEhIynTf3w8bDyTj/QVt8ONffXHi9AF/zSifCIiACIiACGjEVd8BERAB1wTaJww2BOxfoJC1I8zZ9q7pPjBn2/t2mJMNERABERCBGCSgEdcY7HQ1WQS8JVCyQE3TdaBH/e9AV4JAw/HT+4yR1z74YEFbbDwwPVBzyi8CIiACIhBjBCRcY6zD1VwR8IdAndJdzclbHas+50/2C/JsMkTrCEO8/mCI2OOn914QrxsiIAIiIAIi4IqAhKsrKronAiLgksCVVQYZy2etAZfRsiPMNdwGXp5ZDbO3vmOHOdkQAREQARGIcgISrlHewWqeCNhNoET+6ubGBdzAoLSxkUGg4cTpg8a6rw/i/fmtsWH/n4GaU34REAEREIEoJiDhGsWdq6aJQDAJcMvYgcbWsdxCFogLuKjNB2cZO29dge9X9cbRtL8DticDIiACIiAC0UdAwjX6+lQtEoFsJXBFlafxWOt1aHBxd1vKnbd9JF4x3AdmbX3LFnsyIgIiIAIiED0EJFyjpy/VEhEIGYHi+RPQvd6XuKPhTygTXy/gepw8cxg/r37I2D62Fdbv/z1gezIgAiIgAiIQHQQkXKOjH9UKEQgLArVLXof+LZfiqmrDEBeXI+A6bTk4Bx8ubI/vVt2DI2m7ArYnAyIgAiIgApFNQMI1svtPtReBsCTQrvKTpvtAwzK32VK/+dtH4ZUZ1TBzy5u22JMRERABERCByCQg4RqZ/aZai0DYEyiWrzK61f0cPRuOR9n4BgHX99TZoxi/ZgDendcC6/ZNCdieDIiACIiACEQeAQnXyOsz1VgEIopArZKd8VDLxbi62v+QIy5XwHXfemgeRqV0xLcre+HwqdSA7cmACIiACIhA5BCQcI2cvlJNs5HAiBEjsH37dpclDhs2DO++++4FcSdOnMDXX3+Np59+GjfccAPuvfdejBw5EidPnrwgrT83ypcvj6uuusqrrIsXL8bgwYOxf/9+r9JnR6LEyo8bmxesQ6MyPWwpbsGOj83VB2Zsed0WezIiAiIgAiIQ/gTizhvBUzWHDBmCpKQkM0kWST2ZUZwIRBSB+vXro1ixYpgyZQpy5syZqe5xcXGoXbs2Vq5cmXF/2bJl6N69O1atWpVxz7ro0KEDJk+ebH30+8xy69Wrh6VLl2Zp4+6778ZHH32Er776Ct26dcsyvS8JDh48aNqtU6cO2rRp40vWjLSr9/6KyesHYcfhRRn3ArkoX7gpOlZ9HtWLdwrEjPKKgAiIgAgEiUC7du2QnJxsWg9ET2rENUgdJLORT4D/wP73v/9l2ZAdO3aAQpei9dZbb8WSJUtw/PhxrF27Fvfffz969LBnhDHLijgkePTRR/HGG2/g2muvdbhrz+Uvv/yCPn36YMOGDX4brFniGvRrkYJrqr+MnBfl8duOlXHboQX4KOUqfLPiThw66Xqk3EobjufTp0+b3xl+b9wdZ86cCVrVp06dan7Xp02bFrQy/DF85MgRkI2CZwJ808P/Vy1fvtxzQsWKQBQQkHCNgk5UE4JHYNCgQZg9e7bHAp544gkzvm/fvvjiiy9MEZsvXz5Uq1YN7733ntfCla4G8+fPx3fffQeO4Lr7wd61axfGjx9vPrkeOHAgU92YZ9GiRTh27Bguv/xylzYoOH/66Sf8/vvvoC1X4dSpU2Ydvv/+e7P9tMewZ88ebNy40bymMGdZK1asMD/786dtpUdN94HGZe/wJ/sFeRamjsbLxuYF0ze/ekFcON945JFHUKBAAY/H22+/HbQm8Pv05JNP2vJmwNdKnjt3zmWWBQsWoFChQqhZs6bL77HLTDF6k29X2H90EVIQgWgnIOEa7T2s9gVEoFSpUrjlllvgLBAto/Qh/fzzz82PlkuNFefLee7cuahevTqaN2+OG2+80RS/TZo0wZo1azKZoaAtU6YMrrvuOvC1C90ZPvnkk4w0FJaNGzfOOD799NOMOI5e3Xnnnahatarpg9u+fXvT1rPPPgvH1zZ//vmnmYajyP/3f/+Hyy67DAULFjRFzQ8//ACmZ3jxxRfNcq6++uqMMvy5KJK3PG6uMxp3NZqISwo18cdEpjxnzp3EL2sfxVtzDX57J2aKi+QPdBWJpvDZZ5+Z33OKZleBbzIY+KDk7iHOVT7dEwERiG4CEq7R3b9qXYAE6CPKSVq9e/d2aYmjjgwUb8WLF3eZJqubFL8dO3Y0y+HICV/3URRSpN588804e/ZsJhNvvfUWKHRffz19UtJdd91ljp4yUdGiRTFhwgRwpNg5DBgwAKNHjzbdByhAOcGMYvn555/Hxx9/bCb/66+/cMUVV5h1eeedd8BRr2+++QbXX389KKT/85//ZNh+5plnMGfOHPz888/ORfn1uUaJq/BgiwW4tvqryHVRPr9sOGbacTgFHy+6BuNW3IGDJ7c6RoXt9R133GH2N/vc+XjooYfCtt7+VOzNN980v+Pu8nbu3Bn8ni5cuBD58+d3l0z3RUAEYoxA5lknMdZ4NVcEsiJAEffYY4/h5Zdfxocffoh77rknUxbLzzMhISHTfb5aHzhwYMY9jlxSnLoKXMGAo6H9+vUDRSgDJz7xtd+4cePAEVCOjjJwctaDDz5oXnN0tkqVKuboKVcQuPLKK0EXBVd+rRTBFMXMzxEua/SudevW5qgXR2179eplrohA4xQVDzzwgFkOBStHga1Qo0YN85JtbtGihXXbtvPllR5G/TLdzMlbC3f8O5rsbwEpqZ9iya6x6GRM3mpb6TF/zdieb8qGJFOgc7UFK7BfLrrI/XjCvn37wFfr8fHxyJs3r5XNPFPoWq
tIlChRIqOP6YKybt06HD161BxJ51sEbwO/x/St5Yh7jhyZd0Ljd5Z14et86/tEu0y/efNm7N69GxdffLH5HXWMZ5rDhw+b6XhNO4cOHeJlRjn08+UoK98q5Mrlegk1lsN2paWlmQ9g/O47BtaNtumCwQmWtMcHM17TjcedXUcbjtesE9+AkHutWrWwd+9e86GPvqVW2WwH20omDBwtZp/w3ynfjjB4w4dtYr/lyZPHrOemTZtMRnzQZN9nFVJTU8ERa6YvXLhwVskVLwIRRcD9/yEjqhmqrAgEj8Bzzz1nCj4ub+W8aoD1I8IfNcdAH1EuhWUdnnzPOJmLgcLTMVhC1xrVdYyzrjkSyjrMmjXLFBHWfeezVQZ9WjlqyuW6eFiv/SlsGWiHwZX4NSOy6U/hPOVw06Ufo1fjSShfuFnApZ49l4Zf1z6O4XMaYfWeXwK2F6iBPcdWY+qGIZi47gms2P291+b4IEXh6cothQ8kjOMDBb9/q1evNvuYo5V0+6DLR+nSpdGsWTOPI52OlWnUqBGKFCmSMaLvGEdxxjjLnYWijaPCFIQUhnwoolsKxSt9pa3AVSkopqzvHCcv0g4Pa/WNxMTEjHstW7a0sppnCj9+b1kOV/do0KCBOSLLtxMU9lbgWwna5BuF4cOHI3fu3CaHSy+91Hw7YpVlpXd3pogkbwpg8mCZ5EiefPvBlXcYKNRZHtvGstkPPJo2bYoPPvjAFJ7e8KEtvk2hLbr9sDzy5AMkmVPMU8i6CnwDwvaVK1fO7GfaePzxxzMeElzl0T0RiDQCEq6R1mOqb7YT4KgHXQYYbrrppkzlcySFwXk2L0dX6Df65ZdfZkrv6gNHbhjq1q2bKdoa4bLiM0U6fKDPK4On9WK3bNlipuEPIEfjHA+OtHJ0lT/Qf//9tymEKTjCIVQv3hF9m89D5xqvI3eOAgFXKfXIYnyyuDPGLu+BAyc2B2zPXwOT1qf7CTP/14Yrw/G09O8ARwg5WuZ4cKTUCtZoOyf9Oft9Wu4eFEccFWRfchIeA8UTR98Y6P7BBxOO6NkZ+H23/K1ZljUaz3rwjYP10EcBSZFpPfQxHT/zKFu2rFmlhg0bZtTXuY5MRzHKwOuePXuatujSQhHr/BDJB8/+/fubApBL0zGQM98iWCO95k03f+gSY4lTcqRgZZv4QEkha8U5ZudDJ/uN7WY7+SbFWz6OdsiMQp/i3qo7VzvhgwiFsnOgQGaeLl26mA8xjOfbIj5AK4hAtBCQcI2WnlQ7gkqAox70+bR+fK3CKFz5w0QxMGPGDOu2T+cKFSqY6fkD5RhmzpxpfrTiHeOsa74O3blzp/kj5ckPsHLlymYW/qCNGjXqgoM+tRQUHLHjj7qzELfKczxT6GZXaFNxAB5rvQ5Ny/WypcjFOz83Vx9I3pT1cme2FOhgZMXu77B89zcZd9KMrWzX/rOFLVeU4GiZ4/HSSy9lpOWavPy+sY8cRwz5Wtia5GS5s3BVCb7GZhxXq+DIqDWiTr/tX3/9NcOuHRcc3aPby7x588yyOPq3detW8zU57VNUMfB7yuWb+BDFwFU5+JkHBRkD7Vg+3OaNf/789ttvpt8rP06fPt3MQ7HMNZX53WW7KOqdA918mIbMrFUxyHDiRM+T9/ia/5VXXjHNsT7kyH+XdKVhoKuC8zrPvM+HSbolfPvtt6Y45qizt3yY3woU9awvJ1my7hyltv6NvvDCC1ayjDOFNd+q0O9827ZtGSuacMRZQQSihYCEa7T0pNoRdAJcu9R5Bj1/hOlfysCRH+eloRxfXbqrIF/dMnDEyAocTZs0aZL5kaM6VuBrUsfAySv8AbZ8YB3jHK/5mpGBP77OKyQ4Cma+imbgD65jYDuslQcsXz3+iGdniM9TBjdeOgr3NJ6CCoUD9609d/6M8ar+Sbw5pwH+2uN6Znsw2uc42mrZP3Z6j3VpjgzyQck6KHqsQF9Tbi7BMGbMGOt2xsoWfCPg+KDD18QcxWTfUdBQEFvfJ0vAZRix4YKbcFjfZ36v6LJAdxaG9evXB1yCNaL73//+N9PmF9xVjpMPGay3I1ZhFHoWM97jQxzdZRisNxHmBxd/LHcGRnHinBWstZn5by8lJcW6nXFmeda/E0f/Xl/5cCkwuihYgW9lLPceCn3nwCX56MbAwAdRcmLg6LDzCL0ZoT8iEIEEckZgnVVlEQgJAf4AcYKTNTnJqgQFLQUkR7P4w8JRTf7g8Iea9xkcf7ysfNaZgpfbyHLUk6/7OTGL68Fy9IgjbJxQZQWO+PL1aNu2bc3RX2t0ia9DPQXWhyKGo3f8Qb3vvvvMOnLEiSO2/PHlyBHrMXbsWNAe/eg4AYyjZnzdyF3EKJApbjnqRxYU7lzJgILeejXsqR52xFUt3h48Zm4dbk7gOnXmSEBmdx5ZitGLr0ODMreaE7iK5asSkD1PmX/f+Dz+PrbKbZIOXeth8ndL3cYzgitccHMJPuhw+TOuZmG9CrZcCSwDHHVjn1qjndZ9nt2t4euYxtdrvh7nahV8QOLrdMdgLW/leM/Xa+vBkD6wzsF6OOPawlkFCngG54chBiD5AAA6LElEQVQ453yWGw7vc/TVEqOOLgbOD5NMawljXjsGO/hYPr/ky8lzjsLWsSxeWw8pvGbZ/LeqIAKRTkDCNdJ7UPXPVgL8IaOotDYdYOGcUUy/M/rdUSDwla312pa+fvRv4w5a7gLz87UqR2koSKxAEeL4mpj3OXrF16XW6CxfDfLVPyeBOAbnJbQYN3ToUFPkWPXkvUsuuQS33XabufQShStHoygOKI64ziYPBpbDERwG+vxStJKBtSh+yZIls024mpUw/rSu8BAaXNwNk4ytY+dv/9C67fd5yc4vsdRYfYBbx15R+Sm/7bjLuPf4WkNo/+vb6irdzqPLkJI6Bp42ZOCDU6dOncwRea46wYcljp7yAcdxC16ueEFfUY4KMvCBgw8i9Ht1dnlxVRdf73GlAE4csoQjR4y5pBW3KLbeHvhq0zk9hTgDX7s7B8dVFqwNM5zTWJ+9XVGArDliS5HIiVh0veC/E7oyWIGToZyD4yi5FWcXH0fxyXpZbkBWOY5nTytUOKbTtQhEEgEJ10jqLdU12wjwx9Zd4IgqZ/s6Bv6YcZIGD/6YcCSnYsWKFyxZ5JjH8ZoCkoKUI67MzxEha3KWlY7+dJUqVTLvczSWI23u/Fq5KxaD4w8868itYLlLE0e/OEPZWrbHKoNn/hDTj4+TXFgXvnq0lvux0vGVdNeuXc1RYcY7igYrTXacC+Yujf+rPTJDwG45ODugYs+fP4dJ657G0p3py2fVLpX+mjsgo/9kzkq0WmWMW9ETJfJXQ8UiraxbF5z5SphikCOt1qia9arcSsyHJYpWClYKXGudYfpF+ypcmSerwIcYilYKPS7hRuHKwC2CPQlXywUlK/uMp0jj63vHEU8rH2fyM/BtgKdRSCu9t2f6h/IVP9s3evRo898C/10w0O/d2+Wm/OXjXE/HSVmuBLJzen0WgWgjcFG0NUjtEYHsIGDNfnZVFn+4OVLjj5hjHvooO
otWlsOZ/hSfdDugT5+zaOUrS7on0D+VvoD8AXdcf9WqK/NTKLsSrVYanmmfQtlZtFppWEdfxLmVLxjnhGJXoE+zWbiu5lvIm7NwwEXsOrocY5bcgC+Xdce+44H7Zq78+wdjNPdCn0R3FeVKA0fTMr9qd0x71VVXmX1IEUcxxb6mC4kV6M9I1w4GjhJaotWKdz5b3zdrHVgrnj61DM6T9Vy9YrcmOtElxRKtlh1XZ6tMazktV2mc79HlhcGaZOYYP3XqVPOjqxFQx3S+XvO1P915GPggQNHK0e1p06ZlrHVsRmbxx1c+7sxZk+r4b9jdv013eXVfBKKBgIRrNPSi2iACBgHOYuZMbY6G8geW/q+W8IgVQJdVeNBcfaD5Ja53OvOVA10HXp5ZDb9vHOpr1kzpXU3IypTgnw9njYUa0oxVqnbuW49P5//X7Ef2JQ9HX0o+wHDDCitwuSfHBxlLFDLeURhyZj1dXRgcJw5yeTQGxjn6vlpCka4pfNXNwNFOboPsHPjAxuA4mssJWtaMdke7TEfhxcA3DdYKFVmN7HLpNgYu/cXVC6zAiYLWqC7XW7YzcJUGPhzwYZBuNDw4gu3oluFNeb7yoU2+GXGcVMXRa2spsGjbSc0bhkojAiQg4arvgQhECQH637722mvm5CqKBGtGcZQ0z+tmFMhdEl1rf4DeTf5EpSKtvc7nKeFkw4/29dl1sMIYOfU1/LHxBew+usKrbH8lG7uW3ZB+PNBmSoY7B0fHrYl+liFrFJCfrSWwrDj6Nlr3OAP+uuuuMzcj4MQ/upkwWBPzeE13AgYKZH6PrKW2rJn0XO6Nr8S5YgDdT6zRXDPTP3+4ZikD/Z+5+QDL5ZsHKy1HKukLbgVrTWTeo3DmpCNriSwrjfOZ66NaqxRwMuA111xj+tHSd5eBvtjWjH/nvP5+5i5gDHxQID8eFPR8TU/x6Gr02czg9MdXPsxOdhT4bCdHkukzzEDfeU4KVRCBWCQg4RqLva42RyUB/vhzm1mOhllL4kRlQ71sVJViibi/2QzcUOsd5MsV+Gzq3UdX4rMlXfHFslvAiVbeBLoZcPJYViHOj/8Tc0IcxStdBOg64hy4/qglDjlZkJOyKPq4SgRn5VNIcr1PBopArnVKlwMGy4eUQpGrBFiBApZpHV/VWytmcJKftboF4z///HNT3HHFiqeeSp/s9v7771umzBUznnzySfMzBTN9VDnJzNpAwN3EIrrCJCUlmXWl+OYoJOvNUWe+vudoNIOV35pUaN78549VZyuNY5zzNddLtYQ9RSRX0KBAJj9O2LLa5mjLsu9oy1c+zMt2caMItpMj2fzMEWUydRxht8p2Ltfxs+O1Y710LQKRRiDOcIw/76nSnGzC/0kwZJHUTKM/IiACIhBuBI6f3mcKyLnb/hVOgdaxQ8IQtE/wvErAV8tuxZJd6buuBVJer0a/oXqJTheY4GgfX6978mGlSwDT0V/ZEnU0RAHLGeqWWOU9rkbB0XqO8DrfZ3qOulpLQjG9q0Dhydn/zn7Y3AGO9p0fqihaOQrM+1nZdi6PO4zxVTr9wrNbmFHQc5IcRz8d3TGc6+j82Rs+9EumqOeDCUewrVFyMs3udjrXX59FwF8CXHXEeusSiJ704znf3yornwiIgAiEhkD+XMXxn1rv4b6m01C56OW2VGLKhsF4bVZtYxes71zaW/X3T7aIVhrnZK19JzZeUA6FpyfRygyMtyb2ORqg2HMUp4yjbyxXtHB1nzP6vRGWHAmki4DjiCBt842As2jlfZZVq1Ytr2wzvWPgJElOEAymmONoJ7dEtsQjy+coqOUC4Q0Txzr7yoejqeyrUIhzx3rrWgTChYCEa7j0hOohAiIQdAIUrRSvFLEUs4GGv4/9hc+X3mgcNxkbC6zOZG7SBs+jsZkSZ/HhaNpujFv+785NWSRXtE0EODrKUU9OdORoJ7ekpZ8vxSddLxisyWc2FSkzIiACWRCQcM0CkKJFQASij0CL8vebqw+0Kv+ALY1bvvtbY/S1FqZsSDLt/bnpRew6sswW25aRzQdn4puVd1kfdc4GAhSo3FqVGz4wcPkx+vky0M+Vo67WFrfmTf0RAREIOgH5uAYdsQoQAREIZwIUhJxAtXF/si3VLJa/Cg4c34zzOGeLPWcjV1UbhnaV0yc1Ocfpc/AIcDky7kZ26tQpc6a/ry4CvtSMvrv0KeYEPOdd8Xyxo7QiEE4E7PJxTZ9+GU4tU11EQAREIBsJcMksLp01b/sIU8AeS9sTUOn7j1/oixqQQafMv617ytxZq27pG51i9DGYBDixjb672RHou+tpk5PsqIPKEIFwJSBXgXDtGdVLBEQgWwlw04LHWq8DNzEI98DJWjuPLA33aqp+IiACImA7AQlX25HKoAiIQKQS4Hax3DaW28dWNbaRDddw+uxxc6WBNOOsIAIiIAKxREDCNZZ6W20VARHwikDFIq1wT5Pf8X+1R6Jg7tJe5cnuRBxxHWeMvCqIgAiIQCwRkHCNpd5WW0VABHwi0OySe/BYm3VoXfEhn/JlV2KuZkCfVwUREAERiBUCEq6x0tNqpwiIgF8E8uSIR5cab+KB5nNQrXh7v2wEMxOX3lq445NgFiHbIiACIhA2BCRcw6YrVBEREIFwJlChcAvc3XgKbrx0FArlKRNWVeX6rpsPzAyrOqkyIiACIhAMAhKuwaAqmyIgAlFLoGm5XnjUWH2gTcUBYdVGrjTAHbYUREAERCCaCUi4RnPvqm0iIAJBIZA7RwF0rvE6WlXoGxT7/hjdf2Ijvta2sP6gUx4REIEIIiDhGkGdpaqKgAiED4GDJ7di/vYPw6dCRk3W7puEn1aH/zq0YQVNlREBEYgoAhKuEdVdqqwIiEC4EJi8/lmcOXcqXKqTUY/ZW9/BrK1vZXzWhQiIgAhEEwEJ12jqTbVFBEQgWwis2TsRKaljsqUsfwr5efVDYB0VREAERCDaCEi4RluPqj0iIAJBJ8DR1nAPnKy17/j6cK+m6icCIiACPhGQcPUJlxKLgAjEOoHpm1/F9sMLwx7DsbQ95rawYV9RVVAEREAEfCAg4eoDLCUVARGIbQKHTm5HJIy2Wr205eBsY1vYntZHnUVABEQg4gnkjPgWqAEiIAIikE0EJq8fhNPnTthSWr5zQJk070ztzA2c8DDMUOWkezsHNo7BolM50ahsD6BgRSC+kvvE+5YAaYfcx1sxdtnJXRgo3sCyeuH5yGbg6JYL7zvfyS47aQeBfUudS3f9uUxb1/d5NxR2itcHchdxX6ed09zHWTFZcbbS6SwCQSQg4RpEuDItAiIQPQS41NTC1NG2NKjoGeCJ7d6bGnExsDGv+/S9d7mPM2N2fQSkGEfjwcaR5D7x7P6ANwLGLjsUd12S3ddn7Wij3kPcx1sx2WVn7xJgQjurVM/ne8+7jw+Fnc5/AmUT3ddpvIc4x1zV7wASRzve0bUIZCsBD8/w2VoPFSYCIiACYU3AThcBjrZ6EqLBAnHkVFYKN1gly25UEOBIu6dR26hopBoR7gQ04hruPaT6iYAIhJzAjC2v
Y9uh+bbVI9V49c9RVE+v+B0Lo6uAp0Bb3oScacnocfYocuco6Dp5qzeBUwddxzne9eRuwHTe2slTxNHqhdfVexr+FIkX3ne+k112SjQAOHIZaAiFHZbpKXjTLvZ7Vn3vqQzFiYANBCRcbYAoEyIgAtFL4PCp1KBNyLJr1NVrO2lrzJUGetT/znWHefI3dZ3D9V277NgllOyyw9FGT6/bXdO48G642WEN7WjXhS3VHRGwnYBcBWxHKoMiIALRRIATstLOHouaJq3Y/T0mrnsiatqjhoiACMQWAQnX2OpvtVYERMAHAuv2TcGCHR/7kCMykiZveslolzFZS0EEREAEIoyAhGuEdZiqKwIikH0E7JyQlX219q6kb1fejU0HpnuXWKlEQAREIEwISLiGSUeoGiIgAuFFYOaW4dh6aG54Vcrm2nBb2COndtpsVeZEQAREIHgEJFyDx1aWRUAEIpQAl42avGFQhNbe+2ofOLFZ28J6j0spRUAEwoCAVhUIg05QFURABMKLQHyei/HcFYex88gypB5ZbBxL0o/DS3DyzMGAK8sNCBofTTeTUhA4EML/E9OP98e/HsANtd4NuF0yEMUE1o4Gjhi7mMUba7lymTIFEQgRgRD+7zJELVaxIiACIuAlgTLx9cCjMe7IyLH3+LoMEWuJWl9ft1O4dvhH/3Ipq1AKVzZszrb3UCJ/NbSu2D+jnd5cTNmQhKkbst7ZqkrRtujdNNmtSW/t0MBLHc+7tbNxfzJGLPRuZyu77PRu8ieqFEt0W6fHJ8e5jXOMsMtO+4TB6JCQ5Gjanus1o9N3VeMuZRKu9jCVFb8ISLj6hU2ZREAEYpUABR6PeqVvykBw6NQOpBqjsTsdRmf3Hd+QER8JF+PXDDDbVbPktZFQXdVRBEQgRglIuMZox6vZIiAC9hEonKccCpcsh1oOou/E6f2ZXAzobrDr6HL7Cg2CJU7WeqD5bEPAVvfKekLRRCAh66RF81XymMhbOx6NGJEshyOOgQZf7GTVNm/rY5cdk2WgAJRfBMKYQNx5I3iq35AhQ5CUlGQmySKpJzOKEwEREIGYJ3Dm3ElTzB7dPA61571h8hhVJifW5TF8B8IkVCjcAn0M8Rpn/KcQHQQsd4WsXDY8tnZ84r+uAl2SPSZVpAi4ItCuXTskJyebUYHoSY24uqKreyIgAiIQBAI5L8oLCkOUPGlYTxeudzeegp3xRf8dnTVGZncabgcnbJgE5k8TuATYOGPk9ZY6n/qTXXlEQAREIKgEJFyDilfGRUAERCBrAmXi6xuTwOq7nQS2859VDQ6fSs3amA0pFqV+Zvq7XlnF/ZJgC1NH4+CJLSiSryKalO1pQ6kyIQIiIAJZE5BwzZqRUoiACIhAthNwPwlssTEJ7J/luYxzsCaBcdcw1qH+xd1ctj1lx2hsPDANfP0s4eoSkW6KgAgEgYCEaxCgyqQIiIAIBIPAv5PAOmeYzzQJjILWcDOwaxIYJ2tRvJYr1DijPF2IgAiIQCgJSLiGkr7KFgEREIEACeTLVQwJxa4wD8uUNQmMItbaPIGjtGfOnbKSeHU+ey7N3FmrT7PZyJuzkFd5lEgEREAEgklAwjWYdGVbBERABFwRyFME4ELuDLy2OViTwMyJYA6203cCSxezlrvBidMHHFJceLn76EpzstbtDX64MFJ3REAERCCbCUi4ZjNwFScCIiACKN4ACMGSQv/uBHZ7RidwJ7B0EbvUcDNI397WeRLYyr9/xK9rH8M11V/OyKeLGCMQgu9rjBFWc70kIOHqJSglEwEREIFoJGBNAqvrsBPYYe4E9o+/rOVqMG3zK6a/a7NL7olGDGqTCIhAhBCQcI2QjlI1RUAERCC7CBQydgLjUbPEv9u/0qWAInbjgWRjJYHE7KqKyhEBERCBTAQkXDPh0AcREAEREAFXBPLlKmpMAGvnKsrWe0eOHEFKSgoaNWqEQoU0IcwOuL2b/GmayZvLfn9qO+onGyLgCwEJV19oKa0IiIAIiICtBJYtW4bp06dj2pzfsChlMTau+XeThSrVy6Bx00Zo0egK1K1bFx06dLC17FgxVqVYYqw0Ve2MAQISrjHQyWqiCIiACIQjgTGfjUa/hx7A+YtOoVS1syjTDGh2N1ChHrBzDY+dWLXmFyQPn4Q9W8/g7nt64sORn4RjU1QnERCBbCIg4ZpNoFWMCIiACEQTgd5NkwNqzv/d1gHffzkVV/QGmnS90FSZGsaKYcaRHs5g6UTgy0/GYMKv4/Hjd7+gefPmVqTOIiACMUTgohhqq5oqAiIgAuFB4MhmIGVI+sHrGArz5s1DXFwcFqyYivs/dy1aXeGofzXQd9x55C+3Dy1atMCQIQY/hewjsG+JMQQ+DeBZQQRCSEDCNYTwVbQIiECMEjCFa5IhXI0jxoRrmzatUcdwVe3+ChBf0vf+v/F54MahQFJSEjZs2OC7AeXwj8Ds/sD4RIBnBREIIQEJ1xDCV9EiIAIiEEsEOl3bFjnzncE1jwTW6ipNgQbGSl2db2gfmCHlFgERiDgCEq4R12WqsAiIgAhEHoERI0Zg8q/T0e5ee+rezvCN3bFzG5KGJNljUFZEQAQigoCEa0R0kyopAiIgApFL4OjRo3hu6CA07ALTTcCOluTKAyTeexZDkoZgypQpdpiMWhuPT44DjxELEqO2jWpY7BCQcI2dvlZLRUAERCAkBLhWa+r2PaZwtbMCtRKBoqVzY80aY+0sBREQgZggkDMmWqlGioAIiIAI2EqA27+ePHMIeXMWRtn4Bh5tU7gWK5cDJSqe9ZjOn8jC5dKwbNVCf7IqjwiIQAQSkHCNwE5TlUVABEQg1ATGr+6PjQemoUrRtshqTdfZ85JRpqb9opUMil0CrFy1NNQ4VL4IiEA2EZCrQDaBVjEiIAIiEKsE5s6dj5KVg9P6UgnAhnVbgmNcVkVABMKOgIRr2HWJKiQCIiAC0UNg+vTp2LxhmzkyGoxWFSgKHDpwLBimZVMERCAMCUi4hmGnqEoiIAIiEMkEjh8/jvfeew/16tVD27Zt0ahpPaT+FZwW/W3sQVApoWxwjMuqCIhA2BGQj2vYdYkqJAIiEPUEyiYC956PumYuWLAAo0ePxqhRo3Du3DnkzJkTDz/8MPIVyI0xPy4KSnv3Gl4C1WtUD4ptGXUg0CXZ4YMuRSB0BDTiGjr2KlkEREAEooLAJ598ghYtWqBZs2b46KOPkJaWhjNnzpji9fHHH0ejBk2xc3UcTh23v7kHtl+ERnVb2W9YFkVABMKSgEZcw7JbVCkREAERCG8CqRuO4c9vgbcmzsSpE9Nx/nz6CPKpU6fMiufOnRtPPfUUSpYsaboMnEk7j62LgWqX2deuw3uA/dvjUKNGDfuMypIIiEBYE5BwDevuUeVEQAREIPwIPProo3j11YXImRs4k+Z6mav8+fODo60MCQkJuLn7fzDlox9sFa7zxxk7cdWric6dO4cfpDCqUfuEwWZtiuarFEa1UlVEwD8CEq7+cVMuERABEYhZAq+88grmrP0cc37Z5ZJBjhw58MwzzyBv3rwZ8e+
/M8qYRPUbfh52Atc9lXHb74vNhsvsop+BWbNGomDBgn7biYWMHRKSYqGZamOMEJCPa4x0tJopAiIgAnYS6PFMDVRpCuTIFXeB2dKlS5uTshwjihUrhvff+RCrpwFblznG+Hc97kng1TeHoVUr+bf6R1C5RCAyCUi4Rma/qdYiIAIiEFIC3C3rqXs/Ro64XMiTJ0+mugwenP5qOtNN48Ntt91muAx0xdhHgYM7nWO9+3z2DPBZ3xzmigU7tuzB1KlTvcuoVCIgAlFBQMI1KrpRjRABEYgoAmkHgZ3G0CMPXkdgeOONN3DXXXehX79+qFChAjgZi6F69eq499573bbo6y+/w/PDBmFkT2DJL26TuYxY8B3w2rVAoVyV8Oyzz2LOnDno0KEDihcvjh49euCrr77CoUOHXObVzQAJ7FuS/n3lWUEEQkhAwjWE8FW0CIhAjBLYa/z4j09MP3gdYWHQoEEYOHAgKF7p7zp+/HgULlzYbIW70VbHJj7z5HNYunQpUmdVxM9Dc2DHSuDUUccU/16fPQ1wk4FJw4EZn+TAs4MHYfXK9WAdKFy3bt2KYcOGmYKV4rVIkSLo1KkThg8fjnXr1v1rSFeBEZjdP/37yrOCCISQgCZnhRC+ihYBERCBSCPQt29fvPvuuxgzZgxuv/12s/pcjuqnn37CI488gltvvdWrJnFXreULN2PAw/0w9vUvsGv7fhQvfxGKlT+HUgnAvq3GUlfb4swjvnB+Y53Ylhgx43k0b948k/3y5cujd+/e5nHy5En8+uuv5vHSSy+hf//+aNiwIa655hrzkD9sJnT6IAIRSUDCNSK7TZUWAREQgewnQFH6/fffmyOszktQtWzZEk8//bTPlXrjtbfAY+3atUhJScHc+TOxaNEidGraGK36XI46deqgdu3aXtnlKgZdu3Y1D2aYOXMmfvnlF7O+L7zwAsqVK4drr702Q8jmypXLK7tKJAIiED4EJFzDpy9UExEQAREISwJHjhzBTTfdhGXLluGPP/5wO5OfI5v+BvrG8ujevbu/Ji7I17p1a/B48cUXsWbNmozR2BtuuMH0ybVGYnmmqI3WMGJhO7NpZQvWR5eab0ZrM9WuGCEgH9cY6Wg1UwREQAT8IbBlyxZceeWV2L59O37//Xe3otUf29mZh+4MAwYMwJQpU7B//358/PHHyJcvH7iZwiWXXGIKXPrKLlkSeT7HWXHcuD8ZPFKPRF/bsmq74qOPgIRr9PWpWiQCIiACthCgiGvfvr25kQBFa61atTLsUgRtPDAtIsVQ0aJFzaW5vvzySxw8eBCTJk1CkyZN8Mknn5g+sRz5pX8sRa4/YfPmzf5kUx4REAEvCEi4egFJSURABEQg1gjQJYAjrfQvpWjlpgKOYfzq/hixIBE8R3ro2LEj3nzzTXMVgsWLF+OOO+7AggULwPuWyP3iiy9w4MABr5pKv9rnnnvOq7RKJAIi4BsBCVffeCm1CIiACEQ9gW+//dYUrdddd525WkAsTWJq0KCBOcls1qxZpnvEyy+/jOPHj+POO+8Ed//iurEUuZxM5i78/PPP4LJgnMC2a5frbXHd5dV9ERABzwQkXD3zUawIiIAIxBSBDz/80JyIxXVa+eo8lgMnbN1zzz344YcfcOzYMfNcuXJlvPrqq6DPLEXuU089Za5eYHFiusmTJ5sfOVLNZb+4RJeCCIiAPQQkXO3hKCsiIAIiEPEEuPYpd70aOnQoXnvttYhvj50N4KgzVyMYOXKkORLLEVmOqE6cOBFt2rRB2bJlTZGblJSUUSzXld2zZ4+5BNfzzz+fcd+XCy4RtnOnn/vj+lKQ0opAhBDQclgR0lGqpgiIQBQRKJsI3Hs+rBr0+OOPg6/FublAnz59wqpu4VgZbmbAgyKfO3RZGx+MGjUKFLmnT5/OVG1uUTt//nx89NFHKFWqVKY468PRo0dBNw2K1dnzk7F8yWqcTjtjRl9crjgaN22I5o0uR926dU0RbeXLlnMrYxmtUweBPEWypTgVIgLuCEi4uiOj+yIgAiIQIwT4OpyC66uvvkK3bt1ipNX2NbNatWp46KGHzINbzh46dMil8alTp5qic/To0bj66qszpZk9ezb6DeiN5ctWo9gl51DUOFoYS9pyF7GTR4ydxLbtw9qtUzF/VDL2bD2DDp3a4b13RqJq1aqZ7ATtQ/EGQTMtwyLgCwEJV19oKa0IiIAIRBGB8+fPm/6s9MnkklCcRa/gP4Fp06a5Fa20StcBHtzwgK4DzzzzjFnY8LdeR/+HHkY9Q8veNRIolHkBB6cKncGmhcDvHySDgvmtt97Cgw8+6JRGH0UgeglIuEZv36plIiACIuCWwL59+0zRunHjRnO5q6ZNm7pNqwjvCNBdgNvO0k0gR44cuOii9Gkk586dw9mzZ83DsjRo0CDTJWDe/DnYmbob1xm75da83Ir1fK7cBLh71Hl8nwT069fP9LP1NAGsfcJg02DRfJU8G1asCEQAAQnXCOgkVVEEREAE7CRAn0xu4UphxZnvCQnG+2iFgAmsWLEC119/PQoVKpTpiI+Pz/jseH39fzqborXfd0Degr4X3zUJWD8X+H7wRIwdO9atm0eHBCOhgghECQEJ1yjpSDVDBEQgNggsW7YMixYtwtpVi7FuZQrWbdiMtVv+Ruli8ahdoxJq122MhFqNTN9H7nrlHObNm2eKVr5m5kQgLrCvYA8BbjzgbRgyZAiWLFqOGwb5J1qtcqq2ABLvBrp37w6OmushxCKjc7QSkHCN1p5Vu0RABKKOwGeffYaBDz2A82dOolrJ06hmTE7vVhuo2wHYc3Q/VqXux8o/F+Hbsbmx+e809O7dGx988EEGh99++80UrVdddRW++eabjPv+XHSp+SZOnj6IvLk0y9xXfnx4eOGF51HH6LfqrX3NfWH6ZjcB21YAfQfcjYk//3lhAt0RgSgiIOEaRZ2ppoiACEQIgTRjWaF9S9MrW7w+kDtr8dfrv13x8Rc/4PnrgWeuyaqdaXjH0C/9PxyBieO/x7jvx2PDhg247bbbzHVaR4wYkZWBLOPLxmuWeZaQ3CQYNuwFlK4ah2secZPAj9uX9wS+fnQWOJLLXbsURCBaCUi4RmvPql0iIALhS2DvEmBCu/T6dTYUJtd1dRM4OteiRQvkzw3MfQJoXtlNQqfbfQ3zHYzR2P/7YI+Zn9FPPPEEXnzxRaeU+pjdBGbMSkaT7unrs9pVdknje1Gx8WmsWGV8t4IRxicCO6cBZdoCXZKDUYJsioBXBLRzlleYlEgEREAEQkPg8jat0agCcOxt70WrVdMaxrJKK4zBt6Qu6XfuvttwhlQIKQH6KB/YdwQlKtlfjWLlgeWG77OCCEQzAQnXaO5dtU0ERCCiCdzQ5SqknT6DOcZIayBhcGfgiprATdcbTpUKISWwdGm6i0jJSvZXo0RFYPP6HfYblkURCCMCEq5h1BmqigiIgAhYBOiH+tOESXjtRsMFNod11//zF72M0dfVmzEkKcl/I8oZMIG5c+eisDESnjc+YFMXGOAuW6dOnjH9mR0jRyxsBx
7jV/d3vK1rEYhIAhKuEdltqrQIiEA0Ezhx4gSGDR2Ce9oAA20aJL24EPDR7eeRZEze4U5ZCqEhsG3bNuQvEheUwgsWSze7d+/eTPY37k8Gj9QjQfJ/zVSaPohAcAlIuAaXr6yLgAiIgM8EuJvV1u070ceYB2Nn6NECuKR4LixZIgFjJ1dfbDVr1gz7tp33JYvXafdsSk/KNXoVRCBaCUi4RmvPql0iIAIRS2DTpk3IkzMOtcrY34RqJU5j8fzpARsesSARj0+OA88K3hNo2LAh0o4DB3d6n8fblHs2A8VKFkSxYv8MvXqbUelEIIIISLhGUGepqiIgArFBYMPaVah58XlDvNrf3uqGf+XixZp5bj9Z7yzWrVvXTLhvq3fpfUm1fxtQpWolX7IorQhEHAEJ14jrMlVYBEQg2gmsXrHIEK7BaeWlZYE1G1Nx6NCh4BQgqx4JVKhQARUql0EwhOuh1JyoX7uZx/IVKQKRTkDCNdJ7UPUXARGIOgKr164PipsAQdUtl45Lo66h+9pc3/lGLPgmB/hq366wYR6wds4ZtG2baJdJ2RGBsCQg4RqW3aJKiYAIRDWBsonAvcYEHR68dgq1a9XEylSnmzZ9XGK8TmZo3Lhx+oX+ZjuBt956C8WLlcTkt+wp+uxpYOJrceg34H706NHDHqPOVlq9CXCXN54VRCCEBCRcQwhfRYuACIiAKwK1G7TAqiBM3mFZy4316evWqID4+CAsJOqqMbrnksAH736MHSuB6Z+4jPbp5nhjF9/cOQpg+Ovv+ZTPp8TFG6Q/ZPGsIAIhJCDhGkL4KloEREAEXBGoWrWaOeJ66ISr2MDurdlljLY2kR9kYBQDz3311VfjwQcfxNyxwOpp/tv77Q3DRWAWMO1P44+CCMQAgSDMWY0BamqiCIiACASRQEKCsQWSEegu0Cr90rbS1vydE91atLXNngz5T4AuA+fPn8c7w97B9hVA215Arrze2duyBKBoPWQ8iNBOvXr13GasUjS9v8vGa7TULSRFRAwBCdeI6SpVVAREIFYIVK1aFW1aNcczP83DHwPta/XQXw1bOfPi8ssvt8+oLAVE4O2330b37t3R9aYu+GrlYbTscQYXVwPiS7g2u387sH4OkDwKqF03AQtn/AZ+XzyF3k2TPUUrTgQiioCEa0R1lyorAiIQKwSefGYwrrnmGvT5Enjv1sBbPW8TMOgn4NNP3/M4Ohd4SbLgK4FWrVph/ZotePTRR/FB0gdm9kKl4lDskvMoWQU4eRjGblvA/m1xOHn0vBn/5JNPYtiwYb4WpfQiEPEEJFwjvgvVABEQgWgkYPlAckTujpZA88qBtfKK14G7bu1i26zzLjXfxMnTB5E3V5HAKqbcJoGCBQvi/fffx4ABA5CSkmIeE3+bgMU/bUClypegTdOGaNm7LerUqQNuYlCqVCmRE4GYJCDhGpPdrkaLgAiElEDaQWMIbWl6FYrXN6aEuxZ/9F2c+Ot4tPjfZmwYauyKVNL3WqedBeo/BxxPAz764mffDbjJIX9JN2ACvJ2amopZs2bhyy+/xIEDB7B8+XJTrAZoVtlFIGoISLhGTVeqISIgAhFDYO8SYEK79OpybUwXa7labVm3fhNat2yChGdS8MFtQG8f3FNfnwI8/K2hi3PlRN++95m+kHQ/uPLKK9G+fXsUKFDAKsaW84ETm5GSOsYrW43L3oGi+Sq5TTt1wxC3cY4R4WaHE6GqFEt0rGKm64Wpo3HwxJZM99YuT8X0iavw+0/LsHf3YeTKzZ/mODz11FNuRasrO5mM/vOhSL6KaFK2p6so3+6NTwR2TgPKGBO9uiT7llepRcBGAhKuNsKUKREQAREIBoGZcxbis88+w+23344pfwED2sPcEra4C9155lz6agTvJgMfzgCuv7Y9fpxgKFgjdOzYESNHjgTdDxhat25t+tFSyDZrFvgSWRSuUzYkmbaz+kOB50m4Rqqd9gmDPQrXlB2jsfHANHPL1zVG/6z6w/Bd3W7MmcsNnDFGxRlOp51B8VLxeO45Y6jcTbDsuInOuE3OtgjXDIu6EIHQEpBwDS1/lS4CIiACXhHgjkj169fHowMeQOuXZ5p56DpQ3XB1rFMO2HvEEKzGpgWrdsbh2KnzqHBJaXzwwRD07t07w/67776baQRv5syZWLhwoTmyV7Ro0QwRy9HY8uXLZ+TThT0EDh48iKlfbseMX4Dd6wyxmssQq6fTbVui1Srp7sfbI0eOHNZHnUVABP4hEGesIZc+RdENkiFDhiApKcmMzSKpGwu6LQIiIAIikIlAarLXrgKZ8v3zYfHixViyZAkWL5yLJYsWYPHy1ah4SRm0aNkSTZq3NlcN4LqenPDjHDji6ihmHeMplOLi4nDmzBlwpjt9LRXsJcAHhZZGP5Gxq5ArVy60a9cOkyZNchUduntyFQgd+ygpmd/r5ORkszWB6EmNuEbJF0LNEAERiB0CDRs2BI8777zT50bfe++9+OOPP/DTTz/h5MmTmfKfPWvM5PonvPPOO9alzjYSaNKkien2wbVbXYXTp0/jxReNPVwVREAEXBLQlq8useimCIiACEQvAa5WkC9fPrcNZDyFsUJwCHTr1g333XffBcbz5MljruXaqFGjC+J0QwREIJ2AhKu+CSIgAiIQYwS4BijXDHUOFE5VqlRBnz59nKP02UYCX331FUaNGoWKFSsid25jVtY/oVChQh4nZFnpdBaBWCYg4RrLva+2i4AIxCyBW265xfR1pVi1AkUU3QcaNGiQ4YtmxelsD4Hnn38et956KwYOHIjNmzeb/shWH7z00kvImzevPQXJighEKQEJ1yjtWDVLBERABLIiMHz4cJQuXToj2ejRo80F77kzEydSDB06NCNOF4ET4HJmzz77rLkkGUUqw9ixY5E/f34kJib65bMceK1kQQQii4AmZ0VWf6m2IiACImAbAY70jRgxAtxelluNdu3a1bTNXZu4xmvfvn3BJbM4Uatq1aq2lRtrhtavX2+uwbt9+3ZzYhwfCqyQkJAAug4ULlzYuqWzCIiABwIacfUAR1EiIAIiEBQCJRoA3DGLB69DGK666io89thjeP311zPVgn6uS5cuxYkTJ8z1Yz/55JNM8frgHYGJEyeaS4vRBYDLizmKVstCp06d0KJFC+tjeJ5bvZn+feVZQQRCSEDCNYTwVbQIiECMEshdJH2bV271yusQB+u1tXM16DIwbdo09O/fH3fddRd69eplClnndPrsmgB3KOMWuzfccIM50hrRmzoUb5D+neVZQQRCSEDCNYTwVbQIiIAIRAKBF154ARw5pIjlxK3JkydHQrVDWseHHnoI/fr1Ax8KuOmDggiIgD0EJFzt4SgrIiACIhDVBOhSwN26uOsTX21zkpHChQT27t2La6+9Fh9//DG+++470w3jwlS6IwIi4C8BCVd/ySmfCIiACMQYAW4hy5UHPvzwQ7zyyiu44oorsGrVqhij4L65c+fOxWWXXYbU1FTMnj07Y7Kb+xyKEQER8JWAhKuvxJReBERABGKcwN13322OvubIkcOcu
MWVCWI9fP7556ZorV+/vila6R+sIAIiYD8BCVf7mcqiCIiACEQ9gRo1amDKlCl45plnzO1Le/TogUOHDkV9u101MCkpCWz/448/jnHjxnncTtdVft0TARHwnoCEq/eslFIEREAERMCJwODBg/H7778jJSXFnLg1YcIEpxTR+/Hs2bP473//iyFDhuCjjz7CsGHDorexapkIhAkBCdcw6QhVQwREIIYIpCYDI+PSD15HeKCvKydudejQAV26dMETTzwR4S3Kuvpr1qwxXQO4NitXW+ByYVEdZvcHJhgbJ/CsIAIhJCDhGkL4KloEREAEooVA7ty5zWWfPv30U7z//vvmzlsUs9EYfvnlF3NTgfj4eHNTgcsvvzwam5m5TfuMvkxNBnhWEIEQEpBwDSF8FS0CIiAC0UaAvp4UrNzCtGHDhuAi/NEUhg8fjs6dO+Pmm282fXzLli0bTc1TW0Qg7AlIuIZ9F6mCIiACIhBZBCpXrgyOSr744ovmIvy33HIL9uzZE1mNcFHbvn37mruIvfrqq+aososkuiUCIhBkAhKuQQYs8yIgAiIQqwTo6zpjxgzQH5Q7bv3www8RiWL37t3gBgxc8urHH3/Eww8/HJHtUKVFIBoISLhGQy+qDSIgAiIQpgRat25tug785z//MRfkHzhwYJjW1HW1OPmKmwpwRyxeX3/99a4T6q4IiEC2EJBwzRbMKkQEREAEYpvAO++8g7Fjx+Kzzz5Ds2bNMH/+/LAHMmbMGHOSWZMmTcxNBS699NKwr7MqKALRTkDCNdp7WO0TAREQgTAhQF9XTtwqV64cmjdvjtdeey1ManZhNQYNGoSePXvi6aefNgU3V01QEAERCD0BCdfQ94FqIAIiIAIxQ4Cilb6uFK2PPPII6EKwY8eOsGl/WloaunfvjqFDh2L06NHmOWwqp4qIgAhAwlVfAhEQAREQgWwnQF/XefPmmaKVE7e+/vrrbK+Dc4GrVq0y/VnpxjBz5kzccccdzkn0WQREIMQEJFxD3AEqXgREIAYJlGgAdP4z/eB1jAbL1/X2229Ht27dwOWmQhV+/vlnU7QWL17c9GflhCwFBwI1egKNBwM8K4hACAlIuIYQvooWARGIUQK5iwBlE9MPXsd4oNvA999/b7oQcPSVo53ZGV5//XVztYDbbrsNv/32G0qXLp2dxUdGWdV7GsI1CeBZQQRCSEDCNYTwVbQIiIAIiEA6Afq6cuJWjRo10KZNG3PzAl/YHDx40FypYNOmTb5kQ58+fcx1Wd944w1w5QMFERCB8CaQM7yrp9qJgAiIgAjECoGSJUuavq4Urg8++KA58koxyZ24nMPs2bPx1VdfYc3S2Vi9bgO27TqUkaRA3lyoXa08alzaEFVr1sXgwcYrbqewc+dO04c1JSUF48ePN7dxdUqijyIgAmFIQCOuYdgpqpIIiIAIxDIB+rouXrwYhw4dMnfc4tqvjuGD9981/VHXJI9EjZyL8FjiIcx4FDjyFrDxBWBsr9PoWnUjzm/6Dq++NBQtGl9qTgSzbEyfPh2tWrUy7XNTgc6dO1tROouACIQ5AY24hnkHqXoiIAIiEIsELF/XJ598Epy8xa1j//e//6HbDe0xZcZifHQ7cNdlaRegKZgHqFzCmPdWLz1q7sYzePz7VWjRogWSkpJQvnx59OrVC7feeiu4wUDOnPoZvACibohAGBPQiGsYd46qJgIiIAKxTuDFF1/EhAkTMHHiRHDGP/Yuxh5j34K7vJz036IKMO0RIKmLcRjC9d5778Wzzz6LL774QqI11r9can9EEpBwjchuU6VFQAREIHYIXHvttTh98gjubg1M7g+UKOh72wcb3gDnRwBnz5413Qx8t6AcIiAC4UBAwjUcekF1EAERiC0CqcnAyLj0g9cKHgnc1eMm5MdhfNjDYzKvIil8O3XqhN27d3uVXon+ITDbADehHcCzggiEkICEawjhq2gREAEREAHPBL755ht88vm3hmg97zmhl7EdagGPdjSOB+/0MoeSmQT2LQFSkwGeFUQghAQkXEMIX0WLgAiIgAh4JvDmq8PwUlfgypqe0/kS+8y1wPLFc81ND3zJp7QiIAKhJyDhGvo+UA1EQAREQARcEDh58iSWLFuJGxq4iAzgVqG8QINSB7A4ZV4AVpRVBEQgFAQkXENBXWWKgAiIgAhkSWD58uXIk/M8qgdhB9bqpYAl8/7Msg5KIAIiEF4EJFzDqz9UGxEQAREQgX8IrFixAjVLnwsKj9plgcXL1wTFtoyKgAgEj4CEa/DYyrIIiIAIiEAABObPmRE04Vq3HLDj78PYsWNHADVUVhEQgewmIOGa3cRVngiIgAiIgFcEUlNT/Vqz1RvjpeLTU+3atcub5EojAiIQJgQkXMOkI1QNERABERCBzARaXnY5Vu7MfM+uT0u2Ablz5UDjxo3tMik7IiAC2UBAwjUbIKsIERABERAB3wk0atwEq3bl8j2jFzkoXBtfWtGLlEoiAiIQTgRyhlNlVBcREAERiAkC8ZUM1TQ4vam8VnBJoH79+ti85zRSDwFlC7tM4vfNNcbGWY2bNvc7f8xlrNHT6IREQN/XmOv6cGuwhGu49YjqIwIiEP0ETOGaFP3tDLCFpUuXRr1alTF34yZ0bRigMafsa/bH49ZWxhZaCt4RqN7Tu3RKJQJBJiBXgSADlnkREAEREAH/Cdze6wH0/zY3Dhz334ZzzneTgWMXlcKNN97oHKXPIiACYU5AwjXMO0jVEwEREIFYJvDwww+jevWauGWkPRTWGi4Cfb8CXnnnUxQsWNAeo7IiAiKQbQQkXLMNtQoSAREQARHwh8C48X9iyl/A+9P8yZ05T7vXgLdfegqtWrXKHKFPIiACEUFAwjUiukmVFAEREIHYJVCsWDF8/vnn6PMl8MZU/zhwFYHSjwA5CpRE38de8M+IcomACIScgIRryLtAFRABERABEciKwG233YbJkydj4DcAR00XG0LU29BvLNBwKFC5Wm1s3fG3t9mUTgREIAwJSLiGYaeoSiIgAiIgAhcS6NChA44cOYKaza5CI0OIDpkATFgGHHQxcYsbF3w0C2j9CjBiZg4kDR6EuSkrLzSqOyIgAhFFQMthRVR3qbIiIAJRQWDfEmDOgPSmtHwDKN4gKpqVHY3ghKr3P5uI+pd9gPeGv4Sk8ZvNYhsbewm0qQrsOGig3ZQT2/efQcliBdHqsjaY/vlgNG+uNVsD6p/Z/YH9S4Fi9YFWbwZkSplFIBACEq6B0FNeERABEfCHwClDXaUmp+fktYLPBO677z7wSE1NxaJFi5CSkoKUhQtQvkZFDHuoBbh5Qb169Xy2qwxuCPBha6cxO+78eTcJdFsEsoeAhGv2cFYpIiACIiACQSBQtmxZ8OjcuXMQrMukCIhAuBGQj2u49YjqIwIiIAIiIAIiIAIi4JKAhKtLLLopAiIgAiIgAiIgAiIQbgQkXMOtR1QfERABERABERABERABlwQkXF1i0U0REAEREAEREAEREIFwIyDhGm49ovqIgAiIgAiIgAiIgAi4JKBVBVxi0U0R
EAERCAMCqcnpSxB5U5XGg92nOrIZWDvGfbxjjF12qt8BxFdytJz5OmVI5s/uPoWbnTJtgbKJ7mprcB4NHNniPt6KsctOfEWgek/L6oXn1GTvvkNZ2bnQsu6IQEgISLiGBLsKFQEREAGDQO7CQJ4i7lHsTAa8FXhZCc6UJPflOMbYZYfCzKNw9bI+4WaHfDwJ1zWjvROKdtkhH0/C1dvvUFZ2PH1PHb8/uhaBIBOQq0CQAcu8CIiACFxAoEQDoKAxUsYds7Rr1gV4dCMMCVjf00o3hGHlVKVYIhB33gieGjxkyBAkJSWZSbJI6smM4kRABERABERABERABGKUQLt27ZCcnGy2PhA9qRHXGP0CqdkiIAIiIAIiIAIiEGkEJFwjrcdUXxEQAREQAREQARGIUQISrjHa8Wq2CIiACIiACIiACEQaAQnXSOsx1VcEREAEREAEREAEYpSAhGuMdryaLQIiIAIiIAIiIAKRRkDCNdJ6TPUVAREQAREQAREQgRglIOEaox2vZouACIiACIiACIhApBGQcI20HlN9RUAEREAEREAERCBGCUi4xmjHq9kiIAIiIAIiIAIiEGkEJFwjrcdUXxEQAREQAREQARGIUQISrjHa8Wq2CIiACIiACIiACEQaAQnXSOsx1VcEREAEREAEREAEYpSAhGuMdryaLQIiIAIiIAIiIAKRRiCnLxVu166dL8mVVgREQAREQAREQAREQARsI+CTcE1OTratYBkSAREQAREQAREQAREQAV8IZClcmzVrhmuuuQbHjx/3xa7SioAIiIAIiIAIiIAIiEAmAomJiZk++/oh7rwRfM2k9CIgAiIgAiIgAiIgAiKQ3QQ0OSu7ias8ERABERABERABERABvwhIuPqFTZlEQAREQAREQAREQASym4CEa3YTV3kiIAIiIAIiIAIiIAJ+EZBw9QubMomACIiACIiACIiACGQ3AQnX7Cau8kRABERABERABERABPwiIOHqFzZlEgEREAEREAEREAERyG4CEq7ZTVzliYAIiIAIiIAIiIAI+EVAwtUvbMokAiIgAiIgAiIgAiKQ3QQkXLObuMoTAREQAREQAREQARHwi8D/A/mgxd+tmkzfAAAAAElFTkSuQmCC" + } + }, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tutorial I: The basic functionality\n", + "\n", + "In this first part of the Neural Graphs (NGs) tutorial we will focus on a simple example: training TaylorNet module to approximate a sine wave function. We will build a simple \"model graph\" and show how we can nest it into another graphs.\n", + "\n", + "![neural_graphs_nesting.png](attachment:neural_graphs_nesting.png)\n", + "\n", + "#### This part covers the following:\n", + " * how to create a Neural Graph object\n", + " * how to activate/deactivate graph context (in various ways)\n", + " * how to bind NG inputs and outpus (in various ways)\n", + " * how to nest one graph (representing the our \"trainable model\") into training and validation graphs\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Instantiate the necessary neural modules.\n", + "dl_training = RealFunctionDataLayer(n=10000, batch_size=32)\n", + "dl_validation = RealFunctionDataLayer(n=10000, batch_size=32)\n", + "tn = TaylorNet(dim=4)\n", + "loss = MSELoss()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Build the \"model\"graph.\n", + "simple_model = NeuralGraph(operation_mode=OperationMode.both)\n", + "\n", + "# Activate the \"graph context\".\n", + "simple_model.activate() \n", + "\n", + "# Create bound input port by copying the definition from input port \"x\" of TaylorNet.\n", + "simple_model.inputs[\"input\"] = tn.input_ports[\"x\"]\n", + "# Bind the \"x\" input, so that \"x\" of graph will \"lead\" to input port \"x\" of TaylorNet.\n", + "_ = tn(x=simple_model.inputs[\"input\"])\n", + "# Add the module for the second time, also binding the port.\n", + "_ = tn(x=simple_model.inputs[\"input\"])\n", + "# All outputs will be bound by default.\n", + "\n", + "# Deactivate the graph context.\n", + "simple_model.deactivate()\n", + "\n", + "# Let us see what the graph looks like.\n", + "logging.info(simple_model.summary())\n", + "# Please note that the graph is NOT COMPLETE, as it:\n", + "# * doesn't contain a DataLayer, and\n", + "# * has bound input ports that need to be connected." 
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# And how about a \"model graph\" with an arbitrary topology, e.g. one containing a loop?\n",
+    "\n",
+    "# Create a new graph instance.\n",
+    "simple_model = NeuralGraph(operation_mode=OperationMode.both)\n",
+    "\n",
+    "# Activate the new \"graph context\" using the \"with\" statement.\n",
+    "with simple_model:\n",
+    "    # Since this time we decided to stay with the original port name \"x\", we can use the \"default input binding\".\n",
+    "    embeddings = tn(x=simple_model)\n",
+    "    # Now create a loop: pass the result back as input to the TaylorNet instance.\n",
+    "    prediction = tn(x=embeddings)\n",
+    "    # Moreover, we are interested only in the second output, so we must \"manually bind\" it.\n",
+    "    simple_model.outputs[\"prediction\"] = prediction\n",
+    "# Leaving the \"with\" block closes the \"graph context\".\n",
+    "\n",
+    "# Ok, let us see what the graph looks like now.\n",
+    "logging.info(simple_model.summary())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Anyway, for the rest of the example let's create a simple \"model graph\" wrapping just one module.\n",
+    "\n",
+    "# Create a new graph and open its context in a single line.\n",
+    "with NeuralGraph(operation_mode=OperationMode.both) as simple_model:\n",
+    "    # Since this time we decided to stay with the original port name \"x\", we can use the \"default input binding\".\n",
+    "    prediction = tn(x=simple_model)\n",
+    "    # We want to expose the output under the name \"prediction\", so we \"manually bind\" it.\n",
+    "    simple_model.outputs[\"prediction\"] = prediction\n",
+    "\n",
+    "# Ok, let us see what the graph looks like now.\n",
+    "logging.info(simple_model.summary())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Let us now compose a COMPLETE training graph.\n",
+    "# In particular, we will \"nest\" our \"model graph\" into this new training graph.\n",
+    "with NeuralGraph(operation_mode=OperationMode.training) as training_graph:\n",
+    "    # Take outputs from the training DL.\n",
+    "    x, t = dl_training()\n",
+    "    # Pass them to the \"inner\" graph (nest!).\n",
+    "    p = simple_model(x=x)\n",
+    "    # Pass both of them to the loss.\n",
+    "    lss = loss(predictions=p, target=t)\n",
+    "    # We will use \"loss\" as output during training, so we must \"manually bind\" it.\n",
+    "    training_graph.outputs[\"loss\"] = lss\n",
+    "\n",
+    "# Ok, let us see what the graph looks like now.\n",
+    "logging.info(training_graph.summary())\n",
+    "# In the following please note that:\n",
+    "# * during nesting the graph was flattened - 3 modules, 4 steps\n",
+    "# * the input passed to the \"simple_model\" bound input port was passed to the actual input of TaylorNet\n",
+    "# * the graph is COMPLETE, i.e. there are no bound inputs left to connect and there is a single DataLayer\n",
+    "# So in short: we can execute it!"
+   ]
+  },
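+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Conceptually, the flattened training graph computes the following chain (a sketch of the data flow only, not of literal NeMo internals):\n",
+    "\n",
+    "```python\n",
+    "# x, t = dl_training()                 # DataLayer produces data and targets\n",
+    "# p = tn(x=x)                          # TaylorNet, reached through simple_model's bound port\n",
+    "# lss = loss(predictions=p, target=t)  # MSELoss, exposed as training_graph.outputs[\"loss\"]\n",
+    "```"
+   ]
+  },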
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Let us compose a COMPLETE validation graph.\n",
+    "with NeuralGraph(operation_mode=OperationMode.evaluation) as validation_graph:\n",
+    "    # Take outputs from the validation DL.\n",
+    "    x_valid, t_valid = dl_validation()\n",
+    "    # Pass them to the trainable module.\n",
+    "    p_valid = simple_model(x=x_valid)\n",
+    "    loss_valid = loss(predictions=p_valid, target=t_valid)\n",
+    "\n",
+    "# Ok, let us see what the graph looks like now.\n",
+    "logging.info(validation_graph.summary())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create a training callback logging the loss to the console.\n",
+    "train_callback = SimpleLossLoggerCallback(\n",
+    "    tensors=[lss], print_func=lambda x: logging.info(f'Train Loss: {str(x[0].item())}')\n",
+    ")\n",
+    "\n",
+    "# Create an evaluator callback logging/aggregating the validation loss to the console.\n",
+    "def batch_loss_per_batch_callback(tensors, global_vars):\n",
+    "    if \"batch_loss\" not in global_vars.keys():\n",
+    "        global_vars[\"batch_loss\"] = []\n",
+    "    for key, value in tensors.items():\n",
+    "        if key.startswith(\"loss\"):\n",
+    "            global_vars[\"batch_loss\"].append(torch.mean(torch.stack(value)))\n",
+    "\n",
+    "\n",
+    "def batch_loss_epoch_finished_callback(global_vars):\n",
+    "    epoch_loss = torch.max(torch.tensor(global_vars[\"batch_loss\"]))\n",
+    "    logging.info(\"Evaluation Loss: {0}\".format(epoch_loss))\n",
+    "    return dict({\"Evaluation Loss\": epoch_loss})\n",
+    "\n",
+    "\n",
+    "eval_callback = EvaluatorCallback(\n",
+    "    eval_tensors=[loss_valid],\n",
+    "    user_iter_callback=batch_loss_per_batch_callback,\n",
+    "    user_epochs_done_callback=batch_loss_epoch_finished_callback,\n",
+    "    eval_step=100,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Invoke the \"train\" action.\n",
+    "nf.reset_trainer()  # I do not understand why I have to \"reset the trainer\" when calling the train() function again :]\n",
+    "nf.train(\n",
+    "    training_graph=training_graph,\n",
+    "    callbacks=[train_callback, eval_callback],\n",
+    "    optimization_params={\"num_epochs\": 3, \"lr\": 0.0003},\n",
+    "    optimizer=\"sgd\",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "nemo-env",
+   "language": "python",
+   "name": "nemo-env"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/examples/nlp/dialogue_state_tracking/data/dialogue_augmentation_for_sgd_format.py b/examples/nlp/dialogue_state_tracking/data/dialogue_augmentation_for_sgd_format.py
new file mode 100644
index 000000000000..bc6ccff0ec24
--- /dev/null
+++ b/examples/nlp/dialogue_state_tracking/data/dialogue_augmentation_for_sgd_format.py
@@ -0,0 +1,521 @@
+# =============================================================================
+# Copyright 2020 NVIDIA. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+import argparse
+import copy
+import json
+import os
+import random
+import re
+from collections import defaultdict
+from pprint import pprint
+
+import inflect
+import numpy as np
+from tqdm import tqdm
+
+p = inflect.engine()
+
+
+def get_ontology(dialogues, schemas):
+    """
+    Creates the ontology:
+    (service_name, slot_name) ->
+        -> is_categorical -> True/False
+        -> possible_values -> set of values
+    """
+    ontology = defaultdict(defaultdict)
+    for schema in schemas:
+        service_name = schema['service_name']
+        for slot in schema['slots']:
+            slot_name = slot['name']
+            ontology[(service_name, slot_name)]["is_categorical"] = slot['is_categorical']
+            ontology[(service_name, slot_name)]["possible_values"] = set(slot['possible_values'])
+
+    for dialogue in dialogues:
+        for turn in dialogue["turns"]:
+            for frame in turn["frames"]:
+                service_name = frame["service"]
+                if "state" in frame:
+                    for k, vs in frame["state"]["slot_values"].items():
+                        for v in vs:
+                            ontology[(service_name, k)]["possible_values"].add(v)
+                if "actions" in frame:
+                    for action in frame["actions"]:
+                        k = action["slot"]
+                        for v in action["values"]:
+                            if (service_name, k) in ontology:
+                                # some slots like 'count' are not in schema
+                                ontology[(service_name, k)]["possible_values"].add(v)
+    return ontology
+
+
+def get_affected_future_frames(dialogue, from_turn_id, slot_name, slot_value, service):
+    """
+    Determines for all turns starting from from_turn_id whether they contain the given
+    combination of slot_name, slot_value and service;
+    if so, returns the affected List[(turn_id, frame_id, slot_name)].
+    """
+    assert isinstance(from_turn_id, int)
+    assert isinstance(slot_name, str)
+    assert isinstance(slot_value, str)
+    assert isinstance(service, str)
+    res = []
+    for turn_id, turn in enumerate(dialogue["turns"][from_turn_id:], start=from_turn_id):
+        for frame_id, frame in enumerate(turn["frames"]):
+            if turn["speaker"] == "SYSTEM":
+                if frame["service"] == service:
+                    for action in frame["actions"]:
+                        if action["slot"] == slot_name and slot_value in action["values"]:
+                            res.append((turn_id, frame_id, slot_name))
+                            continue
+            else:
+                if frame["service"] == service and slot_value in frame["state"]["slot_values"].get(slot_name, []):
+                    res.append((turn_id, frame_id, slot_name))
+                    continue
+    return res
+
+
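+# Illustrative usage of the two helpers above (a sketch only; the service, slot and value
+# names are hypothetical, not taken from the SGD data):
+#
+#     ontology = get_ontology(dialogues, schemas)
+#     ontology[("Restaurants_1", "cuisine")]["possible_values"]  # e.g. {"Indian", "Thai", ...}
+#
+#     # all future turns/frames (from turn 3 on) that still refer to cuisine="Thai":
+#     affected = get_affected_future_frames(dialogue, 3, "cuisine", "Thai", "Restaurants_1")
+#     # -> [(turn_id, frame_id, "cuisine"), ...]
+
+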
+ + """ + prev_service_user = "" + prev_state_slots_user = {} # key, value + for turn_id, turn in enumerate(dialogue["turns"]): + for frame in turn["frames"]: + slot_to_spans = defaultdict(dict) + for slot in frame["slots"]: + k = slot["slot"] + start_idx, end_idx = slot["start"], slot["exclusive_end"] + slot_to_spans[k][turn["utterance"][start_idx:end_idx]] = [start_idx, end_idx] + frame["slot_to_span"] = slot_to_spans + + if turn["speaker"] == "SYSTEM": + for frame in turn["frames"]: + new_slots = defaultdict(list) + for action in frame["actions"]: + slot = action["slot"] + slot_values = action["values"] + for v in slot_values: + new_slots[slot] = get_affected_future_frames( + dialogue, turn_id + 1, slot_name=slot, slot_value=v, service=frame["service"] + ) + if v in turn["utterance"]: + if slot not in frame["slot_to_span"] or v not in frame["slot_to_span"][slot]: + if len(turn["utterance"].split(v)) == 2: + start_idx = turn["utterance"].index(v) + end_idx = start_idx + len(v) + frame["slot_to_span"][slot][v] = [start_idx, end_idx] + frame["state_update"] = new_slots + else: + for frame in turn["frames"]: + new_slots = defaultdict(list) # map from slot_value -> List[frames] in future + for k, vs in frame["state"]["slot_values"].items(): + for v_id, v in enumerate(vs): + if v in turn["utterance"]: + if k not in frame["slot_to_span"] or v not in frame["slot_to_span"][k]: + if len(turn["utterance"].split(v)) == 2: + start_idx = turn["utterance"].index(v) + end_idx = start_idx + len(v) + frame["slot_to_span"][k][v] = [start_idx, end_idx] + if k not in prev_state_slots_user or v not in prev_state_slots_user[k]: + new_slots[k] = get_affected_future_frames( + dialogue, turn_id + 1, slot_name=k, slot_value=v, service=frame["service"] + ) + frame["state_update"] = new_slots + + if len(turn["frames"]) == 1: + use_frame = turn["frames"][0] + else: + use_frame = [frame for frame in turn["frames"] if frame["service"] != prev_service_user][0] + prev_service_user = use_frame["service"] + prev_state_slots_user = use_frame["state"]["slot_values"] + + +def validate(dialogue): + """ + check if dialogue is valid wrt to non categorical slots: + -check if span indices are within utterance length + -check if utterance substring (by span) is found among values in system action + -check if utterance substring (by span) is found among values in user state->slot_values->key + Otherwise raise error with turn id and frame id + """ + for turn_id, turn in enumerate(dialogue["turns"]): + for frame_id, frame in enumerate(turn["frames"]): + for slot in frame["slots"]: + try: + st_idx, end_idx, key = slot["start"], slot["exclusive_end"], slot["slot"] + word = turn["utterance"][st_idx:end_idx] + assert 0 <= st_idx < end_idx <= len(turn["utterance"]) + if turn["speaker"] == "SYSTEM": + found_key = False + for action in frame["actions"]: + if action["slot"] == key: + if word in action["values"]: + found_key = True + assert found_key + else: + if key in frame["state"]["slot_values"]: + assert word in frame["state"]["slot_values"][key] + except Exception: + raise ValueError(f"Turn {turn_id}, frame {frame_id}") + + +def process_dialogues(final_dialogues, dialogue_count, dialogues, replace_turn_prob, replace_word_prob, new_val_func): + """ + iterates through all dialogues and does replacement according to new_val_func + writes out into final_dialogues. 
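+
+    Args:
+        replace_turn_prob: probability that a turn is considered for augmentation.
+        replace_word_prob: probability that a candidate span inside a chosen turn is replaced.
+        new_val_func: callable (dialogue, turn_id, old_value, start_idx, end_idx) -> new value or None,
+            e.g. get_new_value (ontology swap) or num2str (spelling out digits).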
+ """ + replace_success = 0 + replace_failed = 0 + for dialogue_id, dialogue in tqdm(enumerate(dialogues)): + d_id, d_count = dialogue["dialogue_id"].split("_") + d_id = int(d_id) + dialogue["dialogue_id"] = f"{d_id}_{dialogue_count[d_id]:05d}" + dialogue_count[d_id] += 1 + for turn_id, turn in enumerate(dialogue["turns"]): + if random.random() < replace_turn_prob: + spans = get_sentence_components(turn=turn) + for span in reversed(spans): + if random.random() < replace_word_prob: + old_value = dialogue["turns"][turn_id]["utterance"][span[0] : span[1]] + new_value = new_val_func(dialogue, turn_id, old_value, span[0], span[1]) + if new_value: + tmp_dialogue = copy.deepcopy(dialogue) + try: + replace(tmp_dialogue, turn_id, span[0], span[1], new_value) + validate(tmp_dialogue) + for k, v in tmp_dialogue.items(): + dialogue[k] = v + replace_success += 1 + except Exception: + replace_failed += 1 + + for turn in dialogue["turns"]: + for frame in turn["frames"]: + if 'state_update' in frame: + frame.pop("state_update") + if 'slot_to_span' in frame: + frame.pop("slot_to_span") + final_dialogues[d_id].append(dialogue) + print(f"Replacement success {replace_success}, failed {replace_failed}\n") + + +def update_spans(dialogue, turn_id, frame_id, start_idx, end_idx, old_value, new_value): + """ + update slot spans and slot_to_span + """ + frame = dialogue["turns"][turn_id]["frames"][frame_id] + offset = len(new_value) - len(old_value) + + for slot in frame['slots']: + if start_idx < slot['start']: + slot['start'] += offset + if start_idx < slot['exclusive_end']: + slot['exclusive_end'] += offset + + for k, vs in frame['slot_to_span'].items(): + for v, spans in vs.items(): + if start_idx < spans[0]: + spans[0] += offset + if start_idx < spans[1]: + spans[1] += offset + + +def update_values(dialogue, turn_id, frame_id, key, old_value, new_value): + """ + only update values: actions, state, slot_to_span + """ + frame = dialogue["turns"][turn_id]["frames"][frame_id] + if "actions" in frame: + for action in frame["actions"]: + if key == action["slot"] and old_value in action["values"]: + action["values"].remove(old_value) + action["values"].append(new_value) + if "state" in frame: + for k, vs in frame["state"]["slot_values"].items(): + for v_id, v in enumerate(vs): + if k == key and v == old_value: + vs[v_id] = new_value + + for k, vs in frame["slot_to_span"].items(): + for v, spans in list(vs.items()): + if k == key and v == old_value: + vs.pop(v) + vs[new_value] = spans + + +def get_sentence_components(turn): + """ + return list of start and end indices of slot values/ words that appear in utterance + """ + sentence = turn["utterance"] + word_indices = np.asarray([False for _ in range(len(sentence) + 1)]) + for frame in turn["frames"]: + if "state" in frame: + for k, vs in frame["state"]["slot_values"].items(): + for v in vs: + if v in sentence: + start_idx = sentence.index(v) + end_idx = start_idx + len(v) + word_indices[start_idx:end_idx] = True + if "actions" in frame: + for action in frame["actions"]: + k = action["slot"] + for v in action["values"]: + if v in sentence: + start_idx = sentence.index(v) + end_idx = start_idx + len(v) + word_indices[start_idx:end_idx] = True + + for i in range(len(sentence)): + if sentence[i].isalnum(): + word_indices[i] = True + res = [] + idx = 0 + while idx < len(word_indices): + if word_indices[idx]: + start_idx = idx + while word_indices[idx]: + idx += 1 + end_idx = idx + res.append((start_idx, end_idx)) + idx += 1 + return res + + +def find_word_in_turn(dialogue, 
turn_id, value, start_idx, end_idx):
+    """
+    Find the frames in the given turn whose non-categorical slot annotation covers
+    exactly the span [start_idx, end_idx).
+    Returns List[(turn_id, frame_id, key)].
+    """
+    assert isinstance(value, str)
+    frames = dialogue["turns"][turn_id]["frames"]
+    res = []
+    for frame_id, frame in enumerate(frames):
+        for slot in frame["slots"]:
+            if start_idx == slot["start"] and end_idx == slot["exclusive_end"]:
+                res.append((turn_id, frame_id, slot["slot"]))
+    return res
+
+
+def get_new_value(dialogue, turn_id, value, start_idx, end_idx):
+    """
+    If the span belongs to a non-categorical slot, draw a replacement value for it
+    from the ontology.
+    Returns the new value, or None if no candidate values exist.
+    """
+    candidates = find_word_in_turn(dialogue, turn_id, value, start_idx, end_idx)
+    possible_values = set()
+    for _, frame_id, k in candidates:
+        frame = dialogue["turns"][turn_id]["frames"][frame_id]
+        service = frame["service"]
+        if "possible_values" in ontology[(service, k)]:
+            possible_values.update(ontology[(service, k)]["possible_values"])
+    return random.choice(list(possible_values)) if possible_values else None
+
+
+def replace(dialogue, turn_id, start_idx, end_idx, new_value):
+    """
+    Replace the utterance span start_idx:end_idx at turn_id with new_value.
+    If the old value is annotated in the turn (non-categorical slot), all affected
+    frames are updated consistently via update_values and update_spans.
+    """
+    assert isinstance(turn_id, int)
+    assert isinstance(start_idx, int)
+    assert isinstance(end_idx, int)
+    turn = dialogue["turns"][turn_id]
+    sentence = turn["utterance"]
+    old_value = sentence[start_idx:end_idx]
+    affected_values = find_word_in_turn(
+        dialogue=dialogue, turn_id=turn_id, value=old_value, start_idx=start_idx, end_idx=end_idx
+    )
+    affected_spans = [(turn_id, start_idx, end_idx)]
+    for _, frame_id, key in affected_values.copy():
+        frame = dialogue["turns"][turn_id]["frames"][frame_id]
+        new_affected_values = frame["state_update"][key]
+        affected_values += new_affected_values
+        for a_turn_id, a_frame_id, a_key in new_affected_values:
+            assert key == a_key
+            spans = (
+                dialogue["turns"][a_turn_id]["frames"][a_frame_id]["slot_to_span"].get(a_key, {}).get(old_value, None)
+            )
+            if spans:
+                affected_spans += [(a_turn_id, spans[0], spans[1])]
+
+    for a_turn_id, a_frame_id, a_key in affected_values:
+        update_values(dialogue, a_turn_id, a_frame_id, a_key, old_value, new_value)
+    for a_turn_id, start_idx, end_idx in affected_spans:
+        turn = dialogue["turns"][a_turn_id]
+        assert old_value == turn["utterance"][start_idx:end_idx]
+        for a_frame_id in range(len(turn["frames"])):
+            update_spans(dialogue, a_turn_id, a_frame_id, start_idx, end_idx, old_value, new_value)
+        turn["utterance"] = turn["utterance"][:start_idx] + new_value + turn["utterance"][end_idx:]
+
+
+def num2str(dialogue, turn_id, old_value, start_idx, end_idx):
+    """
+    If old_value is a number that is not part of a non-categorical slot span, return
+    its spelled-out form followed by the digits (e.g. "two 2"); otherwise return None.
+    """
+    res = find_word_in_turn(dialogue, turn_id, old_value, start_idx, end_idx)
+    if not res and old_value.isnumeric():
+        return p.number_to_words(int(old_value)) + " " + old_value
+    return None
+
+
+def test_helper(dialogue, dialogue_id, turn_id, start_idx, end_idx, new_value):
+    replace(dialogue, turn_id=turn_id, start_idx=start_idx, end_idx=end_idx, new_value=new_value)
+    for turn in dialogue["turns"]:
+        for frame in turn["frames"]:
+            if "state_update" in frame:
+                frame.pop("state_update")
+
+
+def test(dialogues, dialogue_id, turn_id, old_value, new_value):
+    dialogue = copy.deepcopy(dialogues[dialogue_id])
+    augment_dialog_by_auxiliary_entries(dialogue)
+    m = re.search(old_value, dialogue["turns"][turn_id]["utterance"])
+    test_helper(dialogue, dialogue_id, turn_id, start_idx=m.start(), end_idx=m.end(), new_value=new_value)
+    pprint(dialogue)
+    validate(dialogue)
+    d_str_new = json.dumps(dialogue, sort_keys=True, indent=2)
+    d_str_old = json.dumps(dialogues[dialogue_id], sort_keys=True, indent=2)
+    print(d_str_new == d_str_old)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--concat_orig_dialogue", action="store_true", help="concatenate the original dialogues to the augmented ones"
+    )
+    parser.add_argument(
+        "--input_dir",
+        type=str,
+        default="",
+        help="data directory, containing one schema.json and multiple dialogue*.json files",
+    )
+    parser.add_argument("--output_dir", type=str, help="output data directory", default=None)
+    parser.add_argument("--num2string", action="store_true", help="convert digits to their spelled-out form")
+    parser.add_argument("--repeat", type=int, default=5, help="number of augmentation sweeps over input data")
+    parser.add_argument("--replace_turn_prob", type=float, default=1.0, help="likelihood to modify an utterance turn")
+    parser.add_argument(
+        "--replace_word_prob", type=float, default=1.0, help="likelihood to modify a word in an utterance"
+    )
+    parser.add_argument("--seed", type=int, default=0)
+    args = parser.parse_args()
+    return args
+
+
+if __name__ == "__main__":
+
+    args = parse_args()
+    print(vars(args))
+    random.seed(args.seed)
+
+    if not os.path.exists(args.input_dir):
+        raise ValueError(
+            "SGD dataset not found. Dataset can be downloaded from https://github.com/google-research-datasets/dstc8-schema-guided-dialogue"
+        )
+
+    in_file_path = args.input_dir
+    schema_path = os.path.join(in_file_path, 'schema.json')
+    dialogue_files = [
+        os.path.join(in_file_path, f)
+        for f in os.listdir(in_file_path)
+        if os.path.isfile(os.path.join(in_file_path, f))
+        if "dialogue" in f
+    ]
+    dialogue_files.sort()
+    orig_dialog = []
+    for d_file in dialogue_files:
+        orig_dialog.extend(json.load(open(d_file, 'r')))
+    print(f"len(orig_dialog) = {len(orig_dialog)}")
+    orig_schema = json.load(open(schema_path, 'r'))
+
+    dialogue_count = defaultdict(int)
+    final_dialogues = defaultdict(list)
+
+    ontology = get_ontology(dialogues=orig_dialog, schemas=orig_schema)
+
+    for dialogue_id, dialogue in tqdm(enumerate(orig_dialog)):
+        validate(dialogue)  # for test purposes
+        augment_dialog_by_auxiliary_entries(dialogue)
+        validate(dialogue)  # for test purposes
+
+    if args.num2string:
+        if args.concat_orig_dialogue:
+            process_dialogues(
+                final_dialogues=final_dialogues,
+                dialogue_count=dialogue_count,
+                dialogues=orig_dialog,
+                replace_turn_prob=1.0,
+                replace_word_prob=1.0,
+                new_val_func=num2str,
+            )
+        else:
+            process_dialogues(
+                final_dialogues=defaultdict(list),
+                dialogue_count=defaultdict(int),
+                dialogues=orig_dialog,
+                replace_turn_prob=1.0,
+                replace_word_prob=1.0,
+                new_val_func=num2str,
+            )
+
+    for _ in range(args.repeat):
+        dialogues = copy.deepcopy(orig_dialog)
+        process_dialogues(
+            final_dialogues=final_dialogues,
+            dialogue_count=dialogue_count,
+            dialogues=dialogues,
+            replace_turn_prob=args.replace_turn_prob,
+            replace_word_prob=args.replace_word_prob,
+            new_val_func=get_new_value,
+        )
+
+    if args.concat_orig_dialogue and not args.num2string:
+        for dialogue_id, dialogue in tqdm(enumerate(orig_dialog)):
+            d_id, d_count = dialogue["dialogue_id"].split("_")
+            d_id = int(d_id)
+            dialogue["dialogue_id"] = f"{d_id}_{dialogue_count[d_id]:05d}"
+            dialogue_count[d_id] += 1
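+            # The shared counter continues from the augmented copies, so the original
+            # dialogue receives a fresh id that cannot collide with them.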
final_dialogues[d_id].append(dialogue) + + for dir_id, dialogues in final_dialogues.items(): + for dialogue in dialogues: + for turn in dialogue["turns"]: + for frame in turn["frames"]: + if 'state_update' in frame: + frame.pop("state_update") + if 'slot_to_span' in frame: + frame.pop("slot_to_span") + if args.output_dir is None: + output_dir = f"augmented_repeat{args.repeat}_replace_turn_prob{args.replace_turn_prob}_replace_word_prob{args.replace_word_prob}_concatorig{args.concat_orig_dialogue}_num2string{args.num2string}" + else: + output_dir = args.output_dir + os.makedirs(output_dir, exist_ok=True) + for dir_id, dialogues in final_dialogues.items(): + with open(os.path.join(output_dir, f"dialogues_{dir_id:03d}.json"), 'w') as outfile: + json.dump(dialogues, outfile, indent=2) + + with open(os.path.join(output_dir, f"schema.json"), 'w') as outfile: + json.dump(orig_schema, outfile, indent=2) diff --git a/examples/nlp/dialogue_state_tracking/data/multiwoz/__init__.py b/examples/nlp/dialogue_state_tracking/data/multiwoz/__init__.py new file mode 100644 index 000000000000..cd24d1f06b22 --- /dev/null +++ b/examples/nlp/dialogue_state_tracking/data/multiwoz/__init__.py @@ -0,0 +1,16 @@ +# ============================================================================= +# Copyright 2020 NVIDIA. All Rights Reserved. +# Copyright 2019 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= diff --git a/examples/nlp/dialogue_state_tracking/data/multiwoz/correct_categorical_state_values.tsv b/examples/nlp/dialogue_state_tracking/data/multiwoz/correct_categorical_state_values.tsv new file mode 100644 index 000000000000..0672290108ea --- /dev/null +++ b/examples/nlp/dialogue_state_tracking/data/multiwoz/correct_categorical_state_values.tsv @@ -0,0 +1,18 @@ +alpha-milton alpha milton +any dontcare +bed and breakfast guesthouse +boating boat +cam cambridge +concert concerthall +concert hall concerthall +guest house guesthouse +guesthouses guesthouse +moderate|cheap cheap|moderate +museum kettles yard museum +mutiple sports multiple sports +nightclub night club +acorn guesthouse acorn guest house +swimmingpool swimming pool +sports multiple sports +pool swimming pool +theater theatre \ No newline at end of file diff --git a/examples/nlp/dialogue_state_tracking/data/multiwoz/create_data_from_multiwoz.py b/examples/nlp/dialogue_state_tracking/data/multiwoz/create_data_from_multiwoz.py new file mode 100644 index 000000000000..cdbc5c4fa989 --- /dev/null +++ b/examples/nlp/dialogue_state_tracking/data/multiwoz/create_data_from_multiwoz.py @@ -0,0 +1,793 @@ +# ============================================================================= +# Copyright 2020 NVIDIA. All Rights Reserved. +# Copyright 2019 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +"""Converts Multiwoz 2.1 dataset to the data format of SGD.""" +import argparse +import collections +import copy +import json +import os +import re + +import nemo.collections.nlp.data.datasets.sgd_dataset.schema as schema +from nemo import logging + +# Parsing arguments +parser = argparse.ArgumentParser(description='conversion of multiwoz into sgd') + +parser.add_argument('--input_data_dir', type=str, required=True, help='Path of the dataset to convert from.') +parser.add_argument( + '--output_dir', + type=str, + help='Path to output directory. If not specified, generate the dialogues in the same directory as the script.', +) +parser.add_argument( + '--annotate_copy_slots', + action='store_true', + help='Whether to annotate slots whose value is copied from a different slot in ' + 'the previous state. If true, add a new key "copy_from" in the slot ' + 'annotation dict. Its value is the slot that the value is copied from.', +) + +parser.add_argument('--schema_file_name', default='schema.json', type=str, help='Name of the schema file to use.') + +args = parser.parse_args() + +_PATH_MAPPING = [('test', 'testListFile.json'), ('dev', 'valListFile.json'), ('train', '')] + +_DIR_PATH = os.path.dirname(os.path.abspath(__file__)) +# File used for correcting categorical slot values. Each line is a pair of +# the original slot value in MultiWOZ 2.1 annotation and the corrected slot +# value. +_CORRECT_FOR_STATE_PATH = os.path.join(_DIR_PATH, 'correct_categorical_state_values.tsv') + +_DEFAULT_SERVICE_NAME = 'all' +# "Don't care" slot value. +_DONT_CARE = 'dontcare' +_NONE_VALUE = 'none' +_INACTIVE_INTENT = 'NONE' +# Maximum number of dialogues to write in each output file. +_NUM_DIALS_PER_FILE = 512 + +# We try to find the span of non-categorical slot values in the dialog history, +# but sometimes there is no exact match and we choose to find the closest values +# from the utterance. If the found value is contained in the list below, +# we need to check if it is a correct match. +_FOUND_VALUES_NEED_CHECK = [ + 'restaurant', + 'hotel', + 'museum', + 'church', + 'college', + 'cinema', + 'park', + 'guesthouses', + 'guesthouse', + 'great', + 'from', + 'hotels', + 'school', + 'schools', + 'guests', + 'colleges', + 'lodge', + 'theatre', + 'centre', + 'bar', + 'bed and breakfast', + 'train', + 'station', + 'gallery', + 'la', + 'time', + 'house', + 'guest house', + 'old', + 'pool', + 'house', + 'a', + 'b', + 'the', + 'cafe', + 'cambridge', + 'hospital', + 'restaurant\'s', +] + +# A collection of phrases that are semantically similar to the key value, which +# is a word. +_SIMILAR_WORDS = { + 'portuguese': ['portugese', 'portugeuese'], + '01:30': ['1 thirty p . 
m .'],
+    '16:30': ['after 16:00'],
+    'anatolia': ['anatoilia'],
+    'allenbell': ['allenball'],
+    'caribbean': ['carribbean'],
+    'seafood': ['sea food'],
+    'moroccan': ['morrocan'],
+    'avalon': ['avaion'],
+    'barbeque': ['bbq'],
+    'american': ['americas'],
+    'italian': ['pizza place'],
+    'indian': ['taj tandoori'],
+    'british': ['english'],
+    'cambride': ['cambridge'],
+    'fenditton': ['fen ditton'],
+    'cafe': ['caffe'],
+    'gonvile': ['gonville'],
+    'shaddia': ['shaddai'],
+}
+
+# A collection of phrases that are semantically similar to the key value, which
+# is a phrase consisting of more than one word.
+_SIMILAR_PHRASES = {
+    'alexander bed and breakfast': ['alexander b&b', 'alexander bed and breafast', 'alexander bed & breakfast'],
+    'a and b guest house': ['a & b guest house', 'a and b guesthouse', 'a and be guest house'],
+    'saint johns chop house': ['saint johns chop shop house'],
+    'bridge guest house': ['bridge guesthouse'],
+    'finches b and b': ['finches b & b', 'finches b&b'],
+    'finches bed and breakfast': ['flinches bed and breakfast', 'finches b&b'],
+    'carolina bed and breakfast': ['carolina b&b'],
+    'city centre north b and b': ['city centre north b&b', 'city centre north b & b'],
+    'lan hong house': ['ian hong house', 'ian hong'],
+    'ugly duckling': ['ugly ducking'],
+    'sri lankan': ['sri lanken'],
+    'cambridge punter': ['cambridge punte'],
+    'abc theatre': ['adc theatre'],
+}
+
+
+def _locate_boundary(phrase, text):
+    """Locate the span of the phrase using exact match."""
+
+    def _locate_token_boundary(pos, text):
+        """Get the start and end index of a token that covers a certain position."""
+        if pos < 0:
+            raise ValueError('Pos {} should be a positive integer.'.format(pos))
+        next_space = text.find(' ', pos)
+        left_boundary = text.rfind(' ', 0, pos) + 1
+        right_boundary = next_space if next_space != -1 else len(text)
+        return left_boundary, right_boundary
+
+    phrase = phrase.strip()
+    pos_in_text = text.find(phrase)
+    if pos_in_text == -1:
+        return None, None
+
+    tokens = phrase.split()
+    start_idx, _ = _locate_token_boundary(pos_in_text, text)
+    last_token = tokens[-1]
+    find_last_token = text.find(last_token, pos_in_text + len(phrase) - len(last_token))
+    if find_last_token == -1:
+        raise ValueError('Should find the last word for value {}'.format(phrase))
+    _, end_idx = _locate_token_boundary(find_last_token, text)
+    # If it's a number, the value should be exactly the same.
+    if phrase.isdigit() and text[start_idx:end_idx] != phrase:
+        return None, None
+    # If the phrase is short, the value should be exactly the same.
+    # e.g. we don't want to match "theatre" when searching for "the"
+    if len(phrase) <= 3 and len(phrase) != (end_idx - start_idx):
+        return None, None
+    return start_idx, end_idx
+
+
+def _locate_word(word, text, start_pos):
+    """Get the start and end index of a phrase that is semantically equal to a word."""
+    # If the word to search for contains 3 or 4 digits, correct it into time.
+    obj = re.match(r'(?<!\d)(\d{1,2})(\d{2})(?!\d)', word)
+    if obj:
+        word = ':'.join([obj.group(1), obj.group(2)])
+    obj = re.match(r'(\d{1,2}):(\d{2})', word)
+    if obj:
+        # The word is a time value: collect alternative surface forms to search for.
+        times_to_try = [word]
+        hour = int(obj.group(1))
+        minute = int(obj.group(2))
+        if hour > 12:
+            times_to_try.append(':'.join([str(hour - 12), obj.group(2)]))
+            if minute == 0:
+                times_to_try.append(str(hour - 12) + ' pm')
+                times_to_try.append(str(hour - 12) + 'pm')
+                times_to_try.append(str(hour - 12) + ' p . m .')
+                times_to_try.append(str(hour - 12) + ' o\'clock p . m .')
+                times_to_try.append(str(hour - 12) + ' o\'clock')
+                times_to_try.append(str(hour) + ' o\'clock')
+                times_to_try.append(str(hour - 12) + ':00')
+                times_to_try.append(str(hour))
+        elif hour == 12 and minute == 0:
+            times_to_try.extend(['12 pm', '12pm', '12 o\'clock', '12 p . m .', '12', 'noon'])
+        else:
+            times_to_try.append(':'.join([str(hour + 12), obj.group(2)]))
+            if int(minute) == 0:
+                times_to_try.append(str(hour) + ' am')
+                times_to_try.append(str(hour) + 'am')
+                times_to_try.append(str(hour) + ' a . m .')
+                times_to_try.append(str(hour) + ' o\'clock a . m .')
+                times_to_try.append(str(hour) + ' o\'clock')
+                times_to_try.append(str(hour + 12) + ':00')
+                times_to_try.append(str(hour))
+        if minute == 15 or minute == 45 or minute == 30:
+            times_to_try.append('after ' + str(hour) + ':' + str(minute - 15))
+            if hour < 10:
+                times_to_try.append('after 0' + str(hour) + ':' + str(minute - 15))
+        if minute == 0:
+            times_to_try.append('after ' + str(hour - 1) + ':45')
+        for time_value in times_to_try:
+            # Correct time like "08:15" to "8:15" to increase match possibility.
+            if time_value[0] == '0':
+                if len(time_value) > 2 and time_value[1] != ':':
+                    time_value = time_value[1:]
+            start_idx, end_idx = _locate_boundary(time_value, text)
+            if start_idx is not None:
+                return start_idx + start_pos, end_idx + start_pos
+    else:
+        start_idx, end_idx = _locate_boundary(word, text)
+        if start_idx is not None:
+            return start_idx + start_pos, end_idx + start_pos
+    # Try phrases that are similar to the word to find.
+    for similar_word in _SIMILAR_WORDS.get(word, []):
+        start_idx, end_idx = _locate_boundary(similar_word, text)
+        if start_idx is not None:
+            return start_idx + start_pos, end_idx + start_pos
+
+    # Slot values ending with 's' can be written in different formats.
+    # e.g. rosas can be written as rosa, rosa's.
+    if word.endswith('s') and len(word) > 3:
+        modified_words = [word[:-1] + '\'s', word[:-1]]
+        for modified_word in modified_words:
+            start_idx, end_idx = _locate_boundary(modified_word, text)
+            if start_idx is not None:
+                return start_idx + start_pos, end_idx + start_pos
+    return None, None
+
+
+def exists_in_prev_dialog_states(slot_value, converted_turns):
+    """Whether the slot value exists in the previous dialogue states."""
+    for user_turn in converted_turns[::2]:
+        assert user_turn['speaker'] == 'USER'
+        for frame in user_turn['frames']:
+            if 'state' in frame and 'slot_values' in frame['state']:
+                slot_values_dict = frame['state']['slot_values']
+                for slot, values_list in slot_values_dict.items():
+                    new_list = []
+                    for value in values_list:
+                        new_list.extend(value.split('|'))
+                    if slot_value in new_list:
+                        return frame['service'], slot, values_list
+    return None, None, None
+
+
+class Processor(object):
+    """A processor to convert Multiwoz to the data format used in SGD."""
+
+    def __init__(self, schemas):
+        self._schemas = schemas
+        # For statistically evaluating the modifications.
+        # Number of non-categorical slot values in the dialogue state, which need span
+        # annotations.
+        self._slot_spans_num = 0
+        # Dict to track the number of non-categorical slot values whose span cannot
+        # be found.
+        self._unfound_slot_spans_num = collections.Counter()
+
+        # Dict used to correct categorical slot values annotated in MultiWOZ 2.1.
+        self._slot_value_correction_for_cat_slots = {}
+        with open(_CORRECT_FOR_STATE_PATH, 'r') as f:
+            for line in f:
+                tok_from, tok_to = line.replace('\n', '').split('\t')
+                self._slot_value_correction_for_cat_slots[tok_from] = tok_to
+
+    @property
+    def unfound_slot_span_ratio(self):
+        """Get the ratio of the slot spans that can't be found in the utterances."""
+        ratio_dict = {k: float(v) / float(self._slot_spans_num) for k, v in self._unfound_slot_spans_num.items()}
+        ratio_dict['total'] = float(sum(self._unfound_slot_spans_num.values())) / float(self._slot_spans_num)
+        return ratio_dict
+
+    def _basic_text_process(self, text, lower=True):
+        # Remove redundant spaces.
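+        # e.g. 'hello   world ' -> 'hello world'. Callers pass lower=False for raw
+        # utterances so that the original casing is preserved for span extraction.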
+ text = re.sub(r'\s+', ' ', text).strip() + if lower: + text = text.lower() + return text + + def _insert_slots_annotations_to_turn(self, turn, slots_annotations_list, service_name): + """Insert slot span annotations to a turn.""" + found_service = False + for frame in turn['frames']: + if frame['service'] == service_name: + frame['slots'].extend(slots_annotations_list) + found_service = True + continue + if not found_service: + turn['frames'].append({'service': service_name, 'slots': slots_annotations_list, 'actions': []}) + return + + def _correct_state_value_for_noncat(self, slot, val): + """Correct slot values for non-categorical slots.""" + val = val.strip() + if ( + (val == 'cam' and slot == 'restaurant-name') + or (val == 'friday' and slot == 'train-leaveat') + or (val == 'bed' and slot == 'attraction-name') + ): + return '' + if val == 'portugese': + val = 'portuguese' + return val + + def _correct_state_value_for_cat(self, _, val): + """Correct slot values for categorical slots.""" + val = val.strip() + return self._slot_value_correction_for_cat_slots.get(val, val) + + def _get_intent_from_actions(self, state_value_dict, sys_actions, user_actions): + """Generate user intent by rules. + + We assume each service has only one active intent which equals to the domain + mentioned in the current user turn. + We use _infer_domains_from_actions to infer the list of possible domains. + Domains that appear in the user actions and dialogue updates are prioritised + over domains mentioned in the previous system actions. + In the provided schema of MultiWOZ 2.1, every service contains one domain, + so the active_intent is either "NONE" or "find_{domain}" for every service. + + Args: + state_value_dict: a dict, key is the slot name, value is a list. + sys_actions: a list of sys actions in the next turn. + user_actions: a list of user actions. + + Returns: + String, intent of the current user turn. + """ + + def _infer_domains_from_actions(state_value_dict, sys_actions, user_actions): + """Infer the domains involved in the current turn from actions.""" + user_mentioned_domains = set() + for user_action in user_actions: + domain = user_action['act'].lower().split('-')[0] + if domain not in ['general', 'booking']: + user_mentioned_domains.add(domain) + sys_mentioned_domains = set() + for sys_action in sys_actions: + domain = sys_action['act'].lower().split('-')[0] + if domain not in ['general', 'booking']: + sys_mentioned_domains.add(domain) + # Compute domains whose slot values get updated in the current turn. + state_change_domains = set() + for slot, _ in state_value_dict.items(): + domain_name = slot.split('-')[0] + state_change_domains.add(domain_name) + # Infer the possible domains involved in the current turn for a certain + # service. + return list(user_mentioned_domains.union(state_change_domains)) or list(sys_mentioned_domains) + + domains = _infer_domains_from_actions(state_value_dict, sys_actions, user_actions) + return 'find_' + domains[0] if domains else _INACTIVE_INTENT + + def _is_filled(self, slot_value): + """Whether a slot value is filled.""" + slot_value = slot_value.lower() + return slot_value and slot_value != 'not mentioned' and slot_value != 'none' + + def _new_service_name(self, domain): + """Get the new service_name decided by the new schema.""" + # If the schema file only contains one service, we summarize all the slots + # into one service, otherwise, keep the domain name as the service name. 
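+        # e.g. with the multi-service schema.json shipped alongside this script,
+        # 'hotel' stays 'hotel'; with a single-service schema every domain maps to 'all'.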
+ return _DEFAULT_SERVICE_NAME if (len(self._schemas.services) == 1) else domain + + def _get_slot_name(self, slot_name, service_name, in_book_field=False): + """Get the slot name that is consistent with the schema file.""" + slot_name = 'book' + slot_name if in_book_field else slot_name + return '-'.join([service_name, slot_name]).lower() + + def _generate_dialog_states(self, frame_dict, overwrite_slot_values): + """Get the dialog states and overwrite some of the slot values.""" + dialog_states = collections.defaultdict(dict) + orig_dialog_states = collections.defaultdict(dict) + for domain_name, values in frame_dict.items(): + dialog_states_of_one_domain = {} + for k, v in values['book'].items(): + if isinstance(v, list): + for item_dict in v: + new_states = { + self._get_slot_name(slot_name, domain_name, in_book_field=True): slot_val + for slot_name, slot_val in item_dict.items() + } + dialog_states_of_one_domain.update(new_states) + if isinstance(v, str) and v: + slot_name = self._get_slot_name(k, domain_name, in_book_field=True) + dialog_states_of_one_domain[slot_name] = v + new_states = { + self._get_slot_name(slot_name, domain_name): slot_val for slot_name, slot_val in values['semi'].items() + } + dialog_states_of_one_domain.update(new_states) + # Get the new service_name that is decided by the schema. If the + # schema file only contains one service, we summarize all the slots into + # one service, otherwise, keep the domain name as the service name. + new_service_name = self._new_service_name(domain_name) + # Record the orig state values without any change. + orig_dialog_state_of_one_domain = copy.deepcopy(dialog_states_of_one_domain) + for (key, value) in orig_dialog_state_of_one_domain.items(): + if key in self._schemas.get_service_schema(new_service_name).slots and self._is_filled(value): + orig_dialog_states[new_service_name][key] = value + # Correct the slot values in the dialogue state. + corrected_dialog_states_of_one_domain = {} + for k, v in dialog_states_of_one_domain.items(): + if k in self._schemas.get_service_schema(new_service_name).categorical_slots: + corrected_dialog_states_of_one_domain[k] = self._correct_state_value_for_cat( + k, self._basic_text_process(v) + ) + else: + corrected_dialog_states_of_one_domain[k] = self._correct_state_value_for_noncat( + k, self._basic_text_process(v) + ) + dialog_states_of_one_domain = { + k: v for k, v in corrected_dialog_states_of_one_domain.items() if self._is_filled(v) + } + + # Overwrite some of the slot values and changes the slot value of a slot + # into a list. + for slot, value in dialog_states_of_one_domain.items(): + dialog_states_of_one_domain[slot] = [value] + if slot in overwrite_slot_values[new_service_name]: + if value in overwrite_slot_values[new_service_name][slot]: + dialog_states_of_one_domain[slot] = sorted( + overwrite_slot_values[new_service_name][slot][value] + ) + # Only track the slot values that are listed in the schema file. Slots + # such as reference number, phone number are filtered out. 
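+            # e.g. a hypothetical 'hotel-bookref' (reference number) has no schema
+            # entry, so it would be dropped from the tracked state here.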
+ for (key, value) in dialog_states_of_one_domain.items(): + if key in self._schemas.get_service_schema(new_service_name).slots: + dialog_states[new_service_name][key] = value + return dialog_states, orig_dialog_states + + def _get_update_states(self, prev_ds, cur_ds): + """Get the updated dialogue states between two user turns.""" + updates = collections.defaultdict(dict) + for service, slot_values_dict in cur_ds.items(): + if service not in prev_ds: + updates[service] = slot_values_dict + continue + for slot, values in slot_values_dict.items(): + for value in values: + if slot not in prev_ds[service] or value not in prev_ds[service][slot]: + updates[service][slot] = updates[service].get(slot, []) + [value] + return updates + + def _generate_slot_annotation(self, orig_utt, slot, slot_value): + """Generate the slot span of a slot value from the utterance. + + Args: + orig_utt: Original utterance in string. + slot: Slot name in string. + slot_value: Slot value to be annotated in string. + + Returns: + slot_ann: A dict that denotes the slot name and slot spans. + slot_value: The corrected slot value based on the utterance. It's + unchanged if the slot value can't be found in the utterance. + """ + slot_ann = [] + utt = orig_utt.lower() + start_idx, end_idx = None, None + # Check if the utterance mentions any phrases that are semantically same as + # the slot value. + for alias_slot_value in [slot_value] + _SIMILAR_PHRASES.get(slot_value, []): + start_idx, end_idx = _locate_boundary(alias_slot_value, utt) + if start_idx is not None: + break + if start_idx is None: + # Tokenize the slot value and find each of them. + splitted_slot_values = slot_value.strip().split() + unfound_tokens_idx = [] + search_start_idx = 0 + # Find if each token exists in the utterance. + for i, value_tok in enumerate(splitted_slot_values): + tok_start_idx, tok_end_idx = _locate_word(value_tok, utt, search_start_idx) + if tok_start_idx is not None and tok_end_idx is not None: + # Hard coded rules + # if the value to find is one of ['and', 'of', 'by'] and + # there's no token prior to them having been found, we don't think + # the value as found since they are fairly common words. + if value_tok in ['and', 'of', 'by'] and start_idx is None: + unfound_tokens_idx.append(i) + continue + if start_idx is None: + start_idx = tok_start_idx + search_start_idx = tok_end_idx + else: + unfound_tokens_idx.append(i) + # Record the last index. + if search_start_idx > 0: + end_idx = search_start_idx + if start_idx is None: + return [], slot_value + new_slot_value = utt[start_idx:end_idx] + + if abs(len(slot_value) - len(new_slot_value)) > 20: + return [], slot_value + if len(new_slot_value.split()) > (len(slot_value.strip().split()) + 2) and ( + new_slot_value not in _SIMILAR_PHRASES.get(slot_value, []) + ): + return [], slot_value + # If the value found from the utterance is one of values below and the real + # slot value contains more than one tokens, we don't think it as a + # successful match. + if new_slot_value.strip() in _FOUND_VALUES_NEED_CHECK and len(slot_value.split()) > 1: + return [], slot_value + # If the value based on the utterance ends with any value below, we don't + # annotate span of it. 
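+        # e.g. a candidate span ending in a dangling 'and' most likely cut through
+        # the middle of the real value, so no annotation is returned for it.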
+ if new_slot_value.strip().split()[-1] in ['and', 'the', 'of', 'by']: + return [], slot_value + slot_ann.append( + {'slot': slot, 'value': orig_utt[start_idx:end_idx], 'exclusive_end': end_idx, 'start': start_idx,} + ) + return slot_ann, new_slot_value + + def _update_corrected_slot_values( + self, corrected_slot_values_dict, service_name, slot, slot_value, new_slot_value + ): + """Update the dict that keeps track of the modified state values.""" + if slot not in corrected_slot_values_dict[service_name]: + corrected_slot_values_dict[service_name][slot] = collections.defaultdict(set) + corrected_slot_values_dict[service_name][slot][slot_value] = {slot_value} + corrected_slot_values_dict[service_name][slot][slot_value].add(new_slot_value) + return + + def _get_requested_slots_from_action(self, act_list): + """Get user's requested slots from the action.""" + act_request = [] + for act_dict in act_list: + if 'request' in act_dict['act'].lower(): + slot_name = act_dict['slot'] + if slot_name == 'Arrive': + slot_name = 'arriveby' + elif slot_name == 'Leave': + slot_name = 'leaveat' + act_request.append('-'.join([act_dict['act'].split('-')[0], slot_name]).lower()) + return act_request + + def _generate_actions(self, dialog_act): + """Generate user/system actions.""" + converted_actions = collections.defaultdict(list) + for k, pair_list in dialog_act.items(): + k_list = k.lower().strip().split('-') + domain = k_list[0] + service_name = self._new_service_name(domain) + act_slot_values_dict = collections.defaultdict(list) + for pair in pair_list: + slot = pair[0] + slot_value = pair[1] + if slot != _NONE_VALUE: + act_slot_values_dict[slot].append(slot_value) + if not act_slot_values_dict: + converted_actions[service_name].append({'act': k}) + for slot, values in act_slot_values_dict.items(): + converted_actions[service_name].append({'act': k, 'slot': slot, 'values': values}) + return converted_actions + + def _generate_dial_turns(self, turns, dial_id): + """Generate the dialog turns and the services mentioned in the dialogue.""" + prev_dialog_states = collections.defaultdict(dict) + corrected_slot_values = collections.defaultdict(dict) + converted_turns = [] + appear_services = set() + if len(turns) % 2 != 0: + raise ValueError('dialog ended by user') + for i in range(len(turns))[::2]: + user_info = turns[i] + sys_info = turns[i + 1] + user_utt = self._basic_text_process(user_info['text'], False) + sys_utt = self._basic_text_process(sys_info['text'], False) + user_actions = collections.defaultdict(list) + sys_actions = collections.defaultdict(list) + if 'dialog_act' in user_info: + user_actions = self._generate_actions(user_info['dialog_act']) + if 'dialog_act' in sys_info: + sys_actions = self._generate_actions(sys_info['dialog_act']) + + sys_turn = {'utterance': sys_utt, 'speaker': 'SYSTEM', 'frames': [], 'turn_id': str(i + 1)} + user_turn = {'utterance': user_utt, 'speaker': 'USER', 'frames': [], 'turn_id': str(i)} + dialog_states, _ = self._generate_dialog_states(sys_info['metadata'], corrected_slot_values) + appear_services.update(dialog_states.keys()) + + # Fill in slot spans in the user turn and the previous system turn for + # the non categorical slots. 
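+            # Only slots updated in this turn (update_states) need new spans; each
+            # value is searched in the current user utterance first, then in the
+            # preceding system utterance.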
+            user_slots = collections.defaultdict(list)
+            sys_slots = collections.defaultdict(list)
+            update_states = self._get_update_states(prev_dialog_states, dialog_states)
+            prev_sys_utt = converted_turns[-1]['utterance'] if converted_turns else ''
+            for service_name, slot_values_dict in update_states.items():
+                new_service_name = self._new_service_name(service_name)
+                service_schema = self._schemas.get_service_schema(new_service_name)
+                for slot, slot_value in slot_values_dict.items():
+                    assert slot_value, 'slot values should not be empty'
+                    slot_value = slot_value[0]
+                    if slot in service_schema.categorical_slots:
+                        if slot_value not in service_schema.get_categorical_slot_values(slot) and slot_value not in [
+                            _DONT_CARE
+                        ]:
+                            logging.error('Value %s not contained in slot %s, dial_id %s', slot_value, slot, dial_id)
+                        dialog_states[service_name][slot] = [slot_value]
+                    else:
+                        self._slot_spans_num += 1
+                        if slot_value == _DONT_CARE:
+                            continue
+                        user_slot_ann, slot_value_from_user = self._generate_slot_annotation(
+                            user_utt, slot, slot_value
+                        )
+                        sys_slot_ann, slot_value_from_sys = self._generate_slot_annotation(
+                            prev_sys_utt, slot, slot_value
+                        )
+                        # Values from the user utterance have a higher priority than values
+                        # from the sys utterance. We correct the slot value of a
+                        # non-categorical slot based on the user utterance first, then the
+                        # system utterance.
+                        if user_slot_ann and slot_value_from_user != slot_value:
+                            if sys_slot_ann and (slot_value_from_sys == slot_value):
+                                user_slot_ann = None
+                            else:
+                                self._update_corrected_slot_values(
+                                    corrected_slot_values, service_name, slot, slot_value, slot_value_from_user
+                                )
+                                dialog_states[service_name][slot] = list(
+                                    corrected_slot_values[service_name][slot][slot_value]
+                                )
+                        if not user_slot_ann and sys_slot_ann and slot_value_from_sys != slot_value:
+                            self._update_corrected_slot_values(
+                                corrected_slot_values, service_name, slot, slot_value, slot_value_from_sys
+                            )
+                            dialog_states[service_name][slot] = list(
+                                corrected_slot_values[service_name][slot][slot_value]
+                            )
+                        if user_slot_ann:
+                            user_slots[service_name].extend(user_slot_ann)
+                        if sys_slot_ann:
+                            sys_slots[service_name].extend(sys_slot_ann)
+                        if not user_slot_ann and not sys_slot_ann:
+                            # First check if it exists in the previous dialogue states.
+                            from_service_name, from_slot, from_slot_values = exists_in_prev_dialog_states(
+                                slot_value, converted_turns
+                            )
+                            if from_service_name is not None:
+                                self._unfound_slot_spans_num['copy_from_prev_dialog_state'] += 1
+                                if args.annotate_copy_slots:
+                                    user_slots[service_name].append(
+                                        {'slot': slot, 'copy_from': from_slot, 'value': from_slot_values}
+                                    )
+                                continue
+                            # Second, trace back the dialogue history to find the span.
+                            for prev_turn in converted_turns[-2::-1]:
+                                prev_utt = prev_turn['utterance']
+                                prev_slot_ann, prev_slot_value = self._generate_slot_annotation(
+                                    prev_utt, slot, slot_value
+                                )
+                                if prev_slot_ann:
+                                    if prev_slot_value != slot_value:
+                                        self._update_corrected_slot_values(
+                                            corrected_slot_values, service_name, slot, slot_value, prev_slot_value
+                                        )
+                                        dialog_states[service_name][slot] = list(
+                                            corrected_slot_values[service_name][slot][slot_value]
+                                        )
+                                    self._insert_slots_annotations_to_turn(prev_turn, prev_slot_ann, service_name)
+                                    break
+                            self._unfound_slot_spans_num[slot] += 1
+                            continue
+            # Fill in slot annotations for the system turn.
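+            # The previous system turn is the last element of converted_turns, so its
+            # annotations are attached to converted_turns[-1].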
+ for service_name in sys_slots: + if not sys_slots[service_name]: + continue + self._insert_slots_annotations_to_turn(converted_turns[-1], sys_slots[service_name], service_name) + # Generate user frames from dialog_states. + latest_update_states = self._get_update_states(prev_dialog_states, dialog_states) + for service_name, slot_values_dict in dialog_states.items(): + user_intent = self._get_intent_from_actions( + latest_update_states[service_name], sys_actions[service_name], user_actions[service_name] + ) + # Fill in values. + user_turn['frames'].append( + { + 'slots': user_slots[service_name], + 'state': { + 'slot_values': {k: v for k, v in slot_values_dict.items() if v}, + 'requested_slots': self._get_requested_slots_from_action(user_actions[service_name]), + 'active_intent': user_intent, + }, + 'service': service_name, + } + ) + non_active_services = set(self._schemas.services) - appear_services + for service_name in non_active_services: + user_intent = self._get_intent_from_actions({}, sys_actions[service_name], user_actions[service_name]) + user_turn['frames'].append( + { + 'service': service_name, + 'slots': [], + 'state': { + 'active_intent': user_intent, + 'requested_slots': self._get_requested_slots_from_action(user_actions[service_name]), + 'slot_values': {}, + }, + } + ) + converted_turns.extend([user_turn, sys_turn]) + prev_dialog_states = dialog_states + return converted_turns, list(appear_services) + + def convert_to_dstc(self, id_list, dialogs): + """Generate a list of dialogues in the dstc8 data format.""" + converted_dialogs = [] + for dial_id in id_list: + converted_turns, covered_services = self._generate_dial_turns(dialogs[dial_id]['log'], dial_id) + dialog = {'dialogue_id': dial_id, 'services': covered_services, 'turns': converted_turns} + converted_dialogs.append(dialog) + return converted_dialogs + + +def change_to_nemo_id(dialogs_list, file_index): + for i, dialogue in enumerate(dialogs_list): + dialogue['dialogue_id'] = f'{file_index}_{i:05d}' + return dialogs_list + + +def main(): + schema_path = os.path.join(_DIR_PATH, args.schema_file_name) + schemas = schema.Schema(schema_path) + processor = Processor(schemas) + data_path = os.path.join(args.input_data_dir, 'data.json') + with open(data_path, 'r') as f: + data = json.load(f) + dev_test_ids = [] + output_dir = args.output_dir or _DIR_PATH + # Generate dev and test set according to the ids listed in the files. Ids not + # included in the dev and test id list files belong to the training set. + for output_dir_name, file_name in _PATH_MAPPING: + output_sub_dir = os.path.join(output_dir, output_dir_name) + if not os.path.exists(output_sub_dir): + os.makedirs(output_sub_dir) + schema_path = os.path.join(output_sub_dir, 'schema.json') + schemas.save_to_file(schema_path) + dial_ids = [] + if file_name: + id_list_path = os.path.join(args.input_data_dir, file_name) + with open(id_list_path) as f: + dial_ids = [id_name.strip() for id_name in f.readlines()] + dev_test_ids.extend(dial_ids) + else: + # Generate the ids for the training set. + dial_ids = list(set(data.keys()) - set(dev_test_ids)) + converted_dials = processor.convert_to_dstc(dial_ids, data) + logging.info('Unfound slot span ratio %s', processor.unfound_slot_span_ratio) + logging.info('Writing %d dialogs to %s', len(converted_dials), output_sub_dir) + for i in range(0, len(converted_dials), _NUM_DIALS_PER_FILE): + file_index = int(i / _NUM_DIALS_PER_FILE) + 1 + # Create a new json file and save the dialogues. 
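+            # With _NUM_DIALS_PER_FILE = 512, dialogues 0-511 go to dialogues_001.json,
+            # 512-1023 to dialogues_002.json, and so on.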
+ json_file_path = os.path.join(output_sub_dir, 'dialogues_{:03d}.json'.format(file_index)) + dialogs_list = converted_dials[(file_index - 1) * _NUM_DIALS_PER_FILE : file_index * _NUM_DIALS_PER_FILE] + dialogs_list = change_to_nemo_id(dialogs_list, file_index) + with open(json_file_path, 'w') as f: + json.dump(dialogs_list, f, indent=2, separators=(',', ': '), sort_keys=True) + logging.info('Created %s with %d dialogues.', json_file_path, len(dialogs_list)) + + +if __name__ == '__main__': + main() diff --git a/examples/nlp/dialogue_state_tracking/data/multiwoz/schema.json b/examples/nlp/dialogue_state_tracking/data/multiwoz/schema.json new file mode 100644 index 000000000000..c130b0fd818b --- /dev/null +++ b/examples/nlp/dialogue_state_tracking/data/multiwoz/schema.json @@ -0,0 +1,636 @@ +[ + { + "service_name": "hotel", + "slots": [ + { + "name": "hotel-pricerange", + "description": "the price range of the hotel", + "possible_values": [ + "$100", + "cheap", + "cheap>moderate", + "cheap|moderate", + "expensive", + "moderate" + ], + "is_categorical": true + }, + { + "name": "hotel-type", + "description": "the type of the hotel", + "possible_values": [ + "guesthouse", + "hotel", + "hotel|guesthouse" + ], + "is_categorical": true + }, + { + "name": "hotel-parking", + "description": "does the hotel have free parking", + "possible_values": [ + "free", + "no", + "yes" + ], + "is_categorical": true + }, + { + "name": "hotel-bookday", + "description": "the day of hotel booking", + "possible_values": [ + "friday", + "friday>tuesday", + "monday", + "mondaymonday", + "thursday", + "tuesday", + "wednesday", + "wednesday|friday" + ], + "is_categorical": true + }, + { + "name": "hotel-bookpeople", + "description": "number of people to book the hotel for", + "possible_values": [ + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8" + ], + "is_categorical": true + }, + { + "name": "hotel-bookstay", + "description": "the duration of stay or booking", + "possible_values": [ + "1", + "2", + "3", + "3|1", + "4", + "5", + "5|4", + "6", + "7", + "8" + ], + "is_categorical": true + }, + { + "name": "hotel-stars", + "description": "the rating of the hotel", + "possible_values": [ + "0", + "1", + "2", + "3", + "3|4", + "4", + "4|5", + "5" + ], + "is_categorical": true + }, + { + "name": "hotel-internet", + "description": "does it have internet or wifi", + "possible_values": [ + "free", + "no", + "yes" + ], + "is_categorical": true + }, + { + "name": "hotel-name", + "description": "the name of the hotel", + "possible_values": [], + "is_categorical": false + }, + { + "name": "hotel-area", + "description": "the locality of the hotel", + "possible_values": [ + "centre", + "east", + "north", + "south", + "west", + "west|centre" + ], + "is_categorical": true + } + ], + "description": "hotel reservations and vacation stays", + "intents": [ + { + "name": "find_hotel", + "description": "search for a hotel to stay in", + "is_transactional": false, + "required_slots": [], + "optional_slots": { + "hotel-pricerange": "dontcare", + "hotel-type": "dontcare", + "hotel-parking": "dontcare", + "hotel-bookday": "dontcare", + "hotel-bookpeople": "dontcare", + "hotel-bookstay": "dontcare", + "hotel-stars": "dontcare", + "hotel-internet": "dontcare", + "hotel-name": "dontcare", + "hotel-area": "dontcare" + } + } + ] + }, + { + "service_name": "train", + "slots": [ + { + "name": "train-destination", + "description": "the city you want to go to", + "possible_values": [ + "birmingham new street", + "bishops stortford", + "bournemouth", + 
"broxbourne", + "cambridge", + "centre", + "city centre north", + "copper kettle", + "curry prince", + "ely", + "glastonbury", + "gourmet burger kitchen", + "huntingdon marriott hotel", + "huntington marriott", + "kings lynn", + "leicester", + "liverpool", + "liverpool street", + "london", + "london kings cross", + "london liverpool street", + "norway", + "norwich", + "peterborough", + "stansted airport", + "stevenage" + ], + "is_categorical": true + }, + { + "name": "train-arriveby", + "description": "when should the train reach your destination", + "possible_values": [], + "is_categorical": false + }, + { + "name": "train-departure", + "description": "the location where you want to catch the train from", + "possible_values": [ + "alpha milton", + "aylesbray lodge guest", + "birmingham new street", + "bishops stortford", + "brookshite", + "broxbourne", + "cafe uno", + "camboats", + "cambridge", + "cineworld", + "city hall", + "duxford", + "east london", + "ely", + "hamilton lodge", + "huntingdon", + "kings lynn", + "leicester", + "liverpool", + "london", + "london kings cross", + "london liverpool", + "london liverpool street", + "norwich", + "panahar", + "peterborough", + "stansted airport", + "stevenage", + "stratford", + "wandlebury country park" + ], + "is_categorical": true + }, + { + "name": "train-day", + "description": "the day of the journey", + "possible_values": [ + "friday", + "monday", + "saturday", + "sunday", + "thursday", + "tuesday", + "wednesday" + ], + "is_categorical": true + }, + { + "name": "train-bookpeople", + "description": "number of tickets to buy", + "possible_values": [ + "1", + "10", + "15", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9" + ], + "is_categorical": true + }, + { + "name": "train-leaveat", + "description": "the departure time of the train", + "possible_values": [], + "is_categorical": false + } + ], + "description": "find trains that take you to places", + "intents": [ + { + "name": "find_train", + "description": "search for trains that take you places", + "is_transactional": false, + "required_slots": [], + "optional_slots": { + "train-destination": "dontcare", + "train-arriveby": "dontcare", + "train-departure": "dontcare", + "train-day": "dontcare", + "train-bookpeople": "dontcare", + "train-leaveat": "dontcare" + } + } + ] + }, + { + "service_name": "attraction", + "slots": [ + { + "name": "attraction-area", + "description": "the place where you are located", + "possible_values": [ + "centre", + "centre|west", + "east", + "north", + "south", + "west" + ], + "is_categorical": true + }, + { + "name": "attraction-name", + "description": "the name of the site you want to visit", + "possible_values": [], + "is_categorical": false + }, + { + "name": "attraction-type", + "description": "the type of attractions you are interested in", + "possible_values": [ + "architecture", + "boat", + "boating", + "camboats", + "church", + "churchills college", + "cinema", + "college", + "concert", + "concerthall", + "concerthall|boat", + "entertainment", + "entertainment|cinemas|museums|theatres", + "gallery", + "gastropub", + "hiking|historical", + "hotel", + "multiple sports", + "multiple sports|theatre", + "museum", + "museum kettles yard", + "museum|nightclub", + "night club", + "outdoor", + "park", + "park|boat", + "pool", + "special", + "sports", + "swimming pool", + "theater", + "theatre" + ], + "is_categorical": true + } + ], + "description": "find touristy stuff to do around you", + "intents": [ + { + "name": "find_attraction", + "description": 
"search for places to see for leisure", + "is_transactional": false, + "required_slots": [], + "optional_slots": { + "attraction-area": "dontcare", + "attraction-name": "dontcare", + "attraction-type": "dontcare" + } + } + ] + }, + { + "service_name": "restaurant", + "slots": [ + { + "name": "restaurant-pricerange", + "description": "indicates how expensive or cheap the restaurant is", + "possible_values": [ + "cheap", + "cheap|moderate", + "expensive", + "moderate" + ], + "is_categorical": true + }, + { + "name": "restaurant-area", + "description": "the locality of the restaurant", + "possible_values": [ + "centre", + "east", + "east|south", + "north", + "south", + "west" + ], + "is_categorical": true + }, + { + "name": "restaurant-food", + "description": "the cuisine or type of food served", + "possible_values": [], + "is_categorical": false + }, + { + "name": "restaurant-name", + "description": "the name of the restaurant", + "possible_values": [], + "is_categorical": false + }, + { + "name": "restaurant-bookday", + "description": "the day of booking at the restaurant", + "possible_values": [ + "friday", + "monday", + "saturday", + "saturday|thursday", + "sunday", + "sunday|thursday", + "thursday", + "tuesday", + "wednesday" + ], + "is_categorical": true + }, + { + "name": "restaurant-bookpeople", + "description": "number of people to reserve the restaurant for", + "possible_values": [ + "1", + "2", + "3", + "4", + "4|7", + "5", + "6", + "7", + "8" + ], + "is_categorical": true + }, + { + "name": "restaurant-booktime", + "description": "the time of the reservation at the restaurant", + "possible_values": [], + "is_categorical": false + } + ], + "description": "find places to dine and whet your appetite", + "intents": [ + { + "name": "find_restaurant", + "description": "search for places to wine and dine", + "is_transactional": false, + "required_slots": [], + "optional_slots": { + "restaurant-pricerange": "dontcare", + "restaurant-area": "dontcare", + "restaurant-food": "dontcare", + "restaurant-name": "dontcare", + "restaurant-bookday": "dontcare", + "restaurant-bookpeople": "dontcare", + "restaurant-booktime": "dontcare" + } + } + ] + }, + { + "service_name": "hospital", + "slots": [ + { + "name": "hospital-department", + "description": "the kind of ailment or sickness you want treated", + "possible_values": [ + "acute medical assessment unit", + "acute medicine for the elderly", + "antenatal", + "cambridge eye unit", + "cardiology", + "cardiology and coronary care unit", + "childrens oncology and haematology", + "childrens surgical and medicine", + "clinical decisions unit", + "clinical research facility", + "coronary care unit", + "diabetes and endocrinology", + "emergency department", + "gastroenterology", + "gynaecology", + "haematology", + "haematology and haematological oncology", + "haematology day unit", + "hepatobillary and gastrointestinal surgery regional referral centre", + "hepatology", + "infectious diseases", + "infusion services", + "inpatient occupational therapy", + "intermediate dependancy area", + "john farman intensive care unit", + "medical decisions unit", + "medicine for the elderly", + "neonatal unit", + "neurology", + "neurology neurosurgery", + "neurosciences", + "neurosciences critical care unit", + "oncology", + "oral and maxillofacial surgery and ent", + "paediatric clinic", + "paediatric day unit", + "paediatric intensive care unit", + "plastic and vascular surgery plastics", + "psychiatry", + "respiratory medicine", + "surgery", + "teenage cancer trust 
unit", + "transitional care", + "transplant high dependency unit", + "trauma and orthopaedics", + "trauma high dependency unit", + "urology" + ], + "is_categorical": true + } + ], + "description": "making you feel better when you are ill", + "intents": [ + { + "name": "find_hospital", + "description": "search for a medical facility or a doctor", + "is_transactional": false, + "required_slots": [], + "optional_slots": { + "hospital-department": "dontcare" + } + } + ] + }, + { + "service_name": "taxi", + "slots": [ + { + "name": "taxi-leaveat", + "description": "the time you want to depart", + "possible_values": [], + "is_categorical": false + }, + { + "name": "taxi-destination", + "description": "the place you want to get to", + "possible_values": [], + "is_categorical": false + }, + { + "name": "taxi-departure", + "description": "the place you want to board the taxi", + "possible_values": [], + "is_categorical": false + }, + { + "name": "taxi-arriveby", + "description": "the time of your arrival at the destination", + "possible_values": [], + "is_categorical": false + } + ], + "description": "rent cheap cabs to avoid traffic", + "intents": [ + { + "name": "find_taxi", + "description": "search for taxis to avoid traffic", + "is_transactional": false, + "required_slots": [], + "optional_slots": { + "taxi-leaveat": "dontcare", + "taxi-destination": "dontcare", + "taxi-departure": "dontcare", + "taxi-arriveby": "dontcare" + } + } + ] + }, + { + "service_name": "bus", + "slots": [ + { + "name": "bus-departure", + "description": "the departure place of the bus", + "possible_values": [ + "cambridge" + ], + "is_categorical": true + }, + { + "name": "bus-destination", + "description": "the destination of the bus", + "possible_values": [ + "bishops stortford", + "cambridge", + "kohinoor", + "london kings cross" + ], + "is_categorical": true + }, + { + "name": "bus-leaveat", + "description": "the time when bus leaves", + "possible_values": [ + "21:45" + ], + "is_categorical": true + }, + { + "name": "bus-day", + "description": "the day of the bus", + "possible_values": [ + "wednesday" + ], + "is_categorical": true + } + ], + "description": "Bus service for traveling", + "intents": [ + { + "name": "find_bus", + "description": "search for a bus", + "is_transactional": false, + "required_slots": [], + "optional_slots": { + "bus-departure": "dontcare", + "bus-destination": "dontcare", + "bus-day": "dontcare", + "taxi-leaveat": "dontcare" + } + } + ] + } +] diff --git a/examples/nlp/dialogue_state_tracking/dialogue_state_tracking_sgd.py b/examples/nlp/dialogue_state_tracking/dialogue_state_tracking_sgd.py new file mode 100644 index 000000000000..b5edb930720d --- /dev/null +++ b/examples/nlp/dialogue_state_tracking/dialogue_state_tracking_sgd.py @@ -0,0 +1,462 @@ +# ============================================================================= +# Copyright 2020 NVIDIA. All Rights Reserved. +# Copyright 2019 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================= + +''' +This file contains code artifacts adapted from the original implementation: +https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/train_and_predict.py +''' + +import argparse +import math +import os + +import nemo.collections.nlp as nemo_nlp +import nemo.collections.nlp.data.datasets.sgd_dataset.data_processor as data_processor +from nemo.collections.nlp.callbacks.sgd_callback import eval_epochs_done_callback, eval_iter_callback +from nemo.collections.nlp.data.datasets.sgd_dataset.schema_processor import SchemaPreprocessor +from nemo.collections.nlp.nm.trainables import SGDDecoderNM, SGDEncoderNM +from nemo.core import Backend, CheckpointCallback, EvaluatorCallback, NeuralModuleFactory, SimpleLossLoggerCallback +from nemo.utils import logging +from nemo.utils.lr_policies import get_lr_policy + +# Parsing arguments +parser = argparse.ArgumentParser(description='Schema_guided_dst') + +# BERT based utterance encoder related arguments +parser.add_argument( + "--max_seq_length", + default=80, + type=int, + help="The maximum total input sequence length after WordPiece tokenization. " + "Sequences longer than this will be truncated, and sequences shorter " + "than this will be padded.", +) +parser.add_argument("--dropout", default=0.1, type=float, help="Dropout rate for BERT representations.") +parser.add_argument( + "--pretrained_model_name", + default="bert-base-cased", + type=str, + help="Name of the pre-trained model", + choices=nemo_nlp.nm.trainables.get_pretrained_lm_models_list(), +) +parser.add_argument("--bert_checkpoint", default=None, type=str, help="Path to model checkpoint") +parser.add_argument("--bert_config", default=None, type=str, help="Path to bert config file in json format") +parser.add_argument( + "--tokenizer_model", + default=None, + type=str, + help="Path to pretrained tokenizer model, only used if --tokenizer is sentencepiece", +) +parser.add_argument( + "--tokenizer", + default="nemobert", + type=str, + choices=["nemobert", "sentencepiece"], + help="tokenizer to use, only relevant when using custom pretrained checkpoint.", +) +parser.add_argument("--vocab_file", default=None, help="Path to the vocab file.") +parser.add_argument( + "--do_lower_case", + action='store_true', + help="Whether to lower case the input text. True for uncased models, False for cased models. " + + "Only applicable when tokenizer is build with vocab file", +) + +# Hyperparameters and optimization related flags. +parser.add_argument( + "--checkpoint_dir", + default=None, + type=str, + help="The folder containing the checkpoints for the model to continue training", +) +parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") +parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.") +parser.add_argument("--num_epochs", default=80, type=int, help="Total number of training epochs to perform.") + +parser.add_argument("--optimizer_kind", default="adam_w", type=str) +parser.add_argument("--learning_rate", default=1e-4, type=float, help="The initial learning rate for Adam.") +parser.add_argument("--lr_policy", default="PolynomialDecayAnnealing", type=str) +parser.add_argument("--weight_decay", default=0.01, type=float) +parser.add_argument( + "--lr_warmup_proportion", + default=0.1, + type=float, + help="Proportion of training to perform linear learning rate warmup for. 
" "E.g., 0.1 = 10% of training.", +) +parser.add_argument("--grad_norm_clip", type=float, default=1, help="Gradient clipping") +parser.add_argument("--local_rank", default=None, type=int) +parser.add_argument("--amp_opt_level", default="O0", type=str, choices=["O0", "O1", "O2"]) +parser.add_argument("--num_gpus", default=1, type=int) + +# Input and output paths and other flags. +parser.add_argument( + "--task_name", + default="dstc8_single_domain", + type=str, + choices=data_processor.FILE_RANGES.keys(), + help="The name of the task to train.", +) +parser.add_argument( + "--data_dir", + type=str, + required=True, + help="Directory for the downloaded DSTC8 data, which contains the dialogue files" + " and schema files of all datasets (eg train, dev)", +) +parser.add_argument( + "--work_dir", + type=str, + default="output/SGD", + help="The output directory where the model checkpoints will be written.", +) +parser.add_argument( + "--schema_embedding_dir", + type=str, + default='schema_embedding_dir', + help="Directory where .npy file for embedding of entities (slots, values, intents) in the dataset_split's schema are stored.", +) +parser.add_argument( + "--overwrite_schema_emb_files", + action="store_true", + help="Whether to generate a new file saving the dialogue examples.", +) +parser.add_argument( + "--joint_acc_across_turn", + action="store_true", + help="Whether to compute joint accuracy across turn instead of across service. Should be set to True when conducting multiwoz style evaluation.", +) +parser.add_argument( + "--no_fuzzy_match", + action="store_true", + help="Whether to use fuzzy string matching when comparing non-categorical slot values. Fuzz match should not be used when conducting multiwoz style evaluation.", +) +parser.add_argument( + "--dialogues_example_dir", + type=str, + default="dialogues_example_dir", + help="Directory where preprocessed DSTC8 dialogues are stored.", +) +parser.add_argument( + "--overwrite_dial_files", action="store_true", help="Whether to generate a new file saving the dialogue examples." +) +parser.add_argument("--no_shuffle", action="store_true", help="Whether to shuffle training data") +parser.add_argument("--no_time_to_log_dir", action="store_true", help="whether to add time to work_dir or not") +parser.add_argument( + "--eval_dataset", type=str, default="dev", choices=["dev", "test"], help="Dataset split for evaluation." 
+logging.info(args)
+
+if args.debug_mode:
+    logging.setLevel(10)
+
+if args.task_name == "multiwoz":
+    schema_config = {
+        "MAX_NUM_CAT_SLOT": 9,
+        "MAX_NUM_NONCAT_SLOT": 4,
+        "MAX_NUM_VALUE_PER_CAT_SLOT": 47,
+        "MAX_NUM_INTENT": 1,
+    }
+else:
+    schema_config = {
+        "MAX_NUM_CAT_SLOT": 6,
+        "MAX_NUM_NONCAT_SLOT": 12,
+        "MAX_NUM_VALUE_PER_CAT_SLOT": 12,
+        "MAX_NUM_INTENT": 4,
+    }
+
+if not os.path.exists(args.data_dir):
+    raise ValueError(f'Data not found at {args.data_dir}')
+
+nf = NeuralModuleFactory(
+    backend=Backend.PyTorch,
+    local_rank=args.local_rank,
+    optimization_level=args.amp_opt_level,
+    log_dir=args.work_dir,
+    create_tb_writer=True,
+    checkpoint_dir=args.checkpoint_dir,
+    files_to_copy=[__file__],
+    add_time_to_log_dir=not args.no_time_to_log_dir,
+)
+
+pretrained_bert_model = nemo_nlp.nm.trainables.get_pretrained_lm_model(
+    pretrained_model_name=args.pretrained_model_name,
+    config=args.bert_config,
+    vocab=args.vocab_file,
+    checkpoint=args.bert_checkpoint,
+)
+
+schema_config["EMBEDDING_DIMENSION"] = pretrained_bert_model.hidden_size
+schema_config["MAX_SEQ_LENGTH"] = args.max_seq_length
+
+tokenizer = nemo_nlp.data.tokenizers.get_tokenizer(
+    tokenizer_name=args.tokenizer,
+    pretrained_model_name=args.pretrained_model_name,
+    tokenizer_model=args.tokenizer_model,
+    vocab_file=args.vocab_file,
+    do_lower_case=args.do_lower_case,
+)
+
+hidden_size = pretrained_bert_model.hidden_size
+
+# Run SGD preprocessor to generate and store schema embeddings
+schema_preprocessor = 
SchemaPreprocessor( + data_dir=args.data_dir, + schema_embedding_dir=args.schema_embedding_dir, + schema_config=schema_config, + tokenizer=tokenizer, + bert_model=pretrained_bert_model, + overwrite_schema_emb_files=args.overwrite_schema_emb_files, + bert_ckpt_dir=args.checkpoint_dir, + nf=nf, + mode=args.schema_emb_init, + is_trainable=args.train_schema_emb, +) + +dialogues_processor = data_processor.Dstc8DataProcessor( + task_name=args.task_name, + dstc8_data_dir=args.data_dir, + dialogues_example_dir=args.dialogues_example_dir, + tokenizer=tokenizer, + schema_emb_processor=schema_preprocessor, + overwrite_dial_files=args.overwrite_dial_files, +) + +# define model pipeline +sgd_encoder = SGDEncoderNM(hidden_size=hidden_size, dropout=args.dropout) +sgd_decoder = SGDDecoderNM( + embedding_dim=hidden_size, schema_emb_processor=schema_preprocessor, head_transform="Logits" + args.head_transform +) +dst_loss = nemo_nlp.nm.losses.SGDDialogueStateLossNM(reduction=args.loss_reduction) + + +def create_pipeline(dataset_split='train'): + datalayer = nemo_nlp.nm.data_layers.SGDDataLayer( + dataset_split=dataset_split, + dialogues_processor=dialogues_processor, + batch_size=args.train_batch_size, + shuffle=not args.no_shuffle if dataset_split == 'train' else False, + num_workers=args.num_workers, + pin_memory=args.enable_pin_memory, + ) + data = datalayer() + + # Encode the utterances using BERT. + token_embeddings = pretrained_bert_model( + input_ids=data.utterance_ids, attention_mask=data.utterance_mask, token_type_ids=data.utterance_segment, + ) + encoded_utterance, token_embeddings = sgd_encoder(hidden_states=token_embeddings) + ( + logit_intent_status, + logit_req_slot_status, + logit_cat_slot_status, + logit_cat_slot_value, + logit_noncat_slot_status, + logit_noncat_slot_start, + logit_noncat_slot_end, + ) = sgd_decoder( + encoded_utterance=encoded_utterance, + token_embeddings=token_embeddings, + utterance_mask=data.utterance_mask, + cat_slot_values_mask=data.cat_slot_values_mask, + intent_status_mask=data.intent_status_mask, + service_ids=data.service_id, + ) + + if dataset_split == 'train': + loss = dst_loss( + logit_intent_status=logit_intent_status, + intent_status_labels=data.intent_status_labels, + logit_req_slot_status=logit_req_slot_status, + requested_slot_status=data.requested_slot_status, + req_slot_mask=data.req_slot_mask, + logit_cat_slot_status=logit_cat_slot_status, + categorical_slot_status=data.categorical_slot_status, + cat_slot_status_mask=data.cat_slot_status_mask, + logit_cat_slot_value=logit_cat_slot_value, + categorical_slot_values=data.categorical_slot_values, + logit_noncat_slot_status=logit_noncat_slot_status, + noncategorical_slot_status=data.noncategorical_slot_status, + noncat_slot_status_mask=data.noncat_slot_status_mask, + logit_noncat_slot_start=logit_noncat_slot_start, + logit_noncat_slot_end=logit_noncat_slot_end, + noncategorical_slot_value_start=data.noncategorical_slot_value_start, + noncategorical_slot_value_end=data.noncategorical_slot_value_end, + ) + tensors = [loss] + else: + tensors = [ + data.example_id_num, + data.service_id, + data.is_real_example, + data.start_char_idx, + data.end_char_idx, + logit_intent_status, + logit_req_slot_status, + logit_cat_slot_status, + logit_cat_slot_value, + logit_noncat_slot_status, + logit_noncat_slot_start, + logit_noncat_slot_end, + data.intent_status_labels, + data.requested_slot_status, + data.categorical_slot_status, + data.categorical_slot_values, + data.noncategorical_slot_status, + ] + + steps_per_epoch = 
math.ceil(len(datalayer) / (args.train_batch_size * args.num_gpus))
+    return steps_per_epoch, tensors
+
+
+steps_per_epoch, train_tensors = create_pipeline()
+logging.info(f'Steps per epoch: {steps_per_epoch}')
+_, eval_tensors = create_pipeline(dataset_split=args.eval_dataset)
+
+# Create trainer and execute training action
+train_callback = SimpleLossLoggerCallback(
+    tensors=train_tensors,
+    print_func=lambda x: logging.info("Loss: {:.8f}".format(x[0].item())),
+    get_tb_values=lambda x: [["loss", x[0]]],
+    tb_writer=nf.tb_writer,
+    step_freq=args.loss_log_freq if args.loss_log_freq > 0 else steps_per_epoch,
+)
+
+# we'll write predictions to file in DSTC8 format during evaluation callback
+input_json_files = [
+    os.path.join(args.data_dir, args.eval_dataset, 'dialogues_{:03d}.json'.format(fid))
+    for fid in data_processor.FILE_RANGES[args.task_name][args.eval_dataset]
+]
+schema_json_file = os.path.join(args.data_dir, args.eval_dataset, 'schema.json')
+
+# Write predictions to file in DSTC8 format.
+prediction_dir = os.path.join(nf.work_dir, 'predictions', 'pred_res_{}_{}'.format(args.eval_dataset, args.task_name))
+output_metric_file = os.path.join(nf.work_dir, 'metrics.txt')
+os.makedirs(prediction_dir, exist_ok=True)
+
+eval_callback = EvaluatorCallback(
+    eval_tensors=eval_tensors,
+    user_iter_callback=lambda x, y: eval_iter_callback(x, y, schema_preprocessor, args.eval_dataset),
+    user_epochs_done_callback=lambda x: eval_epochs_done_callback(
+        x,
+        input_json_files,
+        args.eval_dataset,
+        args.data_dir,
+        prediction_dir,
+        output_metric_file,
+        args.state_tracker,
+        args.debug_mode,
+        schema_preprocessor,
+        args.joint_acc_across_turn,
+        args.no_fuzzy_match,
+    ),
+    tb_writer=nf.tb_writer,
+    eval_step=args.eval_epoch_freq * steps_per_epoch,
+)
+
+ckpt_callback = CheckpointCallback(
+    folder=nf.checkpoint_dir,
+    epoch_freq=args.save_epoch_freq,
+    step_freq=args.save_step_freq,
+    checkpoints_to_keep=args.checkpoints_to_keep,
+)
+
+lr_policy_fn = get_lr_policy(
+    args.lr_policy, total_steps=args.num_epochs * steps_per_epoch, warmup_ratio=args.lr_warmup_proportion
+)
+
+nf.train(
+    tensors_to_optimize=train_tensors,
+    callbacks=[train_callback, eval_callback, ckpt_callback],
+    lr_policy=lr_policy_fn,
+    optimizer=args.optimizer_kind,
+    optimization_params={
+        "num_epochs": args.num_epochs,
+        "lr": args.learning_rate,
+        "eps": 1e-6,
+        "weight_decay": args.weight_decay,
+        "grad_norm_clip": args.grad_norm_clip,
+    },
+)
diff --git a/examples/speaker_recognition/notebooks/Speaker_Recognition_an4.ipynb b/examples/speaker_recognition/notebooks/Speaker_Recognition_an4.ipynb
index 75f62f1c050c..118341cd2b30 100644
--- a/examples/speaker_recognition/notebooks/Speaker_Recognition_an4.ipynb
+++ b/examples/speaker_recognition/notebooks/Speaker_Recognition_an4.ipynb
@@ -310,7 +310,7 @@
   },
   "outputs": [],
   "source": [
-    "logging = nemo.logging\n",
+    "from nemo.utils import logging\n",
     "yaml = YAML(typ=\"safe\")\n",
     "with open('../configs/quartznet_spkr_3x1x512_xvector.yaml') as f:\n",
     "    spkr_params = yaml.load(f)\n",
diff --git a/examples/speaker_recognition/notebooks/Speaker_Recognition_hi-mia.ipynb b/examples/speaker_recognition/notebooks/Speaker_Recognition_hi-mia.ipynb
index 234bd53cbde0..4a25b4856bc9 100644
--- a/examples/speaker_recognition/notebooks/Speaker_Recognition_hi-mia.ipynb
+++ b/examples/speaker_recognition/notebooks/Speaker_Recognition_hi-mia.ipynb
@@ -198,7 +198,7 @@
   },
   "outputs": [],
   "source": [
-    "logging = nemo.logging\n",
+    "from nemo.utils import logging\n",
     "yaml = YAML(typ=\"safe\")\n",
    "with 
open('examples/speaker_recognition/configs/quartznet_spkr_3x2x512_xvector.yaml') as f:\n", " spkr_params = yaml.load(f)\n", diff --git a/examples/speaker_recognition/speaker_reco.py b/examples/speaker_recognition/speaker_reco.py index 3c6cf1b84985..be85e1863769 100644 --- a/examples/speaker_recognition/speaker_reco.py +++ b/examples/speaker_recognition/speaker_reco.py @@ -27,10 +27,9 @@ process_classification_evaluation_batch, process_classification_evaluation_epoch, ) +from nemo.utils import logging from nemo.utils.lr_policies import CosineAnnealing -logging = nemo.logging - def parse_args(): parser = argparse.ArgumentParser( diff --git a/examples/speaker_recognition/spkr_get_emb.py b/examples/speaker_recognition/spkr_get_emb.py index db93f638979f..7fe5d9848bc0 100644 --- a/examples/speaker_recognition/spkr_get_emb.py +++ b/examples/speaker_recognition/spkr_get_emb.py @@ -23,8 +23,7 @@ import nemo import nemo.collections.asr as nemo_asr import nemo.utils.argparse as nm_argparse - -logging = nemo.logging +from nemo.utils import logging def parse_args(): diff --git a/examples/start_here/chatbot_example.py b/examples/start_here/chatbot_example.py index fb59b243c67f..e0e974ab446c 100644 --- a/examples/start_here/chatbot_example.py +++ b/examples/start_here/chatbot_example.py @@ -3,8 +3,7 @@ import shutil import nemo - -logging = nemo.logging +from nemo.utils import logging data_file = "movie_data.txt" diff --git a/examples/start_here/simplest_example.py b/examples/start_here/simplest_example.py index 0bf3fb795dac..1e4bd2de633f 100644 --- a/examples/start_here/simplest_example.py +++ b/examples/start_here/simplest_example.py @@ -1,7 +1,6 @@ # Copyright (c) 2019 NVIDIA Corporation import nemo - -logging = nemo.logging +from nemo.utils import logging nf = nemo.core.NeuralModuleFactory() # To use CPU-only do: diff --git a/examples/tts/fastspeech.py b/examples/tts/fastspeech.py index 94f342a70a8b..15a147fbfd03 100644 --- a/examples/tts/fastspeech.py +++ b/examples/tts/fastspeech.py @@ -23,9 +23,7 @@ from nemo.collections import asr as nemo_asr from nemo.collections import tts as nemo_tts from nemo.utils import argparse as nm_argparse -from nemo.utils import lr_policies - -logging = nemo.logging +from nemo.utils import logging, lr_policies def parse_args(): diff --git a/examples/tts/fastspeech_durations.py b/examples/tts/fastspeech_durations.py index e7b827a9feae..ac692e366cf4 100644 --- a/examples/tts/fastspeech_durations.py +++ b/examples/tts/fastspeech_durations.py @@ -22,8 +22,7 @@ import nemo import nemo.collections.asr as nemo_asr import nemo.collections.tts as nemo_tts - -logging = nemo.logging +from nemo.utils import logging def parse_args(): diff --git a/examples/tts/notebooks/1_Tacotron_inference.ipynb b/examples/tts/notebooks/1_Tacotron_inference.ipynb new file mode 100644 index 000000000000..23b27fecc395 --- /dev/null +++ b/examples/tts/notebooks/1_Tacotron_inference.ipynb @@ -0,0 +1,650 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Copyright 2020 NVIDIA. 
All Rights Reserved.\n",
+    "#\n",
+    "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+    "# you may not use this file except in compliance with the License.\n",
+    "# You may obtain a copy of the License at\n",
+    "#\n",
+    "#     http://www.apache.org/licenses/LICENSE-2.0\n",
+    "#\n",
+    "# Unless required by applicable law or agreed to in writing, software\n",
+    "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+    "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+    "# See the License for the specific language governing permissions and\n",
+    "# limitations under the License."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\"\"\"\n",
+    "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n",
+    "Instructions for setting up Colab are as follows:\n",
+    "1. Open a new Python 3 notebook.\n",
+    "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n",
+    "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n",
+    "4. Run this cell to set up dependencies.\n",
+    "\"\"\"\n",
+    "# If you're using Google Colab and not running locally, run this cell.\n",
+    "!pip install wget\n",
+    "!pip install nemo_toolkit[tts]\n",
+    "\n",
+    "!mkdir configs\n",
+    "!wget -P configs/ https://raw.githubusercontent.com/NVIDIA/NeMo/master/examples/tts/configs/tacotron2.yaml\n",
+    "!wget -P configs/ https://raw.githubusercontent.com/NVIDIA/NeMo/master/examples/tts/configs/waveglow.yaml"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import argparse\n",
+    "import math\n",
+    "import os\n",
+    "import copy\n",
+    "import shutil\n",
+    "import librosa\n",
+    "import matplotlib.pyplot as plt\n",
+    "from functools import partial\n",
+    "from scipy.io.wavfile import write\n",
+    "import numpy as np\n",
+    "import IPython.display as ipd\n",
+    "\n",
+    "from ruamel.yaml import YAML\n",
+    "\n",
+    "import torch\n",
+    "import nemo\n",
+    "import nemo.collections.asr as nemo_asr\n",
+    "import nemo.collections.tts as nemo_tts\n",
+    "import nemo.utils.argparse as nm_argparse\n",
+    "\n",
+    "from nemo.utils import logging"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Load the config files\n",
+    "config_path = '../configs/tacotron2.yaml'\n",
+    "waveglow_config_path = '../configs/waveglow.yaml'\n",
+    "\n",
+    "yaml = YAML(typ=\"safe\")\n",
+    "with open(config_path) as file:\n",
+    "    tacotron2_config = yaml.load(file)\n",
+    "    labels = tacotron2_config[\"labels\"]\n",
+    "    \n",
+    "with open(waveglow_config_path) as file:\n",
+    "    waveglow_config = yaml.load(file)"
+   ]
+  },
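+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional sanity check (an addition to the tutorial): confirm that the configs\n",
+    "# loaded as expected before any modules are built.\n",
+    "print('Number of labels:', len(labels))\n",
+    "print('Sample rate:', tacotron2_config['sample_rate'])"
+   ]
+  },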
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Download pre-trained checkpoints\n",
+    "\n",
+    "Note: The checkpoint for WaveGlow is very large (>1GB), so please ensure you have sufficient storage space."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "base_checkpoint_path = './checkpoints/'\n",
+    "WAVEGLOW = os.path.join(base_checkpoint_path, 'WaveGlowNM.pt')\n",
+    "TACOTRON_ENCODER = os.path.join(base_checkpoint_path, 'Tacotron2Encoder.pt')\n",
+    "TACOTRON_DECODER = os.path.join(base_checkpoint_path, 'Tacotron2Decoder.pt')\n",
+    "TACOTRON_POSTNET = os.path.join(base_checkpoint_path, 'Tacotron2Postnet.pt')\n",
+    "TEXT_EMBEDDING = os.path.join(base_checkpoint_path, 'TextEmbedding.pt')\n",
+    "\n",
+    "if not os.path.exists(base_checkpoint_path):\n",
+    "    os.makedirs(base_checkpoint_path)\n",
+    "    \n",
+    "if not os.path.exists(WAVEGLOW):\n",
+    "    !wget https://api.ngc.nvidia.com/v2/models/nvidia/waveglow_ljspeech/versions/2/files/WaveGlowNM.pt -P {base_checkpoint_path};\n",
+    "\n",
+    "if not os.path.exists(TACOTRON_ENCODER):\n",
+    "    !wget https://api.ngc.nvidia.com/v2/models/nvidia/tacotron2_ljspeech/versions/2/files/Tacotron2Encoder.pt -P {base_checkpoint_path};\n",
+    "    \n",
+    "if not os.path.exists(TACOTRON_DECODER):\n",
+    "    !wget https://api.ngc.nvidia.com/v2/models/nvidia/tacotron2_ljspeech/versions/2/files/Tacotron2Decoder.pt -P {base_checkpoint_path};\n",
+    "\n",
+    "if not os.path.exists(TACOTRON_POSTNET):\n",
+    "    !wget https://api.ngc.nvidia.com/v2/models/nvidia/tacotron2_ljspeech/versions/2/files/Tacotron2Postnet.pt -P {base_checkpoint_path};\n",
+    "\n",
+    "if not os.path.exists(TEXT_EMBEDDING):\n",
+    "    !wget https://api.ngc.nvidia.com/v2/models/nvidia/tacotron2_ljspeech/versions/2/files/TextEmbedding.pt -P {base_checkpoint_path};\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Prepare the Neural Factory\n",
+    "neural_factory = nemo.core.NeuralModuleFactory(\n",
+    "    optimization_level=\"O0\", backend=nemo.core.Backend.PyTorch\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Text Line Data Layer\n",
+    "\n",
+    "Construct a simple data layer to load a single line of text (accepted from the user) and pass it to the model."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nemo.backends.pytorch import DataLayerNM\n",
+    "from nemo.core.neural_types import *\n",
+    "from nemo.utils.misc import pad_to\n",
+    "from nemo.collections.asr.parts.dataset import TranscriptDataset"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class SentenceDataLayer(DataLayerNM):\n",
+    "    \"\"\"A simple Neural Module for loading textual transcript data.\n",
+    "    The path, labels, and eos_id arguments are dataset parameters.\n",
+    "\n",
+    "    Args:\n",
+    "        pad_id (int): Label position of padding symbol\n",
+    "        batch_size (int): Size of batches to generate in data loader\n",
+    "        drop_last (bool): Whether we drop the last (possibly incomplete) batch.\n",
+    "            Defaults to False.\n",
+    "        num_workers (int): Number of processes to work on data loading (0 for\n",
+    "            just main process).\n",
+    "            Defaults to 0.\n",
+    "    \"\"\"\n",
+    "\n",
+    "    @property\n",
+    "    def output_ports(self):\n",
+    "        \"\"\"Returns definitions of module output ports.\n",
+    "\n",
+    "        texts:\n",
+    "            0: AxisType(BatchTag)\n",
+    "\n",
+    "            1: AxisType(TimeTag)\n",
+    "\n",
+    "        texts_length:\n",
+    "            0: AxisType(BatchTag)\n",
+    "\n",
+    "        \"\"\"\n",
+    "        return {\n",
+    "            'texts': NeuralType(('B', 'T'), LabelsType()),\n",
+    "            'texts_length': NeuralType(tuple('B'), LengthsType()),\n",
+    "        }\n",
+    "\n",
+    "    def __init__(\n",
+    "        self,\n",
+    "        path,\n",
+    "        labels,\n",
+    "        batch_size,\n",
+    "        bos_id=None,\n",
+    "        eos_id=None,\n",
+    "        pad_id=None,\n",
+    "        drop_last=False,\n",
+    "        num_workers=0,\n",
+    "        shuffle=True,\n",
+    "    ):\n",
+    "        super().__init__()\n",
+    "\n",
+    "        # Set up dataset\n",
+    "        self.dataset_params = {\n",
+    "            'path': path,\n",
+    "            'labels': labels,\n",
+    "            'bos_id': bos_id,\n",
+    "            'eos_id': eos_id,\n",
+    "        }\n",
+    "\n",
+    "        self._dataset = TranscriptDataset(**self.dataset_params)\n",
+    "\n",
+    "        # Set up data loader\n",
+    "        sampler = None\n",
+    "        pad_id = 0 if pad_id is None else pad_id\n",
+    "        \n",
+    "    def update_dataset(self):\n",
+    "        self._dataset = TranscriptDataset(**self.dataset_params)\n",
+    "        logging.info('Dataset updated.')\n",
+    "\n",
+    "    def __len__(self):\n",
+    "        return len(self._dataset)\n",
+    "\n",
+    "    @property\n",
+    "    def dataset(self):\n",
+    "        return self._dataset\n",
+    "\n",
+    "    @property\n",
+    "    def data_iterator(self):\n",
+    "        return None\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Create the Tacotron 2 + WaveGlow Neural Modules"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def create_NMs(tacotron2_config, waveglow_config, labels, decoder_infer=False, waveglow_sigma=0.6):\n",
+    "    data_preprocessor = nemo_asr.AudioToMelSpectrogramPreprocessor(\n",
+    "        **tacotron2_config[\"AudioToMelSpectrogramPreprocessor\"][\"init_params\"]\n",
+    "    )\n",
+    "    \n",
+    "    text_embedding_params = copy.deepcopy(tacotron2_config[\"TextEmbedding\"][\"init_params\"])\n",
+    "    text_embedding_params['n_symbols'] = len(labels) + 3\n",
+    "    \n",
+    "    # Load checkpoint for text embedding\n",
+    "    text_embedding = nemo_tts.TextEmbedding(**text_embedding_params)\n",
+    "    text_embedding.restore_from(TEXT_EMBEDDING)\n",
+    "    \n",
+    "    # Load checkpoint for encoder\n",
+    "    t2_enc = nemo_tts.Tacotron2Encoder(**tacotron2_config[\"Tacotron2Encoder\"][\"init_params\"])\n",
+    "    t2_enc.restore_from(TACOTRON_ENCODER)\n",
+    "    \n",
+    "    # Load checkpoint for decoder\n",
+    "    decoder_params = copy.deepcopy(tacotron2_config[\"Tacotron2Decoder\"][\"init_params\"])\n",
+    "    \n",
+    "    t2_dec = nemo_tts.Tacotron2DecoderInfer(**decoder_params)\n",
+    "    t2_dec.restore_from(TACOTRON_DECODER)\n",
+    "    \n",
+    "    # Load checkpoint for Postnet\n",
+    "    t2_postnet = nemo_tts.Tacotron2Postnet(**tacotron2_config[\"Tacotron2Postnet\"][\"init_params\"])\n",
+    "    t2_postnet.restore_from(TACOTRON_POSTNET)\n",
+    "    \n",
+    "    t2_loss = nemo_tts.Tacotron2Loss(**tacotron2_config[\"Tacotron2Loss\"][\"init_params\"])\n",
+    "    \n",
+    "    makegatetarget = nemo_tts.MakeGate()\n",
+    "\n",
+    "    total_weights = text_embedding.num_weights + t2_enc.num_weights + t2_dec.num_weights + t2_postnet.num_weights\n",
+    "\n",
+    "    logging.info('================================')\n",
+    "    logging.info(f\"Total number of parameters (Tacotron 2): {total_weights}\")\n",
+    "    logging.info('================================')\n",
+    "    \n",
+    "    # Load WaveGlow model\n",
+    "    waveglow_args = copy.deepcopy(waveglow_config[\"WaveGlowNM\"][\"init_params\"])\n",
+    "    waveglow_args['sigma'] = waveglow_sigma\n",
+    "    \n",
+    "    waveglow = nemo_tts.WaveGlowInferNM(**waveglow_args)\n",
+    "    waveglow.restore_from(WAVEGLOW)\n",
+    "    \n",
+    "    total_weights = waveglow.num_weights\n",
+    "    \n",
+    "    logging.info('================================')\n",
+    "    logging.info(f\"Total number of parameters (WaveGlow): {total_weights}\")\n",
+    "    logging.info('================================')\n",
+    "\n",
+    "    return (\n",
+    "        data_preprocessor,\n",
+    "        text_embedding,\n",
+    "        t2_enc,\n",
+    "        t2_dec,\n",
+    "        t2_postnet,\n",
+    "        t2_loss,\n",
+    "        makegatetarget,\n",
+    "    ), waveglow"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "neural_modules, waveglow = create_NMs(tacotron2_config, waveglow_config, labels, decoder_infer=True, waveglow_sigma=0.6);"
+   ]
+  },
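+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional (an addition to the tutorial, commented out by default): WaveGlow's\n",
+    "# sigma scales the Gaussian prior it samples from at inference time; if the\n",
+    "# generated audio sounds noisy, rebuilding the modules with a lower sigma may help.\n",
+    "# neural_modules, waveglow = create_NMs(tacotron2_config, waveglow_config, labels, decoder_infer=True, waveglow_sigma=0.5)"
+   ]
+  },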
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Utility functions"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def update_text(text):\n",
+    "    if not os.path.exists('cache/'):\n",
+    "        os.makedirs('cache/')\n",
+    "    \n",
+    "    fp = os.path.join('cache', 'input.txt')\n",
+    "    with open(fp, 'w', encoding='utf8') as f:\n",
+    "        f.write('{}\\n'.format(text))\n",
+    "        f.flush()\n",
+    "    \n",
+    "    logging.info(\"Updated input file with value: %s\", text)\n",
+    "    return fp\n",
+    "    \n",
+    "def cleanup_cachedir():\n",
+    "    if os.path.exists('cache/'):\n",
+    "        shutil.rmtree('cache/')\n",
+    "    logging.info(\"Cleaned up cache directory!\")\n",
+    "    \n",
+    "def plot_and_save_spec(spectrogram, i, save_dir=None):\n",
+    "    fig, ax = plt.subplots(figsize=(12, 3))\n",
+    "    im = ax.imshow(spectrogram, aspect=\"auto\", origin=\"lower\", interpolation='none')\n",
+    "    plt.colorbar(im, ax=ax)\n",
+    "    plt.xlabel(\"Frames\")\n",
+    "    plt.ylabel(\"Channels\")\n",
+    "    plt.tight_layout()\n",
+    "    save_file = f\"spec_{i}.png\"\n",
+    "    if save_dir:\n",
+    "        save_file = os.path.join(save_dir, save_file)\n",
+    "    plt.savefig(save_file)\n",
+    "    plt.close()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Initializing the inference DAG\n",
+    "\n",
+    "To initialize the graph, we accept some text from the user. Later, we will accept the actual text that we want to convert to speech!"
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "text = input('Please enter some initial text here :')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "filepath = update_text(text)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create inference DAG" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Tacotron 2 DAG\n", + "(_, text_embedding, t2_enc, t2_dec, t2_postnet, _, _) = neural_modules\n", + "\n", + "data_layer = SentenceDataLayer(\n", + " path=filepath,\n", + " labels=labels,\n", + " batch_size=1,\n", + " num_workers=0,\n", + " bos_id=len(labels),\n", + " eos_id=len(labels) + 1,\n", + " pad_id=len(labels) + 2,\n", + " shuffle=False,\n", + ")\n", + "transcript, transcript_len = data_layer()\n", + "\n", + "transcript_embedded = text_embedding(char_phone=transcript)\n", + "\n", + "transcript_encoded = t2_enc(char_phone_embeddings=transcript_embedded, embedding_length=transcript_len,)\n", + "\n", + "mel_decoder, gate, alignments, mel_len = t2_dec(\n", + " char_phone_encoded=transcript_encoded, encoded_length=transcript_len,\n", + ")\n", + "\n", + "mel_postnet = t2_postnet(mel_input=mel_decoder)\n", + "\n", + "# WaveGlow DAG\n", + "audio_pred = waveglow(mel_spectrogram=mel_postnet)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Setup inference tensors\n", + "infer_tensors = [mel_postnet, gate, alignments, mel_len]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Run inference DAG" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def run_tacotron2():\n", + " logging.info(\"Running Tacotron 2\")\n", + " # Run tacotron 2\n", + " evaluated_tensors = neural_factory.infer(\n", + " tensors=infer_tensors, offload_to_cpu=False\n", + " )\n", + " logging.info(\"Done Running Tacotron 2\")\n", + " \n", + " mel_len_val = evaluated_tensors[-1]\n", + " \n", + " filterbank = librosa.filters.mel(\n", + " sr=tacotron2_config[\"sample_rate\"],\n", + " n_fft=tacotron2_config[\"n_fft\"],\n", + " n_mels=tacotron2_config[\"n_mels\"],\n", + " fmax=tacotron2_config[\"fmax\"],\n", + " )\n", + " \n", + " return evaluated_tensors, filterbank, mel_len_val\n", + "\n", + "def run_waveglow(save_dir, waveglow_denoiser_strength=0.0):\n", + " # Run Tacotron 2 and WaveGlow\n", + " evaluated_tensors, filterbank, mel_len_val = run_tacotron2()\n", + " \n", + " logging.info(\"Running Waveglow\")\n", + " evaluated_tensors = neural_factory.infer(\n", + " tensors=[audio_pred],\n", + " )\n", + " logging.info(\"Done Running Waveglow\")\n", + " \n", + " if waveglow_denoiser_strength > 0:\n", + " logging.info(\"Setup WaveGlow denoiser\")\n", + " waveglow.setup_denoiser()\n", + " \n", + " logging.info(\"Saving results to disk\")\n", + " for i, batch in enumerate(evaluated_tensors[0]):\n", + " audio = batch.cpu().numpy()\n", + " for j, sample in enumerate(audio):\n", + " sample_len = mel_len_val[i][j] * tacotron2_config[\"n_stride\"]\n", + " sample = sample[:sample_len]\n", + " save_file = f\"sample_{i * 32 + j}.wav\"\n", + " if save_dir:\n", + " save_file = os.path.join(save_dir, save_file)\n", + " if waveglow_denoiser_strength > 0:\n", + " sample, spec = waveglow.denoise(sample, strength=waveglow_denoiser_strength)\n", + " else:\n", + " spec, _ 
= librosa.core.magphase(librosa.core.stft(sample, n_fft=waveglow_config[\"n_fft\"]))\n",
+    "            write(save_file, waveglow_config[\"sample_rate\"], sample)\n",
+    "            spec = np.dot(filterbank, spec)\n",
+    "            spec = np.log(np.clip(spec, a_min=1e-5, a_max=None))\n",
+    "            plot_and_save_spec(spec, i * 32 + j, save_dir)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Run Tacotron 2 + WaveGlow on input text"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "text = input('Please enter the text to synthesize: ')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "filepath = update_text(text)\n",
+    "data_layer.update_dataset()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Prepare directories to save results"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "savedir = 'results/'\n",
+    "saved_audio = os.path.join(savedir, 'sample_0.wav')\n",
+    "saved_spectrogram = os.path.join(savedir, 'spec_0.png')\n",
+    "\n",
+    "if not os.path.exists(savedir):\n",
+    "    os.makedirs(savedir)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Generate the audio\n",
+    "\n",
+    "Let's run the Tacotron 2 model and send the results to WaveGlow to generate the audio!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "run_waveglow(savedir, waveglow_denoiser_strength=0.0)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Let's hear the generated audio!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ipd.Audio(saved_audio, rate=waveglow_config['sample_rate'])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ipd.Image(saved_spectrogram)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Cleanup cachedir"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "cleanup_cachedir()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3.7.6 64-bit ('NeMo': conda)",
+   "language": "python",
+   "name": "python37664bitnemoconda43f94a748a2e4953b0129556ecdf4f62"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/examples/tts/tacotron2.py b/examples/tts/tacotron2.py
index 64f034c7b86e..f87ff213a7ba 100644
--- a/examples/tts/tacotron2.py
+++ b/examples/tts/tacotron2.py
@@ -28,10 +28,9 @@
     tacotron2_process_eval_batch,
     tacotron2_process_final_eval,
 )
+from nemo.utils import logging
 from nemo.utils.lr_policies import CosineAnnealing
 
-logging = nemo.logging
-
 
 def parse_args():
     parser = argparse.ArgumentParser(
diff --git a/examples/tts/tacotron2_v0p9.py b/examples/tts/tacotron2_v0p9.py
index d8e8dd69153e..e6339c5e542a 100644
--- a/examples/tts/tacotron2_v0p9.py
+++ b/examples/tts/tacotron2_v0p9.py
@@ -33,10 +33,9 @@
     tacotron2_process_eval_batch,
     tacotron2_process_final_eval,
 )
+from nemo.utils import logging
 from nemo.utils.lr_policies import CosineAnnealing
 
-logging = nemo.logging
-
 
 def parse_args():
     parser 
= argparse.ArgumentParser( diff --git a/examples/tts/tts_infer.py b/examples/tts/tts_infer.py index 48a9ffe4623d..df95b31063c0 100644 --- a/examples/tts/tts_infer.py +++ b/examples/tts/tts_infer.py @@ -24,8 +24,7 @@ import nemo import nemo.collections.asr as nemo_asr import nemo.collections.tts as nemo_tts - -logging = nemo.logging +from nemo.utils import logging def parse_args(): diff --git a/examples/tts/waveglow.py b/examples/tts/waveglow.py index 674117bd5825..63e262099f8a 100644 --- a/examples/tts/waveglow.py +++ b/examples/tts/waveglow.py @@ -20,8 +20,7 @@ import nemo.collections.tts as nemo_tts import nemo.utils.argparse as nm_argparse from nemo.collections.tts import waveglow_eval_log_to_tb_func, waveglow_log_to_tb_func, waveglow_process_eval_batch - -logging = nemo.logging +from nemo.utils import logging def parse_args(): diff --git a/examples/tts/waveglow_v0p9.py b/examples/tts/waveglow_v0p9.py index 6ad8fc7d14d1..2bc905ea6973 100644 --- a/examples/tts/waveglow_v0p9.py +++ b/examples/tts/waveglow_v0p9.py @@ -27,8 +27,7 @@ import nemo.collections.tts as nemo_tts import nemo.utils.argparse as nm_argparse from nemo.collections.tts import waveglow_eval_log_to_tb_func, waveglow_log_to_tb_func, waveglow_process_eval_batch - -logging = nemo.logging +from nemo.utils import logging def parse_args(): diff --git a/nemo/backends/pytorch/actions.py b/nemo/backends/pytorch/actions.py index d22fa8bc2c13..47a31c175bdc 100644 --- a/nemo/backends/pytorch/actions.py +++ b/nemo/backends/pytorch/actions.py @@ -537,6 +537,7 @@ def _eval(self, tensors_2_evaluate, callback, step, verbose=False): 'num_workers': dl_nm.num_workers, 'batch_size': dl_nm.batch_size, 'shuffle': False, + 'pin_memory': dl_nm.pin_memory, } if hasattr(dl_nm, 'collate_fn'): dataloader_params['collate_fn'] = dl_nm.collate_fn @@ -555,6 +556,7 @@ def _eval(self, tensors_2_evaluate, callback, step, verbose=False): 'num_workers': dl_nm.num_workers, 'batch_size': dl_nm.batch_size, 'shuffle': dl_nm.shuffle, + 'pin_memory': dl_nm.pin_memory, } if hasattr(dl_nm, 'collate_fn'): dataloader_params['collate_fn'] = dl_nm.collate_fn @@ -712,6 +714,7 @@ def _infer( 'num_workers': dl_nm.num_workers, 'batch_size': dl_nm.batch_size, 'shuffle': False, + 'pin_memory': dl_nm.pin_memory, } if hasattr(dl_nm, 'collate_fn'): dataloader_params['collate_fn'] = dl_nm.collate_fn @@ -730,6 +733,7 @@ def _infer( 'num_workers': dl_nm.num_workers, 'batch_size': dl_nm.batch_size, 'shuffle': dl_nm.shuffle, + 'pin_memory': dl_nm.pin_memory, } if hasattr(dl_nm, 'collate_fn'): dataloader_params['collate_fn'] = dl_nm.collate_fn @@ -1241,6 +1245,7 @@ def train( 'num_workers': dataNM.num_workers, 'batch_size': dataNM.batch_size, 'shuffle': False, + 'pin_memory': dataNM.pin_memory, } if hasattr(dataNM, 'collate_fn'): dataloader_params['collate_fn'] = dataNM.collate_fn @@ -1323,6 +1328,7 @@ def train( 'num_workers': dataNM.num_workers, 'batch_size': dataNM.batch_size, 'shuffle': dataNM.shuffle, + 'pin_memory': dataNM.pin_memory, } if hasattr(dataNM, 'collate_fn'): dataloader_params['collate_fn'] = dataNM.collate_fn diff --git a/nemo/backends/pytorch/common/losses.py b/nemo/backends/pytorch/common/losses.py index 70633a2ffc14..ad25a5dd6773 100644 --- a/nemo/backends/pytorch/common/losses.py +++ b/nemo/backends/pytorch/common/losses.py @@ -5,7 +5,7 @@ from nemo.core.neural_types import LabelsType, LogitsType, LossType, MaskType, NeuralType, RegressionValuesType from nemo.utils.decorators import add_port_docs -__all__ = ['SequenceLoss', 'CrossEntropyLossNM', 'MSELoss', 
'LossAggregatorNM']
+__all__ = ['SequenceLoss', 'CrossEntropyLossNM', 'MSELoss', 'LossAggregatorNM', 'BCEWithLogitsLossNM']
 
 
 class SequenceLoss(LossNM):
@@ -159,7 +159,7 @@ def _loss_function(self, logits, labels, loss_mask=None):
             labels_flatten = labels_flatten[loss_mask_flatten]
 
         if len(labels_flatten) == 0:
-            return 0
+            return self._criterion(logits, torch.argmax(logits, dim=-1))
 
         loss = self._criterion(logits_flatten, labels_flatten)
         return loss
@@ -248,3 +248,65 @@ def _loss_function(self, **kwargs):
         else:
             loss = loss.add(loss_value)
         return loss
+
+
+class BCEWithLogitsLossNM(LossNM):
+    """
+    BCEWithLogitsLoss
+    Args:
+        logits_ndim (int): number of dimensions (or rank) of the logits tensor
+        weight (list): list of rescaling weights given to each class
+        reduction (str): type of the reduction over the batch
+    """
+
+    @property
+    @add_port_docs()
+    def input_ports(self):
+        """Returns definitions of module input ports.
+        """
+        return {
+            "logits": NeuralType(['B'] + ['ANY'] * (self._logits_dim - 1), LogitsType()),
+            "labels": NeuralType(['B'] + ['ANY'] * (self._logits_dim - 2), LabelsType()),
+            "loss_mask": NeuralType(['B'] + ['ANY'] * (self._logits_dim - 2), MaskType(), optional=True),
+        }
+
+    @property
+    @add_port_docs()
+    def output_ports(self):
+        """Returns definitions of module output ports.
+
+        loss:
+            NeuralType(None)
+        """
+        return {"loss": NeuralType(elements_type=LossType())}
+
+    def __init__(self, logits_ndim=2, weight=None, reduction='mean'):
+        super().__init__()
+
+        if weight:
+            weight = torch.FloatTensor(weight).to(self._device)
+        self._criterion = nn.BCEWithLogitsLoss(weight=weight, reduction=reduction)
+        self._logits_dim = logits_ndim
+
+    def _loss_function(self, logits, labels, loss_mask=None):
+        """
+        Args:
+            logits (float): output of the classifier
+            labels (long): ground truth labels
+            loss_mask (bool/float/int): tensor to specify the masking
+        """
+        logits_flatten = torch.flatten(logits, start_dim=0, end_dim=-2)
+        labels_flatten = torch.flatten(labels, start_dim=0, end_dim=-1)
+
+        if loss_mask is not None:
+            if loss_mask.dtype is not torch.bool:
+                loss_mask = loss_mask > 0.5
+            loss_mask_flatten = torch.flatten(loss_mask, start_dim=0, end_dim=-1)
+            logits_flatten = logits_flatten[loss_mask_flatten]
+            labels_flatten = labels_flatten[loss_mask_flatten]
+
+        if len(labels_flatten) == 0:
+            return 0
+
+        loss = self._criterion(logits_flatten, labels_flatten)
+        return loss
diff --git a/nemo/backends/pytorch/nm.py b/nemo/backends/pytorch/nm.py
index 2b1c33b48e43..761aa505031e 100644
--- a/nemo/backends/pytorch/nm.py
+++ b/nemo/backends/pytorch/nm.py
@@ -39,6 +39,9 @@ def __init__(self, pretrained_model_name=None, name=None):
         nn.Module.__init__(self)  # For PyTorch API
         NeuralModule.__init__(self, name)  # For NeuralModule API
 
+        # Unfrozen by default.
+        self._frozen = False
+
         # Set module type.
         self._type = ModuleType.trainable
 
@@ -115,6 +118,8 @@ def freeze(self, weights=None):
         for name, param in self.named_parameters():
             if weights is None or name in weights:
                 param.requires_grad = False
+        # Freeze.
+        self._frozen = True
 
     @t.jit.ignore
     def unfreeze(self, weights=None):
@@ -126,6 +131,15 @@ def unfreeze(self, weights=None):
         for name, param in self.named_parameters():
             if weights is None or name in weights:
                 param.requires_grad = True
+        # Unfreeze.
+        self._frozen = False
+
+    @t.jit.ignore
+    def is_frozen(self) -> bool:
+        """ Returns:
+            True if the module weights were frozen with freeze(), False otherwise.
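+            Note that the flag reflects the most recent freeze()/unfreeze() call on
+            the module, even if only a subset of the weights was passed to that call.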
+ """ + return self._frozen @property def num_weights(self): @@ -214,6 +228,7 @@ def __init__(self, name=None): self._batch_size = 1 self._num_workers = os.cpu_count() # Use all CPUs by default. self._shuffle = False # Don't shuffle by default. + self._pin_memory = False @property def input_ports(self): @@ -327,6 +342,11 @@ def num_workers(self): # """ Property setting the number of workers. """ # self._num_workers = nw + @property + def pin_memory(self): + """ Property returning the pin memory flag. """ + return self._pin_memory + class LossNM(NeuralModule): """A helper Base class for creating Pytorch-based loss function modules. diff --git a/nemo/collections/asr/audio_preprocessing.py b/nemo/collections/asr/audio_preprocessing.py index 98c96b8520d9..54f3df7e8f0c 100644 --- a/nemo/collections/asr/audio_preprocessing.py +++ b/nemo/collections/asr/audio_preprocessing.py @@ -28,19 +28,18 @@ ] import math -import warnings from abc import abstractmethod import numpy as np import torch from packaging import version -import nemo from .parts.features import FilterbankFeatures from .parts.spectr_augment import SpecAugment, SpecCutout from nemo.backends.pytorch import NonTrainableNM from nemo.core import Optimization from nemo.core.neural_types import * +from nemo.utils import logging from nemo.utils.decorators import add_port_docs try: @@ -54,14 +53,12 @@ HAVE_TORCHAUDIO = True except ModuleNotFoundError: HAVE_TORCHAUDIO = False - warnings.warn('Could not import torchaudio. Some features might not work.') + logging.warning('Could not import torchaudio. Some features might not work.') + try: from apex import amp except (AttributeError, ModuleNotFoundError) as e: - warnings.warn("Unable to import APEX. Mixed precision and distributed training will not work.") - - -logging = nemo.logging + logging.warning("Unable to import APEX. Mixed precision and distributed training will not work.") class AudioPreprocessor(NonTrainableNM): diff --git a/nemo/collections/asr/contextnet.py b/nemo/collections/asr/contextnet.py index c09be485d67a..145a6d79718a 100644 --- a/nemo/collections/asr/contextnet.py +++ b/nemo/collections/asr/contextnet.py @@ -5,15 +5,13 @@ import torch.nn as nn import torch.nn.functional as F -import nemo from .jasper import JasperEncoder from .parts.jasper import init_weights from nemo.backends.pytorch.nm import TrainableNM from nemo.core.neural_types import * +from nemo.utils import logging from nemo.utils.decorators import add_port_docs -logging = nemo.logging - class ContextNetEncoder(JasperEncoder): """ diff --git a/nemo/collections/asr/data_layer.py b/nemo/collections/asr/data_layer.py index ab94c70d53e4..dbaba86c3190 100644 --- a/nemo/collections/asr/data_layer.py +++ b/nemo/collections/asr/data_layer.py @@ -24,7 +24,6 @@ import torch import webdataset as wd -import nemo from .parts.collections import ASRAudioText from .parts.dataset import ( AudioDataset, @@ -40,6 +39,7 @@ from nemo.backends.pytorch import DataLayerNM from nemo.core import DeviceType from nemo.core.neural_types import * +from nemo.utils import logging from nemo.utils.decorators import add_port_docs from nemo.utils.misc import pad_to @@ -51,8 +51,6 @@ 'AudioToSpeechLabelDataLayer', ] -logging = nemo.logging - def _process_augmentations(augmenter) -> AudioAugmentor: """Process list of online data augmentations. 
@@ -497,7 +495,7 @@ def __init__( self.collate_fn = partial(seq_collate_fn, token_pad_value=pad_id) # Check for distributed and partition shards accordingly - if torch.distributed.is_initialized(): + if torch.distributed.is_available() and torch.distributed.is_initialized(): global_rank = torch.distributed.get_rank() world_size = torch.distributed.get_world_size() diff --git a/nemo/collections/asr/greedy_ctc_decoder.py b/nemo/collections/asr/greedy_ctc_decoder.py index 287db80cd8bf..c4a264f10832 100644 --- a/nemo/collections/asr/greedy_ctc_decoder.py +++ b/nemo/collections/asr/greedy_ctc_decoder.py @@ -1,12 +1,27 @@ -# Copyright (c) 2019 NVIDIA Corporation -import torch +# -*- coding: utf-8 -*- -from nemo.backends.pytorch.nm import TrainableNM -from nemo.core.neural_types import * +# ============================================================================= +# Copyright (c) 2020 NVIDIA. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +from nemo.backends.pytorch.nm import NonTrainableNM +from nemo.core.neural_types import LogprobsType, NeuralType, PredictionsType from nemo.utils.decorators import add_port_docs -class GreedyCTCDecoder(TrainableNM): +class GreedyCTCDecoder(NonTrainableNM): """ Greedy decoder that computes the argmax over a softmax distribution """ @@ -14,23 +29,22 @@ class GreedyCTCDecoder(TrainableNM): @property @add_port_docs() def input_ports(self): - """Returns definitions of module input ports. + """Returns: + Definitions of module input ports. """ - # return {"log_probs": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag), 2: AxisType(ChannelTag),})} return {"log_probs": NeuralType(('B', 'T', 'D'), LogprobsType())} @property @add_port_docs() def output_ports(self): - """Returns definitions of module output ports. + """Returns: + Definitions of module output ports. 
""" - # return {"predictions": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)})} return {"predictions": NeuralType(('B', 'T'), PredictionsType())} def __init__(self): super().__init__() def forward(self, log_probs): - with torch.no_grad(): - argmx = log_probs.argmax(dim=-1, keepdim=False) - return argmx + argmx = log_probs.argmax(dim=-1, keepdim=False) + return argmx diff --git a/nemo/collections/asr/helpers.py b/nemo/collections/asr/helpers.py index 7734b48b9ee7..dd36cd412e1e 100644 --- a/nemo/collections/asr/helpers.py +++ b/nemo/collections/asr/helpers.py @@ -2,10 +2,8 @@ import torch -import nemo from .metrics import classification_accuracy, word_error_rate - -logging = nemo.logging +from nemo.utils import logging def __ctc_decoder_predictions_tensor(tensor, labels): diff --git a/nemo/collections/asr/jasper.py b/nemo/collections/asr/jasper.py index b5de8f6b7af4..c83ed59b5e3b 100644 --- a/nemo/collections/asr/jasper.py +++ b/nemo/collections/asr/jasper.py @@ -5,14 +5,12 @@ import torch.nn as nn import torch.nn.functional as F -import nemo from .parts.jasper import JasperBlock, StatsPoolLayer, init_weights, jasper_activations from nemo.backends.pytorch.nm import TrainableNM from nemo.core.neural_types import * +from nemo.utils import logging from nemo.utils.decorators import add_port_docs -logging = nemo.logging - class JasperEncoder(TrainableNM): """ diff --git a/nemo/collections/asr/las/helpers.py b/nemo/collections/asr/las/helpers.py index 92558323d1ef..baa44e48075b 100644 --- a/nemo/collections/asr/las/helpers.py +++ b/nemo/collections/asr/las/helpers.py @@ -3,11 +3,10 @@ import torch -import nemo from nemo.backends.pytorch.common.metrics import char_lm_metrics from nemo.collections.asr.metrics import word_error_rate +from nemo.utils import logging -logging = nemo.logging ENG_MWN = 5.3 diff --git a/nemo/collections/asr/parts/collections.py b/nemo/collections/asr/parts/collections.py index c28f2d9f3e30..de11bfebcf7c 100644 --- a/nemo/collections/asr/parts/collections.py +++ b/nemo/collections/asr/parts/collections.py @@ -6,10 +6,8 @@ import pandas as pd -import nemo from nemo.collections.asr.parts import manifest, parsers - -logging = nemo.logging +from nemo.utils import logging class _Collection(collections.UserList): diff --git a/nemo/collections/nlp/callbacks/sgd_callback.py b/nemo/collections/nlp/callbacks/sgd_callback.py new file mode 100644 index 000000000000..8ba7897952e8 --- /dev/null +++ b/nemo/collections/nlp/callbacks/sgd_callback.py @@ -0,0 +1,225 @@ +# ============================================================================= +# Copyright 2020 NVIDIA. All Rights Reserved. +# Copyright 2019 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================= + +""" +This file contains code artifacts adapted from the original implementation: +https://github.com/google-research/google-research/blob/master/schema_guided_dst +""" + +import json +import os + +import torch + +import nemo.collections.nlp.data.datasets.sgd_dataset.prediction_utils as pred_utils +from nemo import logging +from nemo.collections.nlp.data.datasets.sgd_dataset.evaluate import ( + ALL_SERVICES, + PER_FRAME_OUTPUT_FILENAME, + SEEN_SERVICES, + UNSEEN_SERVICES, + get_dataset_as_dict, + get_in_domain_services, + get_metrics, +) + +__all__ = ['eval_iter_callback', 'eval_epochs_done_callback'] + + +def tensor2list(tensor): + return tensor.detach().cpu().tolist() + + +def get_str_example_id(eval_dataset, ids_to_service_names_dict, example_id_num): + def format_turn_id(ex_id_num): + dialog_id_1, dialog_id_2, turn_id, service_id = ex_id_num + return "{}-{}_{:05d}-{:02d}-{}".format( + eval_dataset, dialog_id_1, dialog_id_2, turn_id, ids_to_service_names_dict[service_id] + ) + + return list(map(format_turn_id, tensor2list(example_id_num))) + + +def eval_iter_callback(tensors, global_vars, schema_processor, eval_dataset): + if 'predictions' not in global_vars: + global_vars['predictions'] = [] + + output = {} + for k, v in tensors.items(): + ind = k.find('~~~') + if ind != -1: + output[k[:ind]] = torch.cat(v) + + predictions = {} + ids_to_service_names_dict = schema_processor.get_ids_to_service_names_dict() + predictions['example_id'] = get_str_example_id(eval_dataset, ids_to_service_names_dict, output['example_id_num']) + + predictions['service_id'] = output['service_id'] + predictions['is_real_example'] = output['is_real_example'] + + # Scores are output for each intent. + # Note that the intent indices are shifted by 1 to account for NONE intent. + predictions['intent_status'] = torch.argmax(output['logit_intent_status'], -1) + + # Scores are output for each requested slot. + predictions['req_slot_status'] = torch.nn.Sigmoid()(output['logit_req_slot_status']) + + # For categorical slots, the status of each slot and the predicted value are output. + cat_slot_status_dist = torch.nn.Softmax(dim=-1)(output['logit_cat_slot_status']) + cat_slot_value_dist = torch.nn.Softmax(dim=-1)(output['logit_cat_slot_value']) + + predictions['cat_slot_status'] = torch.argmax(output['logit_cat_slot_status'], axis=-1) + predictions['cat_slot_status_p'] = torch.max(cat_slot_status_dist, axis=-1)[0] + predictions['cat_slot_value'] = torch.argmax(output['logit_cat_slot_value'], axis=-1) + predictions['cat_slot_value_p'] = torch.max(cat_slot_value_dist, axis=-1)[0] + + # For non-categorical slots, the status of each slot and the indices for spans are output. + noncat_slot_status_dist = torch.nn.Softmax(dim=-1)(output['logit_noncat_slot_status']) + + predictions['noncat_slot_status'] = torch.argmax(output['logit_noncat_slot_status'], axis=-1) + predictions['noncat_slot_status_p'] = torch.max(noncat_slot_status_dist, axis=-1)[0] + + softmax = torch.nn.Softmax(dim=-1) + start_scores = softmax(output['logit_noncat_slot_start']) + end_scores = softmax(output['logit_noncat_slot_end']) + + batch_size, max_num_noncat_slots, max_num_tokens = end_scores.size() + # Find the span with the maximum sum of scores for start and end indices. + total_scores = torch.unsqueeze(start_scores, axis=3) + torch.unsqueeze(end_scores, axis=2) + # Mask out scores where start_index > end_index. 
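+    # total_scores has shape [batch, num_noncat_slots, num_tokens, num_tokens]:
+    # entry (b, s, i, j) scores the span that starts at token i and ends at token j,
+    # so zeroing the i > j entries below keeps the argmax on valid spans only.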
+ # device = total_scores.device + start_idx = torch.arange(max_num_tokens, device=total_scores.device).view(1, 1, -1, 1) + end_idx = torch.arange(max_num_tokens, device=total_scores.device).view(1, 1, 1, -1) + invalid_index_mask = (start_idx > end_idx).repeat(batch_size, max_num_noncat_slots, 1, 1) + total_scores = torch.where( + invalid_index_mask, + torch.zeros(total_scores.size(), device=total_scores.device, dtype=total_scores.dtype), + total_scores, + ) + max_span_index = torch.argmax(total_scores.view(-1, max_num_noncat_slots, max_num_tokens ** 2), axis=-1) + max_span_p = torch.max(total_scores.view(-1, max_num_noncat_slots, max_num_tokens ** 2), axis=-1)[0] + predictions['noncat_slot_p'] = max_span_p + + span_start_index = torch.div(max_span_index, max_num_tokens) + span_end_index = torch.fmod(max_span_index, max_num_tokens) + + predictions['noncat_slot_start'] = span_start_index + predictions['noncat_slot_end'] = span_end_index + + # Add inverse alignments. + predictions['noncat_alignment_start'] = output['start_char_idx'] + predictions['noncat_alignment_end'] = output['end_char_idx'] + + # added for debugging + predictions['cat_slot_status_GT'] = output['categorical_slot_status'] + predictions['noncat_slot_status_GT'] = output['noncategorical_slot_status'] + + global_vars['predictions'].extend(combine_predictions_in_example(predictions, batch_size)) + + +def combine_predictions_in_example(predictions, batch_size): + ''' + Combines predicted values to a single example. + ''' + examples_preds = [{} for _ in range(batch_size)] + for k, v in predictions.items(): + if k != 'example_id': + v = torch.chunk(v, batch_size) + + for i in range(batch_size): + if k == 'example_id': + examples_preds[i][k] = v[i] + else: + examples_preds[i][k] = v[i].view(-1) + return examples_preds + + +def eval_epochs_done_callback( + global_vars, + input_json_files, + eval_dataset, + data_dir, + prediction_dir, + output_metric_file, + state_tracker, + eval_debug, + schema_emb_preprocessor, + joint_acc_across_turn, + no_fuzzy_match, +): + # added for debugging + in_domain_services = get_in_domain_services( + os.path.join(data_dir, eval_dataset, "schema.json"), os.path.join(data_dir, "train", "schema.json") + ) + ############## + pred_utils.write_predictions_to_file( + global_vars['predictions'], + input_json_files, + prediction_dir, + schemas=schema_emb_preprocessor.schemas, + state_tracker=state_tracker, + eval_debug=eval_debug, + in_domain_services=in_domain_services, + ) + metrics = evaluate( + prediction_dir, + data_dir, + eval_dataset, + output_metric_file, + schema_emb_preprocessor.schemas, + joint_acc_across_turn, + no_fuzzy_match, + ) + return metrics + + +def evaluate( + prediction_dir, data_dir, eval_dataset, output_metric_file, schemas, joint_acc_across_turn, no_fuzzy_match +): + + in_domain_services = get_in_domain_services( + os.path.join(data_dir, eval_dataset, "schema.json"), os.path.join(data_dir, "train", "schema.json") + ) + + with open(os.path.join(data_dir, eval_dataset, "schema.json")) as f: + eval_services = {} + list_services = json.load(f) + for service in list_services: + eval_services[service["service_name"]] = service + f.close() + + dataset_ref = get_dataset_as_dict(os.path.join(data_dir, eval_dataset, "dialogues_*.json")) + dataset_hyp = get_dataset_as_dict(os.path.join(prediction_dir, "*.json")) + + all_metric_aggregate, _ = get_metrics( + dataset_ref, dataset_hyp, eval_services, in_domain_services, joint_acc_across_turn, no_fuzzy_match + ) + if SEEN_SERVICES in 
all_metric_aggregate:
+        logging.info(f'Dialog metrics for {SEEN_SERVICES} : {sorted(all_metric_aggregate[SEEN_SERVICES].items())}')
+    if UNSEEN_SERVICES in all_metric_aggregate:
+        logging.info(f'Dialog metrics for {UNSEEN_SERVICES}: {sorted(all_metric_aggregate[UNSEEN_SERVICES].items())}')
+    if ALL_SERVICES in all_metric_aggregate:
+        logging.info(f'Dialog metrics for {ALL_SERVICES} : {sorted(all_metric_aggregate[ALL_SERVICES].items())}')
+    # Write the aggregated metrics values.
+    with open(output_metric_file, "w") as f:
+        json.dump(all_metric_aggregate, f, indent=2, separators=(",", ": "), sort_keys=True)
+        f.close()
+    # Write the per-frame metrics values with the corresponding dialogue frames.
+    with open(os.path.join(prediction_dir, PER_FRAME_OUTPUT_FILENAME), "w") as f:
+        json.dump(dataset_hyp, f, indent=2, separators=(",", ": "))
+        f.close()
+    return all_metric_aggregate[ALL_SERVICES]
diff --git a/nemo/collections/nlp/data/datasets/__init__.py b/nemo/collections/nlp/data/datasets/__init__.py
index 2342e3f25ead..1e31f3e115f4 100644
--- a/nemo/collections/nlp/data/datasets/__init__.py
+++ b/nemo/collections/nlp/data/datasets/__init__.py
@@ -31,6 +31,8 @@
     BertPunctuationCapitalizationInferDataset,
 )
 from nemo.collections.nlp.data.datasets.qa_squad_dataset.qa_squad_dataset import SquadDataset
+from nemo.collections.nlp.data.datasets.sgd_dataset.schema_embedding_dataset import SchemaEmbeddingDataset
+from nemo.collections.nlp.data.datasets.sgd_dataset.sgd_dataset import SGDDataset
 from nemo.collections.nlp.data.datasets.text_classification import (
     BertTextClassificationDataset,
     TextClassificationDataDesc,
diff --git a/nemo/collections/nlp/data/datasets/sgd_dataset/data_processor.py b/nemo/collections/nlp/data/datasets/sgd_dataset/data_processor.py
new file mode 100644
index 000000000000..7a4b7c2bc05f
--- /dev/null
+++ b/nemo/collections/nlp/data/datasets/sgd_dataset/data_processor.py
@@ -0,0 +1,401 @@
+# =============================================================================
+# Copyright 2020 NVIDIA. All Rights Reserved.
+# Copyright 2019 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+"""
+This file contains code artifacts adapted from the original implementation:
+https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/data_utils.py
+"""
+
+import json
+import os
+import re
+
+import numpy as np
+import torch
+
+from nemo.collections.nlp.data.datasets.sgd_dataset.input_example import InputExample
+from nemo.utils import logging
+
+__all__ = [
+    'FILE_RANGES',
+    'PER_FRAME_OUTPUT_FILENAME',
+    'Dstc8DataProcessor',
+]
+
+
+FILE_RANGES = {
+    "dstc8_single_domain": {"train": range(1, 44), "dev": range(1, 8), "test": range(1, 12)},
+    "dstc8_multi_domain": {"train": range(44, 128), "dev": range(8, 21), "test": range(12, 35)},
+    "dstc8_all": {"train": range(1, 128), "dev": range(1, 21), "test": range(1, 35)},
+    "DEBUG": {"train": range(1, 2), "dev": range(1, 2), "test": range(1, 2)},
+    "multiwoz": {"train": range(1, 18), "dev": range(1, 3), "test": range(1, 3)},
+}
+
+# Name of the file containing all predictions and their corresponding frame metrics.
+PER_FRAME_OUTPUT_FILENAME = "dialogues_and_metrics.json"
+
+
+class Dstc8DataProcessor(object):
+    """Data generator for dstc8 dialogues."""
+
+    def __init__(
+        self,
+        task_name,
+        dstc8_data_dir,
+        dialogues_example_dir,
+        tokenizer,
+        schema_emb_processor,
+        overwrite_dial_files=False,
+    ):
+        """
+        Constructs Dstc8DataProcessor
+        Args:
+            task_name (str): task name, for example, "dstc8_single_domain"
+            dstc8_data_dir (str): path to data directory
+            dialogues_example_dir (str): path to store processed dialogue examples
+            tokenizer (Tokenizer): such as NemoBertTokenizer
+            schema_emb_processor (Obj): contains information about schemas
+            overwrite_dial_files (bool): whether to overwrite dialogue files
+        """
+        self.dstc8_data_dir = dstc8_data_dir
+        self.dialogues_examples_dir = dialogues_example_dir
+
+        self._task_name = task_name
+        self.schema_config = schema_emb_processor.schema_config
+
+        train_file_range = FILE_RANGES[task_name]["train"]
+        dev_file_range = FILE_RANGES[task_name]["dev"]
+        test_file_range = FILE_RANGES[task_name]["test"]
+
+        self._file_ranges = {
+            "train": train_file_range,
+            "dev": dev_file_range,
+            "test": test_file_range,
+        }
+
+        self._tokenizer = tokenizer
+        self._max_seq_length = self.schema_config["MAX_SEQ_LENGTH"]
+
+        self.dial_files = {}
+
+        for dataset in ["train", "dev", "test"]:
+            # Process dialogue files
+            dial_file = f"{task_name}_{dataset}_examples.processed"
+            dial_file = os.path.join(dialogues_example_dir, dial_file)
+            self.dial_files[(task_name, dataset)] = dial_file
+
+            if not os.path.exists(dial_file) or overwrite_dial_files:
+                logging.debug(f"Start generating the dialogue examples for {dataset} dataset.")
+                master_device = not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
+                if master_device:
+                    if not os.path.exists(dialogues_example_dir):
+                        os.makedirs(dialogues_example_dir)
+                    dial_examples = self._generate_dialog_examples(dataset, schema_emb_processor.schemas)
+                    with open(dial_file, "wb") as f:
+                        np.save(f, dial_examples)
+                        f.close()
+                    logging.debug(f"The dialogue examples for {dataset} dataset saved at {dial_file}")
+                logging.debug(f"Finish generating the dialogue examples for {dataset} dataset.")
+
+            # wait until the master process writes to the dialogue processed file
+            if torch.distributed.is_initialized():
+                torch.distributed.barrier()
+
+    def get_dialog_examples(self, dataset):
+        """
+        Returns a list of `InputExample`s of the data splits' dialogues.
+        Args:
+            dataset(str): can be "train", "dev", or "test".
+        Returns:
+            examples: a list of `InputExample`s.
+        """
+        if (self._task_name, dataset) not in self.dial_files or not os.path.exists(
+            self.dial_files[(self._task_name, dataset)]
+        ):
+            raise ValueError(
+                f"{dataset} dialogue examples were not processed for {self._task_name} task. Re-initialize Dstc8DataProcessor and add {dataset} dataset to datasets arg."
+            )
+
+        dial_file = self.dial_files[(self._task_name, dataset)]
+        logging.info(f"Loading dialogue examples from {dial_file}.")
+        with open(dial_file, "rb") as f:
+            dial_examples = np.load(f, allow_pickle=True)
+            f.close()
+        return dial_examples
+
+    def _generate_dialog_examples(self, dataset, schemas):
+        """
+        Returns a list of `InputExample`s of the data splits' dialogues.
+        Args:
+            dataset(str): can be "train", "dev", or "test".
+            schemas(Schema): for all services and all datasets processed by the schema_processor
+        Returns:
+            examples: a list of `InputExample`s.
+        """
+        logging.info(f'Creating examples from the dialogues started...')
+        dialog_paths = [
+            os.path.join(self.dstc8_data_dir, dataset, "dialogues_{:03d}.json".format(i))
+            for i in self._file_ranges[dataset]
+        ]
+        dialogs = Dstc8DataProcessor.load_dialogues(dialog_paths)
+
+        examples = []
+        for dialog_idx, dialog in enumerate(dialogs):
+            if dialog_idx % 1000 == 0:
+                logging.info(f'Processed {dialog_idx} dialogs.')
+            examples.extend(self._create_examples_from_dialog(dialog, schemas, dataset))
+
+        logging.info(f'Finished creating the examples from {len(dialogs)} dialogues.')
+        return examples
+
+    def _create_examples_from_dialog(self, dialog, schemas, dataset):
+        """
+        Create examples for every turn in the dialog.
+        Args:
+            dialog (dict): dialogue example
+            schemas(Schema): for all services and all datasets processed by the schema_processor
+            dataset(str): can be "train", "dev", or "test".
+        Returns:
+            examples: a list of `InputExample`s.
+        """
+        dialog_id = dialog["dialogue_id"]
+        prev_states = {}
+        examples = []
+        for turn_idx, turn in enumerate(dialog["turns"]):
+            # Generate an example for every frame in every user turn.
+            if turn["speaker"] == "USER":
+                user_utterance = turn["utterance"]
+                user_frames = {f["service"]: f for f in turn["frames"]}
+                if turn_idx > 0:
+                    system_turn = dialog["turns"][turn_idx - 1]
+                    system_utterance = system_turn["utterance"]
+                    system_frames = {f["service"]: f for f in system_turn["frames"]}
+                else:
+                    system_utterance = ""
+                    system_frames = {}
+
+                turn_id = "{}-{}-{:02d}".format(dataset, dialog_id, turn_idx)
+                turn_examples, prev_states = self._create_examples_from_turn(
+                    turn_id, system_utterance, user_utterance, system_frames, user_frames, prev_states, schemas
+                )
+                examples.extend(turn_examples)
+        return examples
+
+    def _get_state_update(self, current_state, prev_state):
+        """
+        Updates the dialogue state.
+        Args:
+            current_state (dict): slot - slot value pairs for the current dialogue turn
+            prev_state (dict): slot - slot value pairs for the previous dialogue turns
+        Returns:
+            state_update (dict): slot - slot value pairs that were added/updated during the current
+                dialogue turn
+        """
+        state_update = dict(current_state)
+        for slot, values in current_state.items():
+            if slot in prev_state and prev_state[slot][0] in values:
+                # Remove the slot from state if its value didn't change.
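+                # e.g. if prev_state has {"city": ["San Jose"]} and current_state
+                # repeats that value, "city" is dropped from the update.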
+                state_update.pop(slot)
+        return state_update
+
+    def _create_examples_from_turn(
+        self, turn_id, system_utterance, user_utterance, system_frames, user_frames, prev_states, schemas
+    ):
+        """
+        Creates an example for each frame in the user turn.
+        Args:
+            turn_id (str): turn identifier, like: "train-1_00000-00"
+            system_utterance (str): last system utterance
+            user_utterance (str): last user utterance
+            system_frames (dict): all system utterances and slot - slot value pairs
+            user_frames (dict): all user utterances and slot - slot value pairs
+            prev_states (dict): slot - slot value pairs from the previous turns
+            schemas (obj): carries information about the service from the current turn
+        Returns:
+            examples: a list of `InputExample`s.
+            prev_states (dict): updated dialogue state
+        """
+        system_tokens, system_alignments, system_inv_alignments = self._tokenize(system_utterance)
+        user_tokens, user_alignments, user_inv_alignments = self._tokenize(user_utterance)
+        states = {}
+        base_example = InputExample(schema_config=self.schema_config, is_real_example=True, tokenizer=self._tokenizer,)
+        base_example.example_id = turn_id
+
+        _, dialog_id, turn_id_ = turn_id.split('-')
+        dialog_id_1, dialog_id_2 = dialog_id.split('_')
+        base_example.example_id_num = [int(dialog_id_1), int(dialog_id_2), int(turn_id_)]
+        base_example.add_utterance_features(
+            system_tokens, system_inv_alignments, user_tokens, user_inv_alignments, system_utterance, user_utterance
+        )
+        examples = []
+        for service, user_frame in user_frames.items():
+            # Create an example for this service.
+            example = base_example.make_copy_with_utterance_features()
+
+            example.example_id = "{}-{}".format(turn_id, service)
+            _, dialog_id, turn_id_ = turn_id.split('-')
+            dialog_id_1, dialog_id_2 = dialog_id.split('_')
+            example.example_id_num = [
+                int(dialog_id_1),
+                int(dialog_id_2),
+                int(turn_id_),
+                schemas.get_service_id(service),
+            ]
+
+            example.service_schema = schemas.get_service_schema(service)
+            system_frame = system_frames.get(service, None)
+            state = user_frame["state"]["slot_values"]
+            state_update = self._get_state_update(state, prev_states.get(service, {}))
+            states[service] = state
+            # Populate features in the example.
+            example.add_categorical_slots(state_update)
+            # The input tokens to bert are in the format [CLS] [S1] [S2] ... [SEP]
+            # [U1] [U2] ... [SEP] [PAD] ... [PAD]. For system token indices a bias of
+            # 1 is added for the [CLS] token and for user tokens a bias of 2 +
+            # len(system_tokens) is added to account for [CLS], system tokens and
+            # [SEP].
+            user_span_boundaries = self._find_subword_indices(
+                state_update, user_utterance, user_frame["slots"], user_alignments, user_tokens, 2 + len(system_tokens)
+            )
+            if system_frame is not None:
+                system_span_boundaries = self._find_subword_indices(
+                    state_update, system_utterance, system_frame["slots"], system_alignments, system_tokens, 1
+                )
+            else:
+                system_span_boundaries = {}
+            example.add_noncategorical_slots(state_update, user_span_boundaries, system_span_boundaries)
+            example.add_requested_slots(user_frame)
+            example.add_intents(user_frame)
+            examples.append(example)
+        return examples, states
+
+    def _find_subword_indices(self, slot_values, utterance, char_slot_spans, alignments, subwords, bias):
+        """Find indices for subwords corresponding to slot values."""
+        span_boundaries = {}
+        for slot, values in slot_values.items():
+            # Get all values present in the utterance for the specified slot.
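+            # `bias` shifts token indices to their positions in the final BERT input,
+            # which prepends [CLS] (and, for user tokens, the system tokens and [SEP]).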
+            value_char_spans = {}
+            for slot_span in char_slot_spans:
+                if slot_span["slot"] == slot:
+                    value = utterance[slot_span["start"] : slot_span["exclusive_end"]]
+                    start_tok_idx = alignments[slot_span["start"]]
+                    end_tok_idx = alignments[slot_span["exclusive_end"] - 1]
+                    if 0 <= start_tok_idx < len(subwords):
+                        end_tok_idx = min(end_tok_idx, len(subwords) - 1)
+                        value_char_spans[value] = (start_tok_idx + bias, end_tok_idx + bias)
+            for v in values:
+                if v in value_char_spans:
+                    span_boundaries[slot] = value_char_spans[v]
+                    break
+        return span_boundaries
+
+    def _tokenize(self, utterance):
+        """Tokenize the utterance using word-piece tokenization used by BERT.
+
+        Args:
+            utterance: A string containing the utterance to be tokenized.
+
+        Returns:
+            bert_tokens: A list of tokens obtained by word-piece tokenization of the
+                utterance.
+            alignments: A dict mapping indices of characters corresponding to start
+                and end positions of words (not subwords) to corresponding indices in
+                bert_tokens list.
+            inverse_alignments: A list of size equal to bert_tokens. Each element is a
+                tuple containing the index of the starting and inclusive ending
+                character of the word corresponding to the subword. This list is used
+                during inference to map word-piece indices to spans in the original
+                utterance.
+        """
+        # After _naive_tokenize, spaces and punctuation marks are all retained, i.e.
+        # direct concatenation of all the tokens in the sequence will be the
+        # original string.
+        tokens = Dstc8DataProcessor._naive_tokenize(utterance)
+        # Filter out empty tokens and obtain aligned character index for each token.
+        alignments = {}
+        char_index = 0
+        bert_tokens = []
+        # These lists store inverse alignments to be used during inference.
+        bert_tokens_start_chars = []
+        bert_tokens_end_chars = []
+        for token in tokens:
+            if token.strip():
+                subwords = self._tokenizer.text_to_tokens(token)
+                # Store the alignment for the index of starting character and the
+                # inclusive ending character of the token.
+                alignments[char_index] = len(bert_tokens)
+                bert_tokens_start_chars.extend([char_index] * len(subwords))
+                bert_tokens.extend(subwords)
+                # The inclusive ending character index corresponding to the word.
+                inclusive_char_end = char_index + len(token) - 1
+                alignments[inclusive_char_end] = len(bert_tokens) - 1
+                bert_tokens_end_chars.extend([inclusive_char_end] * len(subwords))
+            char_index += len(token)
+        inverse_alignments = list(zip(bert_tokens_start_chars, bert_tokens_end_chars))
+        return bert_tokens, alignments, inverse_alignments
+
+    def get_num_dialog_examples(self, dataset):
+        """
+        Gets the number of dialog examples in the data split.
+        Args:
+            dataset: str. can be "train", "dev", or "test".
+        Returns:
+            example_count: int. number of examples in the specified dataset.
+        """
+        example_count = 0
+        dialog_paths = [
+            os.path.join(self.dstc8_data_dir, dataset, "dialogues_{:03d}.json".format(i))
+            for i in self._file_ranges[dataset]
+        ]
+        dst_set = Dstc8DataProcessor.load_dialogues(dialog_paths)
+        for dialog in dst_set:
+            for turn in dialog["turns"]:
+                if turn["speaker"] == "USER":
+                    example_count += len(turn["frames"])
+        return example_count
+
+    @classmethod
+    def _naive_tokenize(cls, s):
+        """
+        Tokenizes a string, separating words, spaces and punctuation.
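+        For example, _naive_tokenize("San Jose, CA") returns
+        ['San', ' ', 'Jose', ',', ' ', 'CA'].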
+        Args:
+            s (str): a string
+        Returns:
+            seq_tok (list): list of words, spaces and punctuation marks from s
+        """
+        # Spaces and punctuation marks are all retained, i.e. direct concatenation
+        # of all the tokens in the sequence will be the original string.
+        seq_tok = [tok for tok in re.split(r"([^a-zA-Z0-9])", s) if tok]
+        return seq_tok
+
+    @classmethod
+    def load_dialogues(cls, dialog_json_filepaths):
+        """
+        Obtain the list of all dialogues from specified json files.
+        Args:
+            dialog_json_filepaths (list): list of json files
+        Returns:
+            dialogs (list): the list of all dialogues
+        """
+        dialogs = []
+        for dialog_json_filepath in sorted(dialog_json_filepaths):
+            with open(dialog_json_filepath, 'r') as f:
+                dialogs.extend(json.load(f))
+                f.close()
+        return dialogs
diff --git a/nemo/collections/nlp/data/datasets/sgd_dataset/evaluate.py b/nemo/collections/nlp/data/datasets/sgd_dataset/evaluate.py
new file mode 100644
index 000000000000..fb2dba564b78
--- /dev/null
+++ b/nemo/collections/nlp/data/datasets/sgd_dataset/evaluate.py
@@ -0,0 +1,213 @@
+# =============================================================================
+# Copyright 2020 NVIDIA. All Rights Reserved.
+# Copyright 2019 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+"""
+Evaluate predictions JSON file, w.r.t. ground truth file.
+This file contains code artifacts adapted from the original implementation:
+https://github.com/google-research/google-research/blob/master/schema_guided_dst/evaluate.py
+"""
+
+import collections
+import glob
+import json
+
+import numpy as np
+
+import nemo
+import nemo.collections.nlp.data.datasets.sgd_dataset.metrics as metrics
+
+__all__ = [
+    'get_in_domain_services',
+    'get_dataset_as_dict',
+    'ALL_SERVICES',
+    'SEEN_SERVICES',
+    'UNSEEN_SERVICES',
+    'get_metrics',
+    'PER_FRAME_OUTPUT_FILENAME',
+]
+
+ALL_SERVICES = "#ALL_SERVICES"
+SEEN_SERVICES = "#SEEN_SERVICES"
+UNSEEN_SERVICES = "#UNSEEN_SERVICES"
+
+# Name of the file containing all predictions and their corresponding frame metrics.
+PER_FRAME_OUTPUT_FILENAME = "dialogues_and_metrics.json"
+
+
+def get_service_set(schema_path):
+    """Get the set of all services present in a schema."""
+    service_set = set()
+    with open(schema_path) as f:
+        schema = json.load(f)
+        for service in schema:
+            service_set.add(service["service_name"])
+        f.close()
+    return service_set
+
+
+def get_in_domain_services(schema_path_1, schema_path_2):
+    """Get the set of common services between two schemas."""
+    return get_service_set(schema_path_1) & get_service_set(schema_path_2)
+
+
+def get_dataset_as_dict(file_path_patterns):
+    """Read the DSTC8 json dialog data as dictionary with dialog ID as keys."""
+    dataset_dict = {}
+    if isinstance(file_path_patterns, list):
+        list_fp = file_path_patterns
+    else:
+        list_fp = sorted(glob.glob(file_path_patterns))
+    for fp in list_fp:
+        if PER_FRAME_OUTPUT_FILENAME in fp:
+            continue
+        nemo.logging.info("Loading file: %s", fp)
+        with open(fp) as f:
+            data = json.load(f)
+            if isinstance(data, list):
+                for dial in data:
+                    dataset_dict[dial["dialogue_id"]] = dial
+            elif isinstance(data, dict):
+                dataset_dict.update(data)
+            f.close()
+    return dataset_dict
+
+
+def get_metrics(dataset_ref, dataset_hyp, service_schemas, in_domain_services, joint_acc_across_turn, no_fuzzy_match):
+    """Calculate the DSTC8 metrics.
+
+    Args:
+        dataset_ref: The ground truth dataset represented as a dict mapping dialogue
+            id to the corresponding dialogue.
+        dataset_hyp: The predictions in the same format as `dataset_ref`.
+        service_schemas: A dict mapping service name to the schema for the service.
+        in_domain_services: The set of services which are present in the training
+            set.
+        joint_acc_across_turn: Whether to compute joint goal accuracy across all
+            slot values of all domains for each turn (multiwoz style).
+        no_fuzzy_match: Whether to disable fuzzy matching of non-categorical slot
+            values.
+
+    Returns:
+        A dict mapping a metric collection name to a dict containing the values
+        for various metrics. Each metric collection aggregates the metrics across
+        a specific set of frames in the dialogues.
+    """
+    # Metrics can be aggregated in various ways, eg over all dialogues, only for
+    # dialogues containing unseen services or for dialogues corresponding to a
+    # single service. This aggregation is done through metric_collections, which
+    # is a dict mapping a collection name to a dict, which maps a metric to a list
+    # of values for that metric. Each value in this list is the value taken by
+    # the metric on a frame.
+    metric_collections = collections.defaultdict(lambda: collections.defaultdict(list))
+
+    # Ensure the dialogs in dataset_hyp also occur in dataset_ref.
+    assert set(dataset_hyp.keys()).issubset(set(dataset_ref.keys()))
+    nemo.logging.info("len(dataset_hyp)=%d, len(dataset_ref)=%d", len(dataset_hyp), len(dataset_ref))
+
+    # Store metrics for every frame for debugging.
+    per_frame_metric = {}
+    for dial_id, dial_hyp in dataset_hyp.items():
+        dial_ref = dataset_ref[dial_id]
+
+        if set(dial_ref["services"]) != set(dial_hyp["services"]):
+            raise ValueError(
+                "Set of services present in ground truth and predictions don't match "
+                "for dialogue with id {}".format(dial_id)
+            )
+
+        joint_metrics = [metrics.JOINT_GOAL_ACCURACY, metrics.JOINT_CAT_ACCURACY, metrics.JOINT_NONCAT_ACCURACY]
+        for turn_id, (turn_ref, turn_hyp) in enumerate(zip(dial_ref["turns"], dial_hyp["turns"])):
+            metric_collections_per_turn = collections.defaultdict(lambda: collections.defaultdict(lambda: 1.0))
+            if turn_ref["speaker"] != turn_hyp["speaker"]:
+                raise ValueError("Speakers don't match in dialogue with id {}".format(dial_id))
+
+            # Skip system turns because metrics are only computed for user turns.
+ if turn_ref["speaker"] != "USER": + continue + + if turn_ref["utterance"] != turn_hyp["utterance"]: + nemo.logging.info("Ref utt: %s", turn_ref["utterance"]) + nemo.logging.info("Hyp utt: %s", turn_hyp["utterance"]) + raise ValueError("Utterances don't match for dialogue with id {}".format(dial_id)) + + hyp_frames_by_service = {frame["service"]: frame for frame in turn_hyp["frames"]} + + # Calculate metrics for each frame in each user turn. + for frame_ref in turn_ref["frames"]: + service_name = frame_ref["service"] + if service_name not in hyp_frames_by_service: + raise ValueError( + "Frame for service {} not found in dialogue with id {}".format(service_name, dial_id) + ) + service = service_schemas[service_name] + frame_hyp = hyp_frames_by_service[service_name] + + active_intent_acc = metrics.get_active_intent_accuracy(frame_ref, frame_hyp) + slot_tagging_f1_scores = metrics.get_slot_tagging_f1( + frame_ref, frame_hyp, turn_ref["utterance"], service + ) + requested_slots_f1_scores = metrics.get_requested_slots_f1(frame_ref, frame_hyp) + goal_accuracy_dict = metrics.get_average_and_joint_goal_accuracy( + frame_ref, frame_hyp, service, no_fuzzy_match + ) + + frame_metric = { + metrics.ACTIVE_INTENT_ACCURACY: active_intent_acc, + metrics.REQUESTED_SLOTS_F1: requested_slots_f1_scores.f1, + metrics.REQUESTED_SLOTS_PRECISION: requested_slots_f1_scores.precision, + metrics.REQUESTED_SLOTS_RECALL: requested_slots_f1_scores.recall, + } + if slot_tagging_f1_scores is not None: + frame_metric[metrics.SLOT_TAGGING_F1] = slot_tagging_f1_scores.f1 + frame_metric[metrics.SLOT_TAGGING_PRECISION] = slot_tagging_f1_scores.precision + frame_metric[metrics.SLOT_TAGGING_RECALL] = slot_tagging_f1_scores.recall + frame_metric.update(goal_accuracy_dict) + + frame_id = "{:s}-{:03d}-{:s}".format(dial_id, turn_id, frame_hyp["service"]) + per_frame_metric[frame_id] = frame_metric + # Add the frame-level metric result back to dialogues. + frame_hyp["metrics"] = frame_metric + + # Get the domain name of the service. + domain_name = frame_hyp["service"].split("_")[0] + domain_keys = [ALL_SERVICES, frame_hyp["service"], domain_name] + if frame_hyp["service"] in in_domain_services: + domain_keys.append(SEEN_SERVICES) + else: + domain_keys.append(UNSEEN_SERVICES) + for domain_key in domain_keys: + for metric_key, metric_value in frame_metric.items(): + if metric_value != metrics.NAN_VAL: + if joint_acc_across_turn and metric_key in joint_metrics: + metric_collections_per_turn[domain_key][metric_key] *= metric_value + else: + metric_collections[domain_key][metric_key].append(metric_value) + if joint_acc_across_turn: + # Conduct multiwoz style evaluation that computes joint goal accuracy + # across all the slot values of all the domains for each turn. + for domain_key in metric_collections_per_turn: + for metric_key, metric_value in metric_collections_per_turn[domain_key].items(): + metric_collections[domain_key][metric_key].append(metric_value) + + all_metric_aggregate = {} + for domain_key, domain_metric_vals in metric_collections.items(): + domain_metric_aggregate = {} + for metric_key, value_list in domain_metric_vals.items(): + if value_list: + # Metrics are macro-averaged across all frames. 
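+                # e.g. a mean frame-level value of 0.8534 is reported as 85.34.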
+                domain_metric_aggregate[metric_key] = round(float(np.mean(value_list)) * 100.0, 2)
+            else:
+                domain_metric_aggregate[metric_key] = metrics.NAN_VAL
+        all_metric_aggregate[domain_key] = domain_metric_aggregate
+    return all_metric_aggregate, per_frame_metric
diff --git a/nemo/collections/nlp/data/datasets/sgd_dataset/input_example.py b/nemo/collections/nlp/data/datasets/sgd_dataset/input_example.py
new file mode 100644
index 000000000000..a9361bd7cfdb
--- /dev/null
+++ b/nemo/collections/nlp/data/datasets/sgd_dataset/input_example.py
@@ -0,0 +1,393 @@
+# =============================================================================
+# Copyright 2020 NVIDIA. All Rights Reserved.
+# Copyright 2019 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""
+This file contains code artifacts adapted from the original implementation:
+https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/data_utils.py
+"""
+
+from nemo import logging
+
+__all__ = ['InputExample', 'STR_DONTCARE', 'STATUS_OFF', 'STATUS_ACTIVE', 'STATUS_DONTCARE', 'truncate_seq_pair']
+
+STR_DONTCARE = "dontcare"
+
+# These are used to represent the status of slots (off, active, dontcare) and
+# intents (off, active) in dialogue state tracking.
+STATUS_OFF = 0
+STATUS_ACTIVE = 1
+STATUS_DONTCARE = 2
+
+
+class InputExample(object):
+    """An example for training/inference."""
+
+    def __init__(
+        self,
+        schema_config,
+        service_schema=None,
+        example_id="NONE",
+        example_id_num=[],
+        is_real_example=False,
+        tokenizer=None,
+    ):
+        """Constructs an InputExample.
+
+        Args:
+            schema_config (dict): schema configuration; its MAX_SEQ_LENGTH entry is the
+                maximum length of the input sequence. Longer sequences will be truncated.
+            service_schema: A ServiceSchema object wrapping the schema for the service
+                corresponding to this example.
+            example_id: Unique identifier for the example, like: 'train-1_00000-00-Restaurants_1'
+            example_id_num: dialogue_id, turn_id and service id combined into a list of ints,
+                like: [1, 0, 0, 18]
+            is_real_example: Indicates if an example is real or used for padding in a
+                minibatch.
+            tokenizer (Tokenizer): such as NemoBertTokenizer
+        """
+        self.schema_config = schema_config
+        self.service_schema = service_schema
+        self.example_id = example_id
+        self.example_id_num = example_id_num
+
+        self.is_real_example = is_real_example
+        self._max_seq_length = schema_config["MAX_SEQ_LENGTH"]
+        self._tokenizer = tokenizer
+        if self.is_real_example and self._tokenizer is None:
+            raise ValueError("Must specify tokenizer when input is a real example.")
+
+        self.user_utterance = ''
+        self.system_utterance = ''
+        # The id of each subword in the vocabulary for BERT.
+        self.utterance_ids = [0] * self._max_seq_length
+        # Denotes the identity of the sequence. Takes values 0 (system utterance) and 1 (user utterance).
+        self.utterance_segment = [0] * self._max_seq_length
+        # Mask which takes the value 0 for padded tokens and 1 otherwise.
+        self.utterance_mask = [0] * self._max_seq_length
+        # Start and inclusive end character indices in the original utterance
+        # corresponding to the tokens. This is used to obtain the character indices
+        # from the predicted subword indices during inference.
+        # NOTE: A positive value indicates the character indices in the user
+        # utterance whereas a negative value indicates the character indices in the
+        # system utterance. The indices are offset by 1 to prevent ambiguity in the
+        # 0 index, which could be in either the user or system utterance by the
+        # above convention. Now the 0 index corresponds to padded tokens.
+        self.start_char_idx = [0] * self._max_seq_length
+        self.end_char_idx = [0] * self._max_seq_length
+
+        # Number of categorical slots present in the service.
+        self.num_categorical_slots = 0
+        # The status of each categorical slot in the service.
+        self.categorical_slot_status = [STATUS_OFF] * schema_config["MAX_NUM_CAT_SLOT"]
+        # Masks out categorical status for padded cat slots
+        self.cat_slot_status_mask = [0] * len(self.categorical_slot_status)
+        # Number of values taken by each categorical slot.
+        self.num_categorical_slot_values = [0] * schema_config["MAX_NUM_CAT_SLOT"]
+        # The index of the correct value for each categorical slot.
+        self.categorical_slot_values = [0] * schema_config["MAX_NUM_CAT_SLOT"]
+        # Masks out categorical slot values for slots not used in the service
+        self.cat_slot_values_mask = [
+            [0] * schema_config["MAX_NUM_VALUE_PER_CAT_SLOT"] for _ in range(schema_config["MAX_NUM_CAT_SLOT"])
+        ]
+
+        # Number of non-categorical slots present in the service.
+        self.num_noncategorical_slots = 0
+        # The status of each non-categorical slot in the service.
+        self.noncategorical_slot_status = [STATUS_OFF] * schema_config["MAX_NUM_NONCAT_SLOT"]
+        # Masks out non-categorical status for padded noncat slots
+        self.noncat_slot_status_mask = [0] * len(self.noncategorical_slot_status)
+        # The index of the starting subword corresponding to the slot span for a
+        # non-categorical slot value.
+        self.noncategorical_slot_value_start = [0] * schema_config["MAX_NUM_NONCAT_SLOT"]
+        # The index of the ending (inclusive) subword corresponding to the slot span
+        # for a non-categorical slot value.
+        self.noncategorical_slot_value_end = [0] * schema_config["MAX_NUM_NONCAT_SLOT"]
+
+        # Total number of slots present in the service. All slots are included here
+        # since every slot can be requested.
+        self.num_slots = 0
+        # Takes value 1 if the corresponding slot is requested, 0 otherwise.
+        self.requested_slot_status = [STATUS_OFF] * (
+            schema_config["MAX_NUM_CAT_SLOT"] + schema_config["MAX_NUM_NONCAT_SLOT"]
+        )
+        # Masks out requested slots that are not used for the service
+        self.requested_slot_mask = [0] * len(self.requested_slot_status)
+
+        # Total number of intents present in the service.
+        self.num_intents = 0
+        # Takes value 1 if the intent is active, 0 otherwise.
+        self.intent_status = [STATUS_OFF] * schema_config["MAX_NUM_INTENT"]
+        # Masks out intents that are not used for the service, [1] for the NONE intent
+        self.intent_status_mask = [1] + [0] * len(self.intent_status)
+        # Label for the active intent in the turn
+        self.intent_status_labels = 0
+
+    @property
+    def readable_summary(self):
+        """Get a readable dict that summarizes the attributes of an InputExample."""
+        seq_length = sum(self.utterance_mask)
+        utt_toks = self._tokenizer.convert_ids_to_tokens(self.utterance_ids[:seq_length])
+        utt_tok_mask_pairs = list(zip(utt_toks, self.utterance_segment[:seq_length]))
+        active_intents = [
+            self.service_schema.get_intent_from_id(idx)
+            for idx, s in enumerate(self.intent_status)
+            if s == STATUS_ACTIVE
+        ]
+        if len(active_intents) > 1:
+            raise ValueError("Should not have multiple active intents in a single service.")
+        active_intent = active_intents[0] if active_intents else ""
+        slot_values_in_state = {}
+        for idx, s in enumerate(self.categorical_slot_status):
+            if s == STATUS_ACTIVE:
+                value_id = self.categorical_slot_values[idx]
+                slot_values_in_state[
+                    self.service_schema.get_categorical_slot_from_id(idx)
+                ] = self.service_schema.get_categorical_slot_value_from_id(idx, value_id)
+            elif s == STATUS_DONTCARE:
+                slot_values_in_state[self.service_schema.get_categorical_slot_from_id(idx)] = STR_DONTCARE
+        for idx, s in enumerate(self.noncategorical_slot_status):
+            if s == STATUS_ACTIVE:
+                slot = self.service_schema.get_non_categorical_slot_from_id(idx)
+                start_id = self.noncategorical_slot_value_start[idx]
+                end_id = self.noncategorical_slot_value_end[idx]
+                # The token list consists of subwords that may start with "##". We
+                # remove "##" to reconstruct the original value. Note that it's not a
+                # strict restoration of the original string. It's primarily used for
+                # debugging.
+                # ex. ["san", "j", "##ose"] --> "san jose"
+                readable_value = " ".join(utt_toks[start_id : end_id + 1]).replace(" ##", "")
+                slot_values_in_state[slot] = readable_value
+            elif s == STATUS_DONTCARE:
+                slot = self.service_schema.get_non_categorical_slot_from_id(idx)
+                slot_values_in_state[slot] = STR_DONTCARE
+
+        summary_dict = {
+            "utt_tok_mask_pairs": utt_tok_mask_pairs,
+            "utt_len": seq_length,
+            "num_categorical_slots": self.num_categorical_slots,
+            "num_categorical_slot_values": self.num_categorical_slot_values,
+            "num_noncategorical_slots": self.num_noncategorical_slots,
+            "service_name": self.service_schema.service_name,
+            "active_intent": active_intent,
+            "slot_values_in_state": slot_values_in_state,
+        }
+        return summary_dict
+
+    def add_utterance_features(
+        self, system_tokens, system_inv_alignments, user_tokens, user_inv_alignments, system_utterance, user_utterance
+    ):
+        """Add utterance related features input to bert.
+
+        Note: this method modifies the system tokens and user_tokens in place to
+        make their total length <= the maximum input length for BERT model.
+
+        Args:
+            system_tokens: a list of strings which represents the system utterance.
+            system_inv_alignments: a list of tuples which denotes the start and end
+                character of the token that a bert token originates from in the original
+                system utterance.
+            user_tokens: a list of strings which represents the user utterance.
+            user_inv_alignments: a list of tuples which denotes the start and end
+                character of the token that a bert token originates from in the original
+                user utterance.
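+            system_utterance: the original system utterance string, stored on the example.
+            user_utterance: the original user utterance string, stored on the example.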
+ """ + # Make user-system utterance input (in BERT format) + # Input sequence length for utterance BERT encoder + max_utt_len = self._max_seq_length + + # Modify lengths of sys & usr utterance so that length of total utt + # (including cls_token, setp_token, sep_token) is no more than max_utt_len + is_too_long = truncate_seq_pair(system_tokens, user_tokens, max_utt_len - 3) + if is_too_long: + logging.debug(f'Utterance sequence truncated in example id - {self.example_id}.') + + # Construct the tokens, segment mask and valid token mask which will be + # input to BERT, using the tokens for system utterance (sequence A) and + # user utterance (sequence B). + utt_subword = [] + utt_seg = [] + utt_mask = [] + start_char_idx = [] + end_char_idx = [] + + utt_subword.append(self._tokenizer.cls_token) + utt_seg.append(0) + utt_mask.append(1) + start_char_idx.append(0) + end_char_idx.append(0) + + for subword_idx, subword in enumerate(system_tokens): + utt_subword.append(subword) + utt_seg.append(0) + utt_mask.append(1) + st, en = system_inv_alignments[subword_idx] + start_char_idx.append(-(st + 1)) + end_char_idx.append(-(en + 1)) + + utt_subword.append(self._tokenizer.sep_token) + utt_seg.append(0) + utt_mask.append(1) + start_char_idx.append(0) + end_char_idx.append(0) + + for subword_idx, subword in enumerate(user_tokens): + utt_subword.append(subword) + utt_seg.append(1) + utt_mask.append(1) + st, en = user_inv_alignments[subword_idx] + start_char_idx.append(st + 1) + end_char_idx.append(en + 1) + + utt_subword.append(self._tokenizer.sep_token) + utt_seg.append(1) + utt_mask.append(1) + start_char_idx.append(0) + end_char_idx.append(0) + + utterance_ids = self._tokenizer.tokens_to_ids(utt_subword) + + # Zero-pad up to the BERT input sequence length. + while len(utterance_ids) < max_utt_len: + utterance_ids.append(0) + utt_seg.append(0) + utt_mask.append(0) + start_char_idx.append(0) + end_char_idx.append(0) + self.utterance_ids = utterance_ids + self.utterance_segment = utt_seg + self.utterance_mask = utt_mask + self.start_char_idx = start_char_idx + self.end_char_idx = end_char_idx + + self.user_utterances = user_utterance + self.system_utterance = system_utterance + + def make_copy_with_utterance_features(self): + """Make a copy of the current example with utterance features.""" + new_example = InputExample( + schema_config=self.schema_config, + service_schema=self.service_schema, + example_id=self.example_id, + example_id_num=self.example_id_num, + is_real_example=self.is_real_example, + tokenizer=self._tokenizer, + ) + new_example.utterance_ids = list(self.utterance_ids) + new_example.utterance_segment = list(self.utterance_segment) + new_example.utterance_mask = list(self.utterance_mask) + new_example.start_char_idx = list(self.start_char_idx) + new_example.end_char_idx = list(self.end_char_idx) + new_example.user_utterance = self.user_utterance + new_example.system_utterance = self.system_utterance + return new_example + + def add_categorical_slots(self, state_update): + """Add features for categorical slots.""" + categorical_slots = self.service_schema.categorical_slots + self.num_categorical_slots = len(categorical_slots) + for slot_idx, slot in enumerate(categorical_slots): + values = state_update.get(slot, []) + # Add categorical slot value features. + slot_values = self.service_schema.get_categorical_slot_values(slot) + self.num_categorical_slot_values[slot_idx] = len(slot_values) + # set slot mask to 1, i.e. 
+            self.cat_slot_status_mask[slot_idx] = 1
+            # Set the number of active slot values for this slot in the service.
+            for slot_value_idx in range(len(self.service_schema._categorical_slot_values[slot])):
+                self.cat_slot_values_mask[slot_idx][slot_value_idx] = 1
+
+            if not values:
+                self.categorical_slot_status[slot_idx] = STATUS_OFF
+            elif values[0] == STR_DONTCARE:
+                self.categorical_slot_status[slot_idx] = STATUS_DONTCARE
+            else:
+                self.categorical_slot_status[slot_idx] = STATUS_ACTIVE
+                self.categorical_slot_values[slot_idx] = self.service_schema.get_categorical_slot_value_id(
+                    slot, values[0]
+                )
+
+    def add_noncategorical_slots(self, state_update, user_span_boundaries, system_span_boundaries):
+        """Add features for non-categorical slots."""
+        noncategorical_slots = self.service_schema.non_categorical_slots
+        self.num_noncategorical_slots = len(noncategorical_slots)
+        for slot_idx, slot in enumerate(noncategorical_slots):
+            values = state_update.get(slot, [])
+            self.noncat_slot_status_mask[slot_idx] = 1
+            if not values:
+                self.noncategorical_slot_status[slot_idx] = STATUS_OFF
+            elif values[0] == STR_DONTCARE:
+                self.noncategorical_slot_status[slot_idx] = STATUS_DONTCARE
+            else:
+                self.noncategorical_slot_status[slot_idx] = STATUS_ACTIVE
+                # Add indices of the start and end tokens for the first encountered
+                # value. Spans in user utterance are prioritized over the system
+                # utterance. If a span is not found, the slot value is ignored.
+                if slot in user_span_boundaries:
+                    start, end = user_span_boundaries[slot]
+                elif slot in system_span_boundaries:
+                    start, end = system_span_boundaries[slot]
+                else:
+                    # A span may not be found because the value was cropped out or because
+                    # the value was mentioned earlier in the dialogue. Since this model
+                    # only makes use of the last two utterances to predict state updates,
+                    # it will fail in such cases.
+                    logging.debug(
+                        f'Slot values {str(values)} not found in user or system utterance in example with id - {self.example_id}.'
+                    )
+                    continue
+                self.noncategorical_slot_value_start[slot_idx] = start
+                self.noncategorical_slot_value_end[slot_idx] = end
+
+    def add_requested_slots(self, frame):
+        all_slots = self.service_schema.slots
+        self.num_slots = len(all_slots)
+        for slot_idx, slot in enumerate(all_slots):
+            self.requested_slot_mask[slot_idx] = 1
+            if slot in frame["state"]["requested_slots"]:
+                self.requested_slot_status[slot_idx] = STATUS_ACTIVE
+
+    def add_intents(self, frame):
+        all_intents = self.service_schema.intents
+        self.num_intents = len(all_intents)
+        for intent_idx, intent in enumerate(all_intents):
+            if intent == frame["state"]["active_intent"]:
+                self.intent_status[intent_idx] = STATUS_ACTIVE
+                # Adding +1 to take the NONE intent into account;
+                # supports only 1 active intent in the turn.
+                self.intent_status_labels = intent_idx + 1
+                self.intent_status_mask[intent_idx + 1] = 1
+
+
+# Modified from run_classifier._truncate_seq_pair in the public bert model repo.
+# https://github.com/google-research/bert/blob/master/run_classifier.py.
+def truncate_seq_pair(tokens_a, tokens_b, max_length):
+    """Truncate a seq pair in place so that their total length <= max_length."""
+    is_too_long = False
+    # This is a simple heuristic which will always truncate the longer sequence
+    # one token at a time.
+    # This makes more sense than truncating an equal percent
+    # of tokens from each, since if one sequence is very short then each token
+    # that's truncated likely contains more information than a longer sequence.
+    while True:
+        total_length = len(tokens_a) + len(tokens_b)
+        if total_length <= max_length:
+            break
+        is_too_long = True
+        if len(tokens_a) > len(tokens_b):
+            tokens_a.pop()
+        else:
+            tokens_b.pop()
+    return is_too_long
diff --git a/nemo/collections/nlp/data/datasets/sgd_dataset/metrics.py b/nemo/collections/nlp/data/datasets/sgd_dataset/metrics.py
new file mode 100644
index 000000000000..6ad5f81dbdae
--- /dev/null
+++ b/nemo/collections/nlp/data/datasets/sgd_dataset/metrics.py
@@ -0,0 +1,284 @@
+# =============================================================================
+# Copyright 2020 NVIDIA. All Rights Reserved.
+# Copyright 2019 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+"""Evaluation metrics for Schema-guided dialogue.
+
+This library provides functions for calculating the evaluation metrics for a
+single dialogue. The following metrics are defined:
+
+(1) Active intent accuracy: The fraction of user turns for which the active
+    intent has been correctly predicted.
+(2) Slot tagging F1: The macro-averaged F1 score for tagging slot values for
+    non-categorical slots. This metric is optional to report in the final paper
+    if participants decide not to use slot tagging.
+(3) Requested slots F1: The macro-averaged F1 score for requested slots over the
+    turns. For a turn, if there are no requested slots in both the ground truth
+    and the prediction, that turn is skipped. The reported number is the average
+    F1 score for all un-skipped user turns. This metric is optional to report in
+    the final paper.
+(4) Average goal accuracy: For each turn, participants must predict a single
+    value for each slot present in the dialogue state. Only the slots which have
+    a non-empty assignment in the ground truth dialogue state are considered.
+    This is the average accuracy of predicting the value of a slot correctly. A
+    fuzzy matching based score is used for non-categorical slots.
+(5) Joint goal accuracy: This is the average accuracy of predicting all slot
+    assignments for a turn correctly. A fuzzy matching based score is used for
+    non-categorical slots. This is the primary evaluation metric used for ranking
+    submissions. More details to follow with the evaluation script.
+
+This file contains code artifacts adapted from the original implementation:
+https://github.com/google-research/google-research/blob/master/schema_guided_dst/metrics.py
+"""
+
+import collections
+
+import numpy as np
+from rapidfuzz import fuzz
+
+F1Scores = collections.namedtuple("F1Scores", ["f1", "precision", "recall"])
+
+# Evaluation and other relevant metrics for DSTC8 Schema-guided DST.
+# (1) Active intent accuracy.
+ACTIVE_INTENT_ACCURACY = "active_intent_accuracy"
+# (2) Slot tagging F1.
+SLOT_TAGGING_F1 = "slot_tagging_f1"
+SLOT_TAGGING_PRECISION = "slot_tagging_precision"
+SLOT_TAGGING_RECALL = "slot_tagging_recall"
+# (3) Requested slots F1.
+REQUESTED_SLOTS_F1 = "requested_slots_f1"
+REQUESTED_SLOTS_PRECISION = "requested_slots_precision"
+REQUESTED_SLOTS_RECALL = "requested_slots_recall"
+# (4) Average goal accuracy.
+AVERAGE_GOAL_ACCURACY = "average_goal_accuracy"
+AVERAGE_CAT_ACCURACY = "average_cat_accuracy"
+AVERAGE_NONCAT_ACCURACY = "average_noncat_accuracy"
+# (5) Joint goal accuracy.
+JOINT_GOAL_ACCURACY = "joint_goal_accuracy"
+JOINT_CAT_ACCURACY = "joint_cat_accuracy"
+JOINT_NONCAT_ACCURACY = "joint_noncat_accuracy"
+
+NAN_VAL = "NA"
+
+
+def compute_f1(list_ref, list_hyp):
+    """Compute F1 score from reference (ground truth) list and hypothesis list.
+
+    Args:
+        list_ref: List of true elements.
+        list_hyp: List of positive (retrieved) elements.
+
+    Returns:
+        A F1Scores object containing F1, precision, and recall scores.
+    """
+
+    ref = collections.Counter(list_ref)
+    hyp = collections.Counter(list_hyp)
+    true = sum(ref.values())
+    positive = sum(hyp.values())
+    true_positive = sum((ref & hyp).values())
+    precision = float(true_positive) / positive if positive else 1.0
+    recall = float(true_positive) / true if true else 1.0
+    if precision + recall > 0.0:
+        f1 = 2.0 * precision * recall / (precision + recall)
+    else:  # The F1-score is defined to be 0 if both precision and recall are 0.
+        f1 = 0.0
+
+    return F1Scores(f1=f1, precision=precision, recall=recall)
+
+
+def fuzzy_string_match(str_ref, str_hyp):
+    """Returns fuzzy string similarity score in range [0.0, 1.0]."""
+
+    # The higher the score, the higher the similarity between the two strings.
+    return fuzz.token_sort_ratio(str_ref, str_hyp) / 100.0
+
+
+def noncat_slot_value_match(str_ref_list, str_hyp, no_fuzzy_match):
+    """Calculate non-categorical slots correctness.
+
+    Args:
+        str_ref_list: a list of reference strings.
+        str_hyp: the hypothesis string.
+        no_fuzzy_match: whether to disable fuzzy string matching.
+
+    Returns:
+        score: The highest fuzzy string match score of the references and hypothesis.
+    """
+    score = 0.0
+    for str_ref in str_ref_list:
+        if no_fuzzy_match:
+            match_score = float(str_ref == str_hyp)
+        else:
+            match_score = fuzzy_string_match(str_ref, str_hyp)
+        score = max(score, match_score)
+    return score
+
+
+def compare_slot_values(slot_values_ref, slot_values_hyp, service, no_fuzzy_match):
+    """Compare and get correctness of goal state's slot_values.
+
+    Args:
+        slot_values_ref: goal state slot_values from reference (ground truth).
+        slot_values_hyp: goal state slot_values from hypothesis (prediction).
+        service: a service data structure in the schema. We use it to obtain the
+            list of slots in the service and infer whether a slot is categorical.
+        no_fuzzy_match: whether to disable fuzzy string matching for non-categorical
+            slot values.
+
+    Returns:
+        (list_cor, slot_active, slot_cat)
+        list_cor: list of correctness scores, each corresponding to one slot in the
+            service. The score is a float either 0.0 or 1.0 for categorical slot,
+            and in range [0.0, 1.0] for non-categorical slot.
+        slot_active: list indicating whether the element in list_cor corresponds to
+            an active ground-truth slot.
+        slot_cat: list indicating whether the element in list_cor corresponds to a
+            categorical slot.
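+        For example, for a service with one active categorical slot predicted
+        correctly and one active non-categorical slot fuzzy-matched at 0.9, the
+        result would be ([1.0, 0.9], [True, True], [True, False]).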
+ """ + list_cor = [] + slot_active = [] + slot_cat = [] + + for slot in service["slots"]: + slot_name = slot["name"] + slot_cat.append(slot["is_categorical"]) + + if slot_name in slot_values_ref: # REF=active + slot_active.append(True) + if slot_name in slot_values_hyp: # HYP=active, apply matching + value_ref_list = slot_values_ref[slot_name] + value_hyp = slot_values_hyp[slot_name][0] + if slot["is_categorical"]: + cor = float(value_ref_list[0] == value_hyp) + else: + cor = noncat_slot_value_match(value_ref_list, value_hyp, no_fuzzy_match) + + list_cor.append(cor) + else: # HYP=off + list_cor.append(0.0) + else: # REF=off + slot_active.append(False) + if slot_name in slot_values_hyp: # HYP=active + list_cor.append(0.0) + else: # HYP=off + list_cor.append(1.0) + + assert len(list_cor) == len(service["slots"]) + assert len(slot_active) == len(service["slots"]) + assert len(slot_cat) == len(service["slots"]) + return list_cor, slot_active, slot_cat + + +def get_active_intent_accuracy(frame_ref, frame_hyp): + """Get active intent accuracy of a frame. + + Args: + frame_ref: single semantic frame from reference (ground truth) file. + frame_hyp: single semantic frame from hypothesis (prediction) file. + + Returns: + 1.0 if the intent prediction is correct, otherwise 0.0. + """ + return float(frame_ref["state"]["active_intent"] == frame_hyp["state"]["active_intent"]) + + +def get_slot_tagging_f1(frame_ref, frame_hyp, utt, service): + """Get slot tagging (non-categorical slots only) F1 scores of a frame. + + Args: + frame_ref: single semantic frame from reference (ground truth) file. + frame_hyp: single semantic frame from hypothesis (prediction) file. + utt: user utterance. Slot tagging annotations are the character positions in + the utterance. + service: a service data structure in the schema. We use it to infer whether + a slot is non-categorical. + + Returns: + A F1Scores object containing F1, precision, and recall scores. + """ + + list_noncat_slots = [s["name"] for s in service["slots"] if not s["is_categorical"]] + if "slots" not in frame_hyp: + return None + else: + list_ref = [ + (s["slot"], utt[s["start"] : s["exclusive_end"]]) + for s in frame_ref["slots"] + if s["slot"] in list_noncat_slots + ] + list_hyp = [ + (s["slot"], utt[s["start"] : s["exclusive_end"]]) + for s in frame_hyp["slots"] + if s["slot"] in list_noncat_slots + ] + return compute_f1(list_ref, list_hyp) + + +def get_requested_slots_f1(frame_ref, frame_hyp): + """Get requested slots F1 scores of a frame. + + Args: + frame_ref: single semantic frame from reference (ground truth) file. + frame_hyp: single semantic frame from hypothesis (prediction) file. + + Returns: + A F1Scores object containing F1, precision, and recall scores. + """ + return compute_f1(frame_ref["state"]["requested_slots"], frame_hyp["state"]["requested_slots"]) + + +def get_average_and_joint_goal_accuracy(frame_ref, frame_hyp, service, no_fuzzy_match): + """Get average and joint goal accuracies of a frame. + + Args: + frame_ref: single semantic frame from reference (ground truth) file. + frame_hyp: single semantic frame from hypothesis (prediction) file. + service: a service data structure in the schema. We use it to obtain the + list of slots in the service and infer whether a slot is categorical. + use_fuzzy_match: whether to use fuzzy string matching for comparing + non-categorical slot values. + + Returns: + goal_acc: a dict whose values are average / joint + all-goal / categorical-goal / non-categorical-goal accuracies. 
+ """ + goal_acc = {} + + list_acc, slot_active, slot_cat = compare_slot_values( + frame_ref["state"]["slot_values"], frame_hyp["state"]["slot_values"], service, no_fuzzy_match + ) + + # (4) Average goal accuracy. + active_acc = [acc for acc, active in zip(list_acc, slot_active) if active] + goal_acc[AVERAGE_GOAL_ACCURACY] = np.mean(active_acc) if active_acc else NAN_VAL + # (4-a) categorical. + active_cat_acc = [acc for acc, active, cat in zip(list_acc, slot_active, slot_cat) if active and cat] + goal_acc[AVERAGE_CAT_ACCURACY] = np.mean(active_cat_acc) if active_cat_acc else NAN_VAL + # (4-b) non-categorical. + active_noncat_acc = [acc for acc, active, cat in zip(list_acc, slot_active, slot_cat) if active and not cat] + goal_acc[AVERAGE_NONCAT_ACCURACY] = np.mean(active_noncat_acc) if active_noncat_acc else NAN_VAL + + # (5) Joint goal accuracy. + goal_acc[JOINT_GOAL_ACCURACY] = np.prod(list_acc) if list_acc else NAN_VAL + # (5-a) categorical. + cat_acc = [acc for acc, cat in zip(list_acc, slot_cat) if cat] + goal_acc[JOINT_CAT_ACCURACY] = np.prod(cat_acc) if cat_acc else NAN_VAL + # (5-b) non-categorical. + noncat_acc = [acc for acc, cat in zip(list_acc, slot_cat) if not cat] + goal_acc[JOINT_NONCAT_ACCURACY] = np.prod(noncat_acc) if noncat_acc else NAN_VAL + + return goal_acc diff --git a/nemo/collections/nlp/data/datasets/sgd_dataset/prediction_utils.py b/nemo/collections/nlp/data/datasets/sgd_dataset/prediction_utils.py new file mode 100644 index 000000000000..7207406e3321 --- /dev/null +++ b/nemo/collections/nlp/data/datasets/sgd_dataset/prediction_utils.py @@ -0,0 +1,357 @@ +# ============================================================================= +# Copyright 2020 NVIDIA. All Rights Reserved. +# Copyright 2019 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +""" +Prediction and evaluation-related utility functions. +This file contains code artifacts adapted from the original implementation: +https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/pred_utils.py +""" + +import collections +import json +import os + +from nemo import logging +from nemo.collections.nlp.data.datasets.sgd_dataset.input_example import STATUS_ACTIVE, STATUS_DONTCARE, STR_DONTCARE + +REQ_SLOT_THRESHOLD = 0.5 + +__all__ = ['get_predicted_dialog_baseline', 'write_predictions_to_file'] + + +def get_predicted_dialog_ret_sys_act(dialog, all_predictions, schemas, eval_debug, in_domain_services): + """Update labels in a dialogue based on model predictions. + Args: + dialog: A json object containing dialogue whose labels are to be updated. + all_predictions: A dict mapping prediction name to the predicted value. See + SchemaGuidedDST class for the contents of this dict. + schemas: A Schema object wrapping all the schemas for the dataset. + Returns: + A json object containing the dialogue with labels predicted by the model. 
+ """ + # This approach retreives slot values from the history of system actions if slot is active but it can not find it in user utterance + # Overwrite the labels in the turn with the predictions from the model. For + # test set, these labels are missing from the data and hence they are added. + dialog_id = dialog["dialogue_id"] + # The slot values tracked for each service. + all_slot_values = collections.defaultdict(dict) + sys_prev_slots = collections.defaultdict(dict) + sys_rets = {} + + for turn_idx, turn in enumerate(dialog["turns"]): + if turn["speaker"] == "SYSTEM": + for frame in turn["frames"]: + for action in frame["actions"]: + if action["slot"] and len(action["values"]) > 0: + sys_prev_slots[frame["service"]][action["slot"]] = action["values"][0] + elif turn["speaker"] == "USER": + user_utterance = turn["utterance"] + system_utterance = dialog["turns"][turn_idx - 1]["utterance"] if turn_idx else "" + turn_id = "{:02d}".format(turn_idx) + for frame in turn["frames"]: + cat_slot_status_acc = 0 + cat_slot_status_num = 0 + noncat_slot_status_num = 0 + noncat_slot_status_acc = 0 + + predictions = all_predictions[(dialog_id, turn_id, frame["service"])] + slot_values = all_slot_values[frame["service"]] + service_schema = schemas.get_service_schema(frame["service"]) + + # Remove the slot spans and state if present. + true_slots = frame.pop("slots", None) + true_state = frame.pop("state", None) + + # The baseline model doesn't predict slot spans. Only state predictions + # are added. + state = {} + + # Add prediction for active intent. Offset is subtracted to account for + # NONE intent. + active_intent_id = predictions["intent_status"] + state["active_intent"] = ( + service_schema.get_intent_from_id(active_intent_id - 1) if active_intent_id else "NONE" + ) + + # Add prediction for requested slots. + requested_slots = [] + for slot_idx, slot in enumerate(service_schema.slots): + if predictions["req_slot_status"][slot_idx] > REQ_SLOT_THRESHOLD: + requested_slots.append(slot) + state["requested_slots"] = requested_slots + + # Add prediction for user goal (slot values). + # Categorical slots. 
+                categorical_slots_dict = {}
+                non_categorical_slots_dict = {}
+
+                predictions["cat_slot_status_p"] = predictions["cat_slot_status_p"].cpu().numpy()
+                predictions["cat_slot_status"] = predictions["cat_slot_status"].cpu().numpy()
+                predictions["cat_slot_value"] = predictions["cat_slot_value"].cpu().numpy()
+                predictions["cat_slot_value_p"] = predictions["cat_slot_value_p"].cpu().numpy()
+
+                predictions["noncat_slot_status_p"] = predictions["noncat_slot_status_p"].cpu().numpy()
+                predictions["noncat_slot_status"] = predictions["noncat_slot_status"].cpu().numpy()
+                predictions["noncat_slot_p"] = predictions["noncat_slot_p"].cpu().numpy()
+
+                predictions["noncat_alignment_start"] = predictions["noncat_alignment_start"].cpu().numpy()
+                predictions["noncat_alignment_end"] = predictions["noncat_alignment_end"].cpu().numpy()
+                predictions["cat_slot_status_GT"] = predictions["cat_slot_status_GT"].cpu().numpy()
+                predictions["noncat_slot_status_GT"] = predictions["noncat_slot_status_GT"].cpu().numpy()
+
+                for slot_idx, slot in enumerate(service_schema.categorical_slots):
+                    # debugging info
+                    cat_slot_status_num += 1
+                    categorical_slots_dict[slot] = (
+                        predictions["cat_slot_status_GT"][slot_idx],
+                        predictions["cat_slot_status"][slot_idx],
+                        predictions["cat_slot_status_p"][slot_idx],
+                        service_schema.get_categorical_slot_values(slot)[predictions["cat_slot_value"][slot_idx]],
+                        predictions["cat_slot_value_p"][slot_idx],
+                    )
+
+                    if predictions["cat_slot_status_GT"][slot_idx] == predictions["cat_slot_status"][slot_idx]:
+                        cat_slot_status_acc += 1
+
+                    slot_status = predictions["cat_slot_status"][slot_idx]
+                    if slot_status == STATUS_DONTCARE:
+                        slot_values[slot] = STR_DONTCARE
+                    elif slot_status == STATUS_ACTIVE:
+                        if (
+                            predictions["cat_slot_status_p"][slot_idx] + predictions["cat_slot_value_p"][slot_idx]
+                        ) / 2 > 0.9:
+                            value_idx = predictions["cat_slot_value"][slot_idx]
+                            slot_values[slot] = service_schema.get_categorical_slot_values(slot)[value_idx]
+                        else:
+                            if slot in sys_prev_slots[frame["service"]]:
+                                # debugging info
+                                sys_rets[slot] = sys_prev_slots[frame["service"]][slot]
+                                slot_values[slot] = sys_prev_slots[frame["service"]][slot]
+                                logging.debug(f"Retrieved value for slot {slot} from system actions: {slot_values[slot]}")
+                            else:
+                                value_idx = predictions["cat_slot_value"][slot_idx]
+                                slot_values[slot] = service_schema.get_categorical_slot_values(slot)[value_idx]
+
+                for slot_idx, slot in enumerate(service_schema.non_categorical_slots):
+                    tok_start_idx = predictions["noncat_slot_start"][slot_idx]
+                    tok_end_idx = predictions["noncat_slot_end"][slot_idx]
+                    ch_start_idx = predictions["noncat_alignment_start"][tok_start_idx]
+                    ch_end_idx = predictions["noncat_alignment_end"][tok_end_idx]
+
+                    # debugging info
+                    noncat_slot_status_num += 1
+
+                    non_categorical_slots_dict[slot] = (
+                        predictions["noncat_slot_status_GT"][slot_idx],
+                        predictions["noncat_slot_status"][slot_idx],
+                        predictions["noncat_slot_status_p"][slot_idx],
+                        (ch_start_idx, ch_end_idx),
+                        user_utterance[ch_start_idx - 1 : ch_end_idx]
+                        if (ch_start_idx > 0 and ch_end_idx > 0)
+                        else system_utterance[-ch_start_idx - 1 : -ch_end_idx],
+                        predictions["noncat_slot_p"][slot_idx],
+                    )
+                    if predictions["noncat_slot_status_GT"][slot_idx] == predictions["noncat_slot_status"][slot_idx]:
+                        noncat_slot_status_acc += 1
+
+                    slot_status = predictions["noncat_slot_status"][slot_idx]
+                    if slot_status == STATUS_DONTCARE:
+                        slot_values[slot] = STR_DONTCARE
+                    elif slot_status == STATUS_ACTIVE:
+                        tok_start_idx = predictions["noncat_slot_start"][slot_idx]
predictions["noncat_slot_start"][slot_idx] + tok_end_idx = predictions["noncat_slot_end"][slot_idx] + ch_start_idx = predictions["noncat_alignment_start"][tok_start_idx] + ch_end_idx = predictions["noncat_alignment_end"][tok_end_idx] + # logging.debug(ch_start_idx, ch_end_idx) + # logging.debug(f'Active Slot: {slot}') + # logging.debug(f'{predictions["noncat_slot_p"][slot_idx]}, ({ch_start_idx}, {ch_end_idx}), {user_utterance[ch_start_idx - 1 : ch_end_idx]}') + if ch_start_idx > 0 and ch_end_idx > 0: + # Add span from the user utterance. + slot_values[slot] = user_utterance[ch_start_idx - 1 : ch_end_idx] + # elif ch_start_idx < 0 and ch_end_idx < 0: + # Add span from the system utterance. + # slot_values[slot] = system_utterance[-ch_start_idx - 1 : -ch_end_idx] + else: + if slot in sys_prev_slots[frame["service"]]: + # debugging info + sys_rets[slot] = sys_prev_slots[frame["service"]][slot] + ## + slot_values[slot] = sys_prev_slots[frame["service"]][slot] + # elif ch_start_idx < 0 and ch_end_idx < 0: + # slot_values[slot] = system_utterance[-ch_start_idx - 1 : -ch_end_idx] + # print("hoooy", slot_values[slot]) + + if eval_debug and frame["service"] in in_domain_services: + logging.debug("-----------------------------------New Frame------------------------------") + logging.debug(f'SYS : {system_utterance}') + logging.debug(f'USER: {user_utterance}') + + logging.debug("\n") + logging.debug(f"PRED CAT: {categorical_slots_dict}") + logging.debug(f"PRED NON-CAT: {non_categorical_slots_dict}") + + logging.debug("\n") + logging.debug(f"SLOTS - LABEL: {true_slots}") + logging.debug(f"STATE - LABEL: {true_state['slot_values']}") + logging.debug(f"STATE - PRED : {slot_values}") + + logging.debug("\n") + logging.debug(f"SYS PREV SLOT: {sys_prev_slots}") + logging.debug(f"SYS RETS: {sys_rets}") + cat_slot_status_acc = ( + "NAN" if cat_slot_status_num == 0 else cat_slot_status_acc / cat_slot_status_num + ) + logging.debug(f"CAT STATUS ACC: {cat_slot_status_acc}") + noncat_slot_status_acc = ( + "NAN" if noncat_slot_status_num == 0 else noncat_slot_status_acc / noncat_slot_status_num + ) + logging.debug(f"NONCAT STATUS ACC: {noncat_slot_status_acc}") + + # Create a new dict to avoid overwriting the state in previous turns + # because of use of same objects. + state["slot_values"] = {s: [v] for s, v in slot_values.items()} + frame["state"] = state + + return dialog + + +def get_predicted_dialog_baseline(dialog, all_predictions, schemas): + """Update labels in a dialogue based on model predictions. + Args: + dialog: A json object containing dialogue whose labels are to be updated. + all_predictions: A dict mapping prediction name to the predicted value. See + SchemaGuidedDST class for the contents of this dict. + schemas: A Schema object wrapping all the schemas for the dataset. + Returns: + A json object containing the dialogue with labels predicted by the model. + """ + # Overwrite the labels in the turn with the predictions from the model. For + # test set, these labels are missing from the data and hence they are added. + dialog_id = dialog["dialogue_id"] + # The slot values tracked for each service. 
+    all_slot_values = collections.defaultdict(dict)
+    for turn_idx, turn in enumerate(dialog["turns"]):
+        if turn["speaker"] == "USER":
+            user_utterance = turn["utterance"]
+            system_utterance = dialog["turns"][turn_idx - 1]["utterance"] if turn_idx else ""
+            turn_id = "{:02d}".format(turn_idx)
+            for frame in turn["frames"]:
+                predictions = all_predictions[(dialog_id, turn_id, frame["service"])]
+                slot_values = all_slot_values[frame["service"]]
+                service_schema = schemas.get_service_schema(frame["service"])
+                # Remove the slot spans and state if present.
+                frame.pop("slots", None)
+                frame.pop("state", None)
+
+                # The baseline model doesn't predict slot spans. Only state predictions
+                # are added.
+                state = {}
+
+                # Add prediction for active intent. Offset is subtracted to account for
+                # NONE intent.
+                active_intent_id = predictions["intent_status"]
+                state["active_intent"] = (
+                    service_schema.get_intent_from_id(active_intent_id - 1) if active_intent_id else "NONE"
+                )
+
+                # Add prediction for requested slots.
+                requested_slots = []
+                for slot_idx, slot in enumerate(service_schema.slots):
+                    if predictions["req_slot_status"][slot_idx] > REQ_SLOT_THRESHOLD:
+                        requested_slots.append(slot)
+                state["requested_slots"] = requested_slots
+
+                # Add prediction for user goal (slot values).
+                # Categorical slots.
+                for slot_idx, slot in enumerate(service_schema.categorical_slots):
+                    slot_status = predictions["cat_slot_status"][slot_idx]
+                    if slot_status == STATUS_DONTCARE:
+                        slot_values[slot] = STR_DONTCARE
+                    elif slot_status == STATUS_ACTIVE:
+                        value_idx = predictions["cat_slot_value"][slot_idx]
+                        slot_values[slot] = service_schema.get_categorical_slot_values(slot)[value_idx]
+                # Non-categorical slots.
+                for slot_idx, slot in enumerate(service_schema.non_categorical_slots):
+                    slot_status = predictions["noncat_slot_status"][slot_idx]
+                    if slot_status == STATUS_DONTCARE:
+                        slot_values[slot] = STR_DONTCARE
+                    elif slot_status == STATUS_ACTIVE:
+                        tok_start_idx = predictions["noncat_slot_start"][slot_idx]
+                        tok_end_idx = predictions["noncat_slot_end"][slot_idx]
+                        ch_start_idx = predictions["noncat_alignment_start"][tok_start_idx]
+                        ch_end_idx = predictions["noncat_alignment_end"][tok_end_idx]
+                        if ch_start_idx < 0 and ch_end_idx < 0:
+                            # Add span from the system utterance.
+                            slot_values[slot] = system_utterance[-ch_start_idx - 1 : -ch_end_idx]
+                        elif ch_start_idx > 0 and ch_end_idx > 0:
+                            # Add span from the user utterance.
+                            slot_values[slot] = user_utterance[ch_start_idx - 1 : ch_end_idx]
+                # Create a new dict to avoid overwriting the state in previous turns
+                # because the same objects are reused.
+                state["slot_values"] = {s: [v] for s, v in slot_values.items()}
+                frame["state"] = state
+    return dialog
+
+
+def write_predictions_to_file(
+    predictions, input_json_files, output_dir, schemas, state_tracker, eval_debug, in_domain_services
+):
+    """Write the predicted dialogues as json files.
+
+    Args:
+        predictions: An iterator containing model predictions. This is the output
+            of the model's inference pass.
+        input_json_files: A list of json paths containing the dialogues to run
+            inference on.
+        output_dir: The directory where output json files will be created.
+        schemas: Schemas of all services in the dst dataset (train, dev and test splits).
+        state_tracker: The state tracking mode, 'baseline' or 'ret_sys_act'.
+        eval_debug: Whether to log frame-level debug information during evaluation.
+        in_domain_services: Set of services seen during training; used for debug logging.
+    """
+    logging.info(f"Writing predictions to {output_dir} started.")
+
+    # Index all predictions.
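+    # example_id is assumed to be of the form
+    # "<split>-<dialog_id>-<turn_id>-<service>", e.g. "dev-1_00000-00-Restaurants_1"
+    # (a hypothetical id); the '-'-split below relies on that structure.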
+    all_predictions = {}
+    for idx, prediction in enumerate(predictions):
+        if not prediction["is_real_example"]:
+            continue
+        _, dialog_id, turn_id, service_name = prediction['example_id'].split('-')
+        all_predictions[(dialog_id, turn_id, service_name)] = prediction
+    logging.info(f'Indexed predictions for {len(all_predictions)} examples.')
+
+    # Read each input file and write its predictions.
+    for input_file_path in input_json_files:
+        with open(input_file_path) as f:
+            dialogs = json.load(f)
+            logging.info(f'{input_file_path} file is loaded')
+            pred_dialogs = []
+            for d in dialogs:
+                if state_tracker == 'baseline':
+                    pred_dialog = get_predicted_dialog_baseline(d, all_predictions, schemas)
+                elif state_tracker == 'ret_sys_act':
+                    pred_dialog = get_predicted_dialog_ret_sys_act(
+                        d, all_predictions, schemas, eval_debug, in_domain_services
+                    )
+                else:
+                    raise ValueError(f"state_tracker {state_tracker} is not defined.")
+                pred_dialogs.append(pred_dialog)
+        input_file_name = os.path.basename(input_file_path)
+        output_file_path = os.path.join(output_dir, input_file_name)
+        with open(output_file_path, "w") as f:
+            json.dump(pred_dialogs, f, indent=2, separators=(",", ": "), sort_keys=True)
diff --git a/nemo/collections/nlp/data/datasets/sgd_dataset/schema.py b/nemo/collections/nlp/data/datasets/sgd_dataset/schema.py
new file mode 100644
index 000000000000..1462c6329892
--- /dev/null
+++ b/nemo/collections/nlp/data/datasets/sgd_dataset/schema.py
@@ -0,0 +1,182 @@
+# =============================================================================
+# Copyright 2020 NVIDIA. All Rights Reserved.
+# Copyright 2019 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+"""
+Wrappers for schemas of different services.
+This file contains code artifacts adapted from the original implementation:
+https://github.com/google-research/google-research/blob/master/schema_guided_dst/schema.py
+https://github.com/google-research/google-research/blob/master/schema_guided_dst
+"""
+
+import json
+
+from nemo import logging
+
+__all__ = ['ServiceSchema', 'Schema']
+
+
+class ServiceSchema(object):
+    """A wrapper for schema for a service."""
+
+    def __init__(self, schema_json, service_id=None):
+        self._service_name = schema_json["service_name"]
+        self._description = schema_json["description"]
+        self._schema_json = schema_json
+        self._service_id = service_id
+
+        # Construct the vocabulary for intents, slots, categorical slots,
+        # non-categorical slots and categorical slot values. These vocabs are used
+        # for generating indices for their embedding matrix.
+        self._intents = sorted(i["name"] for i in schema_json["intents"])
+        self._slots = sorted(s["name"] for s in schema_json["slots"])
+        self._categorical_slots = sorted(
+            s["name"] for s in schema_json["slots"] if s["is_categorical"] and s["name"] in self.state_slots
+        )
+        self._non_categorical_slots = sorted(
+            s["name"] for s in schema_json["slots"] if not s["is_categorical"] and s["name"] in self.state_slots
+        )
+        slot_schemas = {s["name"]: s for s in schema_json["slots"]}
+        categorical_slot_values = {}
+        categorical_slot_value_ids = {}
+        for slot in self._categorical_slots:
+            slot_schema = slot_schemas[slot]
+            values = sorted(slot_schema["possible_values"])
+            categorical_slot_values[slot] = values
+            value_ids = {value: idx for idx, value in enumerate(values)}
+            categorical_slot_value_ids[slot] = value_ids
+        self._categorical_slot_values = categorical_slot_values
+        self._categorical_slot_value_ids = categorical_slot_value_ids
+
+    @property
+    def schema_json(self):
+        return self._schema_json
+
+    @property
+    def state_slots(self):
+        """Set of slots which are permitted to be in the dialogue state."""
+        state_slots = set()
+        for intent in self._schema_json["intents"]:
+            state_slots.update(intent["required_slots"])
+            state_slots.update(intent["optional_slots"])
+        return state_slots
+
+    @property
+    def service_name(self):
+        return self._service_name
+
+    @property
+    def service_id(self):
+        return self._service_id
+
+    @property
+    def description(self):
+        return self._description
+
+    @property
+    def slots(self):
+        return self._slots
+
+    @property
+    def intents(self):
+        return self._intents
+
+    @property
+    def categorical_slots(self):
+        return self._categorical_slots
+
+    @property
+    def non_categorical_slots(self):
+        return self._non_categorical_slots
+
+    def get_categorical_slot_values(self, slot):
+        return self._categorical_slot_values[slot]
+
+    def get_slot_from_id(self, slot_id):
+        return self._slots[slot_id]
+
+    def get_intent_from_id(self, intent_id):
+        return self._intents[intent_id]
+
+    def get_categorical_slot_from_id(self, slot_id):
+        return self._categorical_slots[slot_id]
+
+    def get_non_categorical_slot_from_id(self, slot_id):
+        return self._non_categorical_slots[slot_id]
+
+    def get_categorical_slot_value_from_id(self, slot_id, value_id):
+        slot = self.categorical_slots[slot_id]
+        return self._categorical_slot_values[slot][value_id]
+
+    def get_categorical_slot_value_id(self, slot, value):
+        return self._categorical_slot_value_ids[slot][value]
+
+
+class Schema(object):
+    """Wrapper for schemas for all services in a dataset."""
+
+    def __init__(self, schema_json_paths):
+        """
+        Args:
+            schema_json_paths: list of paths to .json schema files, or a single
+                str with the path to one schema json file.
+        """
+        # Load the schema from the json file.
+        if isinstance(schema_json_paths, str):
+            with open(schema_json_paths, "r") as f:
+                all_schemas = json.load(f)
+        else:
+            # load multiple schemas from the list of the json files
+            all_schemas = []
+            completed_services = []
+            for schema_json_path in schema_json_paths:
+                with open(schema_json_path, "r") as f:
+                    schemas = json.load(f)
+                logging.debug("Num of services in %s: %s", schema_json_path, len(schemas))
+
+                for service in schemas:
+                    if service['service_name'] not in completed_services:
+                        completed_services.append(service['service_name'])
+                        all_schemas.append(service)
+
+        self._services = sorted(schema["service_name"] for schema in all_schemas)
+        self._services_vocab = {v: k for k, v in enumerate(self._services)}
+        self._services_id_to_vocab = {v: k for k, v in self._services_vocab.items()}
+        service_schemas = {}
+        for schema in all_schemas:
+            service = schema["service_name"]
+            service_schemas[service] = ServiceSchema(schema, service_id=self.get_service_id(service))
+
+        self._service_schemas = service_schemas
+        self._schemas = all_schemas
+
+    def get_service_id(self, service):
+        return self._services_vocab[service]
+
+    def get_service_from_id(self, service_id):
+        return self._services[service_id]
+
+    def get_service_schema(self, service):
+        return self._service_schemas[service]
+
+    @property
+    def services(self):
+        return self._services
+
+    def save_to_file(self, file_path):
+        with open(file_path, "w") as f:
+            json.dump(self._schemas, f, indent=2)
diff --git a/nemo/collections/nlp/data/datasets/sgd_dataset/schema_embedding_dataset.py b/nemo/collections/nlp/data/datasets/sgd_dataset/schema_embedding_dataset.py
new file mode 100644
index 000000000000..9bb5ff65148b
--- /dev/null
+++ b/nemo/collections/nlp/data/datasets/sgd_dataset/schema_embedding_dataset.py
@@ -0,0 +1,357 @@
+# =============================================================================
+# Copyright 2020 NVIDIA. All Rights Reserved.
+# Copyright 2019 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+"""
+Extract BERT embeddings for slots, values, intents in schema.
+
+This file contains code artifacts adapted from the original implementation:
+https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/extract_schema_embedding.py
+"""
+
+import collections
+import random
+import re
+
+import numpy as np
+import torch
+from torch.utils.data import Dataset
+
+from nemo import logging
+from nemo.collections.nlp.data.datasets.sgd_dataset.input_example import truncate_seq_pair
+
+# Separator to separate the two sentences in BERT's input sequence.
+_NL_SEPARATOR = "|||"
+
+__all__ = ['SchemaEmbeddingDataset']
+
+
+class SchemaEmbeddingDataset(Dataset):
+    def __init__(self, schema_config, tokenizer, schemas):
+        """Generate the embeddings for a schema's elements.
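+
+        Each dataset item is a single (input_ids, input_mask, input_type_ids)
+        tuple encoding one schema element description; the resulting BERT
+        encodings are later aggregated into embedding matrices via
+        save_embeddings().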
+
+        Args:
+            schema_config (dict): schema configuration; MAX_SEQ_LENGTH is used
+                as the sequence length for the BERT model
+            tokenizer (tokenizer): such as NemoBertTokenizer
+            schemas: Schemas for all services in the datasets
+        """
+        self._tokenizer = tokenizer
+        self.schema_config = schema_config
+        self.schemas = schemas
+
+        input_features = self._get_input_features()
+
+        self.features = collections.defaultdict(list)
+
+        for feature in input_features:
+            self.features["input_ids"].append(feature.input_ids)
+            self.features["input_mask"].append(feature.input_mask)
+            self.features["input_type_ids"].append(feature.input_type_ids)
+            self.features["embedding_tensor_name"].append(feature.embedding_tensor_name)
+            self.features["service_id"].append(feature.service_id)
+            self.features["intent_or_slot_id"].append(feature.intent_or_slot_id)
+            self.features["value_id"].append(feature.value_id)
+
+    def __len__(self):
+        return len(self.features['input_ids'])
+
+    def __getitem__(self, idx):
+        return (
+            np.array(self.features['input_ids'][idx]),
+            np.array(self.features['input_mask'][idx], dtype=np.long),
+            np.array(self.features['input_type_ids'][idx]),
+        )
+
+    def _create_feature(self, line, embedding_tensor_name, service_id, intent_or_slot_id, value_id=-1):
+        """Create a single InputFeatures instance."""
+        seq_length = self.schema_config["MAX_SEQ_LENGTH"]
+        line = line.strip()
+        text_a = None
+        text_b = None
+        m = re.match(r"^(.*) \|\|\| (.*)$", line)
+        if m is None:
+            text_a = line
+        else:
+            text_a = m.group(1)
+            text_b = m.group(2)
+
+        tokens_a = self._tokenizer.text_to_tokens(text_a)
+        tokens_b = None
+        if text_b:
+            tokens_b = self._tokenizer.text_to_tokens(text_b)
+
+        if tokens_b:
+            # Modifies `tokens_a` and `tokens_b` in place so that the total
+            # length is less than the specified length.
+            # Account for [CLS], [SEP], [SEP] with "- 3"
+            truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
+        else:
+            # Account for [CLS] and [SEP] with "- 2"
+            if len(tokens_a) > seq_length - 2:
+                tokens_a = tokens_a[0 : (seq_length - 2)]
+
+        # The convention in BERT is:
+        # (a) For sequence pairs:
+        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
+        #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
+        # (b) For single sequences:
+        #  tokens:   [CLS] the dog is hairy . [SEP]
+        #  type_ids: 0     0   0   0  0     0 0
+        #
+        # Where "type_ids" are used to indicate whether this is the first
+        # sequence or the second sequence. The embedding vectors for `type=0` and
+        # `type=1` were learned during pre-training and are added to the wordpiece
+        # embedding vector (and position vector). This is not *strictly* necessary
+        # since the [SEP] token unambiguously separates the sequences, but it
+        # makes it easier for the model to learn the concept of sequences.
+        #
+        # For classification tasks, the first vector (corresponding to [CLS]) is
+        # used as the "sentence vector". Note that this only makes sense
+        # because the entire model is fine-tuned.
+        tokens = []
+        input_type_ids = []
+        tokens.append(self._tokenizer.cls_token)
+        input_type_ids.append(0)
+        for token in tokens_a:
+            tokens.append(token)
+            input_type_ids.append(0)
+        tokens.append(self._tokenizer.sep_token)
+        input_type_ids.append(0)
+
+        if tokens_b:
+            for token in tokens_b:
+                tokens.append(token)
+                input_type_ids.append(1)
+            tokens.append(self._tokenizer.sep_token)
+            input_type_ids.append(1)
+
+        input_ids = self._tokenizer.tokens_to_ids(tokens)
+
+        # The mask has 1 for real tokens and 0 for padding tokens. Only real
+        # tokens are attended to.
+        input_mask = [1] * len(input_ids)
+
+        # Zero-pad up to the sequence length.
+        while len(input_ids) < seq_length:
+            input_ids.append(0)
+            input_mask.append(0)
+            input_type_ids.append(0)
+        assert len(input_ids) == seq_length
+        assert len(input_mask) == seq_length
+        assert len(input_type_ids) == seq_length
+
+        return InputFeatures(
+            input_ids=input_ids,
+            input_mask=input_mask,
+            input_type_ids=input_type_ids,
+            embedding_tensor_name=embedding_tensor_name,
+            service_id=service_id,
+            intent_or_slot_id=intent_or_slot_id,
+            value_id=value_id,
+        )
+
+    def _get_intents_input_features(self, service_schema):
+        """Create features for BERT inference for all intents of a service.
+
+        We use "[service description] ||| [intent name] [intent description]" as an
+        intent's full description.
+
+        Args:
+            service_schema: A ServiceSchema object containing the schema for the
+                corresponding service.
+
+        Returns:
+            A list of InputFeatures containing features to be given as input to the
+            BERT model.
+        """
+        service_des = service_schema.description
+
+        features = []
+        intent_descriptions = {i["name"]: i["description"] for i in service_schema.schema_json["intents"]}
+        for intent_id, intent in enumerate(service_schema.intents):
+            nl_seq = " ".join([service_des, _NL_SEPARATOR, intent, intent_descriptions[intent]])
+            features.append(self._create_feature(nl_seq, "intent_emb", service_schema.service_id, intent_id))
+        return features
+
+    def _get_req_slots_input_features(self, service_schema):
+        """Create features for BERT inference for all requested slots of a service.
+
+        We use "[service description] ||| [slot name] [slot description]" as a
+        slot's full description.
+
+        Args:
+            service_schema: A ServiceSchema object containing the schema for the
+                corresponding service.
+
+        Returns:
+            A list of InputFeatures containing features to be given as input to the
+            BERT model.
+        """
+        service_des = service_schema.description
+
+        slot_descriptions = {s["name"]: s["description"] for s in service_schema.schema_json["slots"]}
+        features = []
+        for slot_id, slot in enumerate(service_schema.slots):
+            nl_seq = " ".join([service_des, _NL_SEPARATOR, slot, slot_descriptions[slot]])
+            features.append(self._create_feature(nl_seq, "req_slot_emb", service_schema.service_id, slot_id))
+        return features
+
+    def _get_goal_slots_and_values_input_features(self, service_schema):
+        """Get BERT input features for all goal slots and categorical values.
+
+        We use "[service description] ||| [slot name] [slot description]" as a
+        slot's full description.
+        We use "[slot name] [slot description] ||| [value name]" as a categorical
+        slot value's full description.
+
+        Args:
+            service_schema: A ServiceSchema object containing the schema for the
+                corresponding service.
+
+        Returns:
+            A list of InputFeatures containing features to be given as input to the
+            BERT model.
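+
+        Example (illustrative, hypothetical schema): for a categorical slot
+        "price_range" with value "cheap", the generated sequences look like
+        "<service description> ||| price_range <slot description>" and
+        "price_range <slot description> ||| cheap".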
+ """ + service_des = service_schema.description + + features = [] + slot_descriptions = {s["name"]: s["description"] for s in service_schema.schema_json["slots"]} + + for slot_id, slot in enumerate(service_schema.non_categorical_slots): + nl_seq = " ".join([service_des, _NL_SEPARATOR, slot, slot_descriptions[slot]]) + features.append(self._create_feature(nl_seq, "noncat_slot_emb", service_schema.service_id, slot_id)) + + for slot_id, slot in enumerate(service_schema.categorical_slots): + nl_seq = " ".join([service_des, _NL_SEPARATOR, slot, slot_descriptions[slot]]) + features.append(self._create_feature(nl_seq, "cat_slot_emb", service_schema.service_id, slot_id)) + for value_id, value in enumerate(service_schema.get_categorical_slot_values(slot)): + nl_seq = " ".join([slot, slot_descriptions[slot], _NL_SEPARATOR, value]) + features.append( + self._create_feature(nl_seq, "cat_slot_value_emb", service_schema.service_id, slot_id, value_id) + ) + return features + + def _get_input_features(self): + """Get the input function to compute schema element embeddings. + + Args: + schemas: A wrapper for all service schemas in the dataset to be embedded. + + Returns: + The input_fn to be passed to the estimator. + """ + # Obtain all the features. + features = [] + for service in self.schemas.services: + service_schema = self.schemas.get_service_schema(service) + features.extend(self._get_intents_input_features(service_schema)) + features.extend(self._get_req_slots_input_features(service_schema)) + features.extend(self._get_goal_slots_and_values_input_features(service_schema)) + + return features + + def _populate_schema_embeddings(self, schema_embeddings, hidden_states, mode): + """ + Populate all schema embeddings with BERT embeddings. + """ + completed_services = set() + batch_size, seq_len, hidden_size = hidden_states[0].shape + + for idx in range(len(self)): + service_id = self.features['service_id'][idx] + service = self.schemas.get_service_from_id(service_id) + + if service not in completed_services: + logging.debug(f"Generating embeddings for service {service}.") + completed_services.add(service) + tensor_name = self.features["embedding_tensor_name"][idx] + emb_mat = schema_embeddings[service_id][tensor_name] + + if mode == 'random': + # randomly initialize schema embeddings + random_token = random.randint(0, seq_len - 1) + embedding = [round(float(x), 6) for x in hidden_states[0][idx, random_token, :].flat] + elif mode == 'last_layer_average': + # Obtain the encoding of the [CLS] token. + embedding = [round(float(x), 6) for x in np.mean(hidden_states[0][idx, :], 0).flat] + elif mode == 'baseline': + # Obtain the encoding of the [CLS] token. 
+                embedding = [round(float(x), 6) for x in hidden_states[0][idx, 0, :].flat]
+            else:
+                raise ValueError(f'Mode {mode} for generating schema embeddings is not supported')
+            intent_or_slot_id = self.features['intent_or_slot_id'][idx]
+            value_id = self.features['value_id'][idx]
+
+            if tensor_name == "cat_slot_value_emb":
+                emb_mat[intent_or_slot_id, value_id] = embedding
+            else:
+                emb_mat[intent_or_slot_id] = embedding
+
+    def save_embeddings(self, bert_hidden_states, output_file, mode):
+        """Generate schema element embeddings and save them as a numpy file."""
+        schema_embeddings = []
+        max_num_intent = self.schema_config["MAX_NUM_INTENT"]
+        max_num_cat_slot = self.schema_config["MAX_NUM_CAT_SLOT"]
+        max_num_noncat_slot = self.schema_config["MAX_NUM_NONCAT_SLOT"]
+        max_num_slot = max_num_cat_slot + max_num_noncat_slot
+        max_num_value = self.schema_config["MAX_NUM_VALUE_PER_CAT_SLOT"]
+        embedding_dim = self.schema_config["EMBEDDING_DIMENSION"]
+
+        for _ in self.schemas.services:
+            schema_embeddings.append(
+                {
+                    "intent_emb": np.zeros([max_num_intent, embedding_dim]),
+                    "req_slot_emb": np.zeros([max_num_slot, embedding_dim]),
+                    "cat_slot_emb": np.zeros([max_num_cat_slot, embedding_dim]),
+                    "noncat_slot_emb": np.zeros([max_num_noncat_slot, embedding_dim]),
+                    "cat_slot_value_emb": np.zeros([max_num_cat_slot, max_num_value, embedding_dim]),
+                }
+            )
+
+        # Populate the embeddings based on bert inference results and save them.
+        self._populate_schema_embeddings(schema_embeddings, bert_hidden_states, mode)
+
+        master_device = not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
+        if master_device:
+            with open(output_file, "wb") as f_s:
+                np.save(f_s, schema_embeddings)
+            logging.info(f"Schema embeddings saved at {output_file}")
+
+
+class InputFeatures(object):
+    """A single set of features for BERT inference."""
+
+    def __init__(
+        self, input_ids, input_mask, input_type_ids, embedding_tensor_name, service_id, intent_or_slot_id, value_id
+    ):
+        # The ids in the vocabulary for input tokens.
+        self.input_ids = input_ids
+        # A boolean mask indicating which tokens in the input_ids are valid.
+        self.input_mask = input_mask
+        # Denotes the sequence each input token belongs to.
+        self.input_type_ids = input_type_ids
+        # The name of the embedding tensor corresponding to this example.
+        self.embedding_tensor_name = embedding_tensor_name
+        # The id of the service corresponding to this example.
+        self.service_id = service_id
+        # The id of the intent (for intent embeddings) or slot (for slot or slot
+        # value embeddings) corresponding to this example.
+        self.intent_or_slot_id = intent_or_slot_id
+        # The id of the value corresponding to this example. Only set if slot value
+        # embeddings are being calculated.
+        self.value_id = value_id
diff --git a/nemo/collections/nlp/data/datasets/sgd_dataset/schema_processor.py b/nemo/collections/nlp/data/datasets/sgd_dataset/schema_processor.py
new file mode 100644
index 000000000000..8c4475d56670
--- /dev/null
+++ b/nemo/collections/nlp/data/datasets/sgd_dataset/schema_processor.py
@@ -0,0 +1,168 @@
+# =============================================================================
+# Copyright 2020 NVIDIA. All Rights Reserved.
+# Copyright 2019 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+"""
+This file contains code artifacts adapted from the original implementation:
+https://github.com/google-research/google-research/blob/master/schema_guided_dst
+"""
+
+import collections
+import os
+
+import numpy as np
+import torch
+
+from nemo import logging
+from nemo.collections.nlp.data.datasets.sgd_dataset import schema
+from nemo.collections.nlp.data.datasets.sgd_dataset.schema_embedding_dataset import SchemaEmbeddingDataset
+from nemo.collections.nlp.nm.data_layers.bert_inference_datalayer import BertInferDataLayer
+from nemo.collections.nlp.utils.data_utils import concatenate
+
+__all__ = ['SchemaPreprocessor']
+
+
+class SchemaPreprocessor:
+    """
+    Convert the raw data to the standard format supported by
+    StateTrackingSGDData.
+
+    Args:
+        data_dir (str) - Directory for the downloaded DSTC8 data, which contains
+            the dialogue files and schema files of all datasets (e.g. train, dev)
+        dialogues_example_dir (str) - Directory where preprocessed DSTC8 dialogues are stored
+        schema_embedding_dir (str) - Directory where the .npy files with embeddings of
+            schema entities (slots, values, intents) of the dataset splits are stored
+        task_name (str) - The name of the task to train
+        vocab_file (str) - The path to BERT vocab file
+        do_lower_case (bool) - Whether to lower case the input text.
+            Should be True for uncased models and False for cased models.
+        max_seq_length (int) - The maximum total input sequence length after
+            WordPiece tokenization. Sequences longer than this will be
+            truncated, and sequences shorter than this will be padded.
+        tokenizer - tokenizer
+        bert_model - pretrained BERT model
+        dataset_split (str) - Dataset split for training / prediction (train/dev/test)
+        overwrite_dial_file (bool) - Whether to generate a new file saving
+            the dialogue examples
+        overwrite_schema_emb_files (bool) - Whether to regenerate the schema
+            embedding files
+        bert_ckpt_dir (str) - Directory containing pre-trained BERT checkpoint
+        nf - NeuralModuleFactory
+        mode (str) - Schema embeddings initialization mode; 'baseline' uses the
+            [CLS] token embedding from the last BERT layer
+    """
+
+    def __init__(
+        self,
+        data_dir,
+        schema_embedding_dir,
+        schema_config,
+        tokenizer,
+        bert_model,
+        overwrite_schema_emb_files,
+        bert_ckpt_dir,
+        nf,
+        datasets=['train', 'test', 'dev'],
+        mode='baseline',
+        is_trainable=False,
+    ):
+
+        # Copy the schema config, which holds the embedding dimension and the
+        # maximum allowed numbers of intents, slots and values per service.
+        self.schema_config = schema_config.copy()
+
+        self.is_trainable = is_trainable
+        self.datasets = datasets
+
+        for dataset_split in ['train', 'test', 'dev']:
+            if dataset_split not in self.datasets:
+                logging.warning(
+                    '%s set was not included and will not be processed. Services from this dataset split '
+                    + 'will not be supported',
+                    dataset_split,
+                )
+        os.makedirs(schema_embedding_dir, exist_ok=True)
+
+        tokenizer_type = type(tokenizer.tokenizer).__name__
+        vocab_size = getattr(tokenizer, "vocab_size", 0)
+        self.schema_embedding_file = os.path.join(
+            schema_embedding_dir,
+            "{}_{}_{}_{}_pretrained_schema_embedding.npy".format(
+                '_'.join(self.datasets), mode, tokenizer_type, vocab_size
+            ),
+        )
+        all_schema_json_paths = []
+        for dataset_split in self.datasets:
+            all_schema_json_paths.append(os.path.join(data_dir, dataset_split, "schema.json"))
+        self.schemas = schema.Schema(all_schema_json_paths)
+
+        if not os.path.exists(self.schema_embedding_file) or overwrite_schema_emb_files:
+            # Generate the schema embeddings if needed or specified
+            logging.info("Started generating the schema embeddings.")
+            dataset_params = {
+                "schema_config": schema_config,
+                "tokenizer": tokenizer,
+                "schemas": self.schemas,
+            }
+            emb_datalayer = BertInferDataLayer(
+                dataset_type=SchemaEmbeddingDataset, dataset_params=dataset_params, batch_size=1, shuffle=False,
+            )
+
+            input_ids, input_mask, input_type_ids = emb_datalayer()
+
+            hidden_states = bert_model(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
+            evaluated_tensors = nf.infer(tensors=[hidden_states], checkpoint_dir=bert_ckpt_dir)
+
+            master_device = not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
+            if master_device:
+                hidden_states = [concatenate(tensors) for tensors in evaluated_tensors]
+                emb_datalayer.dataset.save_embeddings(hidden_states, self.schema_embedding_file, mode)
+                logging.info("Finished generating the schema embeddings.")
+
+        # wait until the master process writes to the schema embedding file
+        if torch.distributed.is_initialized():
+            torch.distributed.barrier()
+
+        with open(self.schema_embedding_file, "rb") as f:
+            self.schema_embeddings = np.load(f, allow_pickle=True)
+
+    def get_schema_embeddings(self):
+        # Convert from list of dict to dict of list
+        schema_data_dict = collections.defaultdict(list)
+        for service in self.schema_embeddings:
+            schema_data_dict["cat_slot_emb"].append(service["cat_slot_emb"])
+            schema_data_dict["cat_slot_value_emb"].append(service["cat_slot_value_emb"])
+            schema_data_dict["noncat_slot_emb"].append(service["noncat_slot_emb"])
+            schema_data_dict["req_slot_emb"].append(service["req_slot_emb"])
+            schema_data_dict["intent_emb"].append(service["intent_emb"])
+        return schema_data_dict
+
+    def _get_schema_embedding_file_name(self):
+        return self.schema_embedding_file
+
+    def get_service_names_to_id_dict(self):
+        return self.schemas._services_vocab
+
+    def get_ids_to_service_names_dict(self):
+        return self.schemas._services_id_to_vocab
diff --git a/nemo/collections/nlp/data/datasets/sgd_dataset/sgd_dataset.py b/nemo/collections/nlp/data/datasets/sgd_dataset/sgd_dataset.py
new file mode 100644
index 000000000000..7e83b6a93904
--- /dev/null
+++ b/nemo/collections/nlp/data/datasets/sgd_dataset/sgd_dataset.py
@@ -0,0 +1,67 @@
+# =============================================================================
+# Copyright 2020 NVIDIA. All Rights Reserved.
+# Copyright 2019 The Google Research Authors.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +""" +This file contains code artifacts adapted from the original implementation: +https://github.com/google-research/google-research/blob/master/schema_guided_dst +""" +import numpy as np +from torch.utils.data import Dataset + +__all__ = ['SGDDataset'] + + +class SGDDataset(Dataset): + """ + Processes SGD dataset + Args: + dataset_split (str): train/dev/test + dialogues_processor (obj): Data generator for dstc8 dialogues + """ + + def __init__(self, dataset_split, dialogues_processor): + self.features = dialogues_processor.get_dialog_examples(dataset_split) + + def __len__(self): + return len(self.features) + + def __getitem__(self, idx): + ex = self.features[idx] + service_id = ex.service_schema.service_id + + return ( + np.array(ex.example_id_num), + np.array(service_id), + np.array(ex.is_real_example, dtype=int), + np.array(ex.utterance_ids), + np.array(ex.utterance_segment), + np.array(ex.utterance_mask, dtype=np.long), + np.array(ex.categorical_slot_status), + np.array(ex.cat_slot_status_mask), + np.array(ex.categorical_slot_values), + np.array(ex.cat_slot_values_mask), + np.array(ex.noncategorical_slot_status), + np.array(ex.noncat_slot_status_mask), + np.array(ex.noncategorical_slot_value_start), + np.array(ex.noncategorical_slot_value_end), + np.array(ex.start_char_idx), # noncat_alignment_start + np.array(ex.end_char_idx), # noncat_alignment_end + np.array(ex.num_slots), # num_requested_slots + np.array(ex.requested_slot_status, dtype=np.float32), + np.array(ex.requested_slot_mask), + np.array(ex.intent_status_mask), + np.array(ex.intent_status_labels), + ) diff --git a/nemo/collections/nlp/data/tokenizers/tokenizer_utils.py b/nemo/collections/nlp/data/tokenizers/tokenizer_utils.py index 7ace6eeea053..0cdfb7e46bfe 100644 --- a/nemo/collections/nlp/data/tokenizers/tokenizer_utils.py +++ b/nemo/collections/nlp/data/tokenizers/tokenizer_utils.py @@ -18,14 +18,21 @@ from transformers import AlbertTokenizer, BertTokenizer, RobertaTokenizer import nemo -from nemo.collections.nlp.nm.trainables.common.megatron.megatron_utils import ( - get_megatron_vocab_file, - is_lower_cased_megatron, -) +from nemo.utils import logging -__all__ = ['MODEL_SPECIAL_TOKENS', 'TOKENIZERS', 'get_tokenizer', 'get_bert_special_tokens'] +try: + __megatron_utils_satisfied = True + from nemo.collections.nlp.nm.trainables.common.megatron.megatron_utils import ( + get_megatron_vocab_file, + is_lower_cased_megatron, + ) + +except Exception as e: + logging.error('Failed to import Megatron utils: `{}` ({})'.format(str(e), type(e))) + __megatron_utils_satisfied = False -logging = nemo.logging + +__all__ = ['MODEL_SPECIAL_TOKENS', 'TOKENIZERS', 'get_tokenizer', 'get_bert_special_tokens'] MODEL_SPECIAL_TOKENS = { 'bert': { @@ -84,12 +91,14 @@ def get_tokenizer( vocab_file (str): path to vocab file do_lower_case (bool): (whether to apply lower cased) - only applicable when tokenizer is 
build with vocab file
     '''
-    if 'megatron' in pretrained_model_name:
-        do_lower_case = is_lower_cased_megatron(pretrained_model_name)
-        vocab_file = get_megatron_vocab_file(pretrained_model_name)
-        return nemo.collections.nlp.data.tokenizers.NemoBertTokenizer(
-            vocab_file=vocab_file, do_lower_case=do_lower_case
-        )
+    # Check if we can use Megatron utils.
+    if __megatron_utils_satisfied:
+        if 'megatron' in pretrained_model_name:
+            do_lower_case = is_lower_cased_megatron(pretrained_model_name)
+            vocab_file = get_megatron_vocab_file(pretrained_model_name)
+            return nemo.collections.nlp.data.tokenizers.NemoBertTokenizer(
+                vocab_file=vocab_file, do_lower_case=do_lower_case
+            )
 
     if tokenizer_name == 'nemobert':
         tokenizer = nemo.collections.nlp.data.tokenizers.NemoBertTokenizer(
diff --git a/nemo/collections/nlp/nm/data_layers/__init__.py b/nemo/collections/nlp/nm/data_layers/__init__.py
index 5b5d3dde539f..0c42605631ac 100644
--- a/nemo/collections/nlp/nm/data_layers/__init__.py
+++ b/nemo/collections/nlp/nm/data_layers/__init__.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 # =============================================================================
 
+from nemo.collections.nlp.nm.data_layers.bert_inference_datalayer import *
 from nemo.collections.nlp.nm.data_layers.glue_benchmark_datalayer import *
 from nemo.collections.nlp.nm.data_layers.joint_intent_slot_datalayer import *
 from nemo.collections.nlp.nm.data_layers.lm_bert_datalayer import *
@@ -21,6 +22,7 @@
 from nemo.collections.nlp.nm.data_layers.machine_translation_datalayer import *
 from nemo.collections.nlp.nm.data_layers.punctuation_capitalization_datalayer import *
 from nemo.collections.nlp.nm.data_layers.qa_squad_datalayer import *
+from nemo.collections.nlp.nm.data_layers.state_tracking_sgd_datalayer import *
 from nemo.collections.nlp.nm.data_layers.state_tracking_trade_datalayer import *
 from nemo.collections.nlp.nm.data_layers.text_classification_datalayer import *
 from nemo.collections.nlp.nm.data_layers.text_datalayer import *
diff --git a/nemo/collections/nlp/nm/data_layers/bert_inference_datalayer.py b/nemo/collections/nlp/nm/data_layers/bert_inference_datalayer.py
new file mode 100644
index 000000000000..2da78552084d
--- /dev/null
+++ b/nemo/collections/nlp/nm/data_layers/bert_inference_datalayer.py
@@ -0,0 +1,68 @@
+# =============================================================================
+# Copyright 2020 NVIDIA. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+from nemo.collections.nlp.nm.data_layers.text_datalayer import TextDataLayer
+from nemo.core import ChannelType, NeuralType
+from nemo.utils.decorators import add_port_docs
+
+__all__ = ['BertInferDataLayer']
+
+
+class BertInferDataLayer(TextDataLayer):
+    """
+    Data layer to run inference with BERT (get final hidden layer).
+
+    Args:
+        dataset_type (Dataset): dataset class to instantiate, e.g. one that
+            yields (input_ids, input_mask, input_type_ids) tuples
+        dataset_params (dict): parameters passed to the dataset constructor
+        batch_size (int): batch size in segments
+        shuffle (bool): whether to shuffle the data.
+            Defaults to False.
+    """
+
+    @property
+    @add_port_docs()
+    def output_ports(self):
+        """Returns definitions of module output ports.
+
+        input_ids: indices of tokens which constitute batches of text segments
+            0: AxisType(BatchTag)
+
+            1: AxisType(TimeTag)
+
+        input_type_ids: indices of token types (e.g., sentences A & B in BERT)
+            0: AxisType(BatchTag)
+
+            1: AxisType(TimeTag)
+
+        input_mask: bool tensor with 0s in place of tokens to be masked
+            0: AxisType(BatchTag)
+
+            1: AxisType(TimeTag)
+
+        """
+        return {
+            "input_ids": NeuralType(('B', 'T'), ChannelType()),
+            "input_type_ids": NeuralType(('B', 'T'), ChannelType()),
+            "input_mask": NeuralType(('B', 'T'), ChannelType()),
+        }
+
+    def __init__(self, dataset_type, dataset_params, batch_size=1, shuffle=False):
+
+        super().__init__(dataset_type, dataset_params, batch_size=batch_size, shuffle=shuffle)
diff --git a/nemo/collections/nlp/nm/data_layers/state_tracking_sgd_datalayer.py b/nemo/collections/nlp/nm/data_layers/state_tracking_sgd_datalayer.py
new file mode 100644
index 000000000000..d5f76d0ed65b
--- /dev/null
+++ b/nemo/collections/nlp/nm/data_layers/state_tracking_sgd_datalayer.py
@@ -0,0 +1,121 @@
+# =============================================================================
+# Copyright 2020 NVIDIA. All Rights Reserved.
+# Copyright 2019 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+from nemo.backends.pytorch import DataLayerNM
+from nemo.collections.nlp.data.datasets.sgd_dataset.sgd_dataset import SGDDataset
+from nemo.core.neural_types import ChannelType, LabelsType, LengthsType, NeuralType
+from nemo.utils.decorators import add_port_docs
+
+__all__ = ['SGDDataLayer']
+
+
+class SGDDataLayer(DataLayerNM):
+    """
+    Data layer for the Schema Guided Dialogue State Tracking dataset.
+    Args:
+        dataset_split (str): train/dev/test
+        dialogues_processor (obj): contains the dialogue data
+        dataset_type (Dataset): dataset type
+        shuffle (bool): enables shuffling, default=False
+        num_workers (int): number of workers
+        batch_size (int): batch size
+        pin_memory (bool): enables copying Tensors into CUDA pinned memory before returning them
+    """
+
+    @property
+    @add_port_docs()
+    def output_ports(self):
+        """Returns definitions of module output ports.
+        example_id_num (int): example ids
+        service_id (int): service ids
+        is_real_example (bool): flag indicating whether the example is valid
+        utterance_ids (int): utterance ids
+        utterance_segment (int): Denotes the identity of the sequence.
            Takes values 0 (system utterance) and 1 (user utterance)
+        utterance_mask (int): Mask which takes the value 0 for padded tokens and 1 otherwise
+        categorical_slot_status (int): The status of each categorical slot in the service
+        cat_slot_status_mask (int): Masks out categorical status for padded cat slots, takes values 0 and 1
+        categorical_slot_values (int): The index of the correct value for each categorical slot
+        cat_slot_values_mask (int): Masks out categorical slot values for slots not used in the service, takes values 0 and 1
+        noncategorical_slot_status (int): The status of each non-categorical slot in the service
+        noncat_slot_status_mask (int): Masks out non-categorical status for padded noncat slots, takes values 0 and 1
+        noncategorical_slot_value_start (int): The index of the starting subword corresponding to the slot span for a non-categorical slot value
+        noncategorical_slot_value_end (int): The index of the ending (inclusive) subword corresponding to the slot span for a non-categorical slot value
+        start_char_idx (int): Start character indices in the original utterance corresponding to the tokens
+        end_char_idx (int): Inclusive end character indices in the original utterance corresponding to the tokens
+        num_slots (int): Total number of slots present in the service
+        requested_slot_status (int): Takes value 1 if the corresponding slot is requested, 0 otherwise
+        req_slot_mask (int): Masks requested slots not used for the particular service
+        intent_status_mask (long): Masks out padded intents in the service, takes values 0 and 1
+        intent_status_labels (int): Intent labels
+
+        """
+        return {
+            "example_id_num": NeuralType(('B'), ChannelType()),
+            "service_id": NeuralType(('B'), ChannelType()),
+            "is_real_example": NeuralType(('B'), ChannelType()),
+            "utterance_ids": NeuralType(('B', 'T'), ChannelType()),
+            "utterance_segment": NeuralType(('B', 'T'), ChannelType()),
+            "utterance_mask": NeuralType(('B', 'T'), ChannelType()),
+            "categorical_slot_status": NeuralType(('B', 'T'), LabelsType()),
+            "cat_slot_status_mask": NeuralType(('B', 'T'), ChannelType()),
+            "categorical_slot_values": NeuralType(('B', 'T'), LabelsType()),
+            "cat_slot_values_mask": NeuralType(('B', 'T', 'C'), ChannelType()),
+            "noncategorical_slot_status": NeuralType(('B', 'T'), LabelsType()),
+            "noncat_slot_status_mask": NeuralType(('B', 'T'), ChannelType()),
+            "noncategorical_slot_value_start": NeuralType(('B', 'T'), LabelsType()),
+            "noncategorical_slot_value_end": NeuralType(('B', 'T'), LabelsType()),
+            "start_char_idx": NeuralType(('B', 'T'), LabelsType()),
+            "end_char_idx": NeuralType(('B', 'T'), LabelsType()),
+            "num_slots": NeuralType(('B'), LengthsType()),
+            "requested_slot_status": NeuralType(('B', 'T'), LabelsType()),
+            "req_slot_mask": NeuralType(('B', 'T'), ChannelType()),
+            "intent_status_mask": NeuralType(('B', 'T'), ChannelType()),
+            "intent_status_labels": NeuralType(('B'), LabelsType()),
+        }
+
+    def __init__(
+        self,
+        dataset_split,
+        dialogues_processor,
+        dataset_type=SGDDataset,
+        shuffle=False,
+        batch_size=1,
+        num_workers=-1,
+        pin_memory=False,
+    ):
+        super().__init__()
+        dataset_params = {
+            'dataset_split': dataset_split,
+            'dialogues_processor': dialogues_processor,
+        }
+        self._dataset = dataset_type(**dataset_params)
+        self._batch_size = batch_size
+        self._shuffle = shuffle
+        self._pin_memory = pin_memory
+        if num_workers >= 0:
+            self._num_workers = num_workers
+
+    def __len__(self):
+        return len(self._dataset)
+
+    @property
+    def dataset(self):
+        return self._dataset
+
+    @property
def data_iterator(self): + return None diff --git a/nemo/collections/nlp/nm/data_layers/text_datalayer.py b/nemo/collections/nlp/nm/data_layers/text_datalayer.py index e18da9f0d721..0013fc97e9a4 100644 --- a/nemo/collections/nlp/nm/data_layers/text_datalayer.py +++ b/nemo/collections/nlp/nm/data_layers/text_datalayer.py @@ -31,11 +31,14 @@ class TextDataLayer(DataLayerNM): shuffle (bool): whether to shuffle data """ - def __init__(self, dataset_type, dataset_params, batch_size, shuffle=False): + def __init__(self, dataset_type, dataset_params, batch_size, shuffle=False, num_workers=-1, pin_memory=False): super().__init__() self._dataset = dataset_type(**dataset_params) self._batch_size = batch_size self._shuffle = shuffle + self._pin_memory = pin_memory + if num_workers >= 0: + self._num_workers = num_workers def __len__(self): return len(self._dataset) diff --git a/nemo/collections/nlp/nm/losses/__init__.py b/nemo/collections/nlp/nm/losses/__init__.py index ee7b74199e13..357839adb61a 100644 --- a/nemo/collections/nlp/nm/losses/__init__.py +++ b/nemo/collections/nlp/nm/losses/__init__.py @@ -15,5 +15,6 @@ # ============================================================================= from nemo.collections.nlp.nm.losses.masked_xentropy_loss import * +from nemo.collections.nlp.nm.losses.sgd_loss import * from nemo.collections.nlp.nm.losses.smoothed_cross_entropy_loss import * from nemo.collections.nlp.nm.losses.spanning_loss import * diff --git a/nemo/collections/nlp/nm/losses/sgd_loss.py b/nemo/collections/nlp/nm/losses/sgd_loss.py new file mode 100644 index 000000000000..e51912bb7b48 --- /dev/null +++ b/nemo/collections/nlp/nm/losses/sgd_loss.py @@ -0,0 +1,227 @@ +# ============================================================================= +# Copyright 2020 NVIDIA. All Rights Reserved. +# Copyright 2019 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +''' +This file contains code artifacts adapted from the original implementation: +https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/train_and_predict.py +''' + +import torch + +from nemo import logging +from nemo.backends.pytorch import LossNM +from nemo.collections.nlp.data.datasets.sgd_dataset.input_example import STATUS_ACTIVE +from nemo.core import ChannelType, LabelsType, LogitsType, NeuralType +from nemo.utils.decorators import add_port_docs + +__all__ = ['SGDDialogueStateLossNM'] + + +class SGDDialogueStateLossNM(LossNM): + """ + Neural module which implements loss for SGD model. + """ + + @property + @add_port_docs + def input_ports(self): + """Returns definitions of module input ports. 
+ logit_intent_status (float): Output of SGD model + intent_status_labels (int): Intent labels + logit_req_slot_status (float): Output of SGD model + requested_slot_status (float): Takes value 1 if the corresponding slot is requested, 0 otherwise + req_slot_mask (bool): Masks requested slots not used for the particular service + logit_cat_slot_status (float): Output of SGD model + categorical_slot_status (int): The status of each categorical slot in the service + cat_slot_status_mask (bool): Masks categorical slots not used for the particular service + logit_cat_slot_value (float): Output of SGD model + categorical_slot_values (int): The index of the correct value for each categorical slot + logit_noncat_slot_status (float): Output of SGD model + noncategorical_slot_status (int): The status of each noncategorical slot in the service + noncat_slot_status_mask (bool): masks noncategorical slots not used for the particular service + logit_noncat_slot_start (float): Output of SGD model + logit_noncat_slot_end (float): Output of SGD model + noncategorical_slot_value_start (int): The index of the starting subword corresponding to the slot span for a non-categorical slot value + noncategorical_slot_value_end (int): The index of the ending (inclusive) subword corresponding to the slot span for a non-categorical slot value + """ + return { + "logit_intent_status": NeuralType(('B', 'T', 'C'), LogitsType()), + "intent_status_labels": NeuralType(('B'), LabelsType()), + "logit_req_slot_status": NeuralType(('B', 'T'), LogitsType()), + "requested_slot_status": NeuralType(('B', 'T'), LabelsType()), + "req_slot_mask": NeuralType(('B', 'T'), ChannelType()), + "logit_cat_slot_status": NeuralType(('B', 'T', 'C'), LogitsType()), + "categorical_slot_status": NeuralType(('B', 'T'), LabelsType()), + "cat_slot_status_mask": NeuralType(('B', 'T'), ChannelType()), + "logit_cat_slot_value": NeuralType(('B', 'T', 'C'), LogitsType()), + "categorical_slot_values": NeuralType(('B', 'T'), LabelsType()), + "logit_noncat_slot_status": NeuralType(('B', 'T', 'C'), LogitsType()), + "noncategorical_slot_status": NeuralType(('B', 'T'), LabelsType()), + "noncat_slot_status_mask": NeuralType(('B', 'T'), ChannelType()), + "logit_noncat_slot_start": NeuralType(('B', 'T', 'C'), LogitsType()), + "logit_noncat_slot_end": NeuralType(('B', 'T', 'C'), LogitsType()), + "noncategorical_slot_value_start": NeuralType(('B', 'T'), LabelsType()), + "noncategorical_slot_value_end": NeuralType(('B', 'T'), LabelsType()), + } + + @property + def output_ports(self): + """ + Returns definitions of module output ports. + loss: + NeuralType(None) + """ + return {"loss": NeuralType(None)} + + def __init__(self, reduction='mean'): + """ + Args: + reduction (str): specifies the reduction to apply to the final loss, choose 'mean' or 'sum' + """ + super().__init__() + + if reduction not in ['mean', 'sum']: + logging.warning(f'{reduction} reduction is not supported. 
Setting reduction to "mean"')
+            reduction = 'mean'
+
+        self.reduction = reduction
+        self._cross_entropy = torch.nn.CrossEntropyLoss(reduction=self.reduction)
+        self._criterion_req_slots = torch.nn.BCEWithLogitsLoss(reduction=self.reduction)
+
+    def _loss_function(
+        self,
+        logit_intent_status,
+        intent_status_labels,
+        logit_req_slot_status,
+        requested_slot_status,
+        req_slot_mask,
+        logit_cat_slot_status,
+        categorical_slot_status,
+        cat_slot_status_mask,
+        logit_cat_slot_value,
+        categorical_slot_values,
+        logit_noncat_slot_status,
+        noncategorical_slot_status,
+        noncat_slot_status_mask,
+        logit_noncat_slot_start,
+        logit_noncat_slot_end,
+        noncategorical_slot_value_start,
+        noncategorical_slot_value_end,
+    ):
+        # Intent loss
+        intent_loss = self._cross_entropy(logit_intent_status, intent_status_labels)
+
+        # Requested slots.
+        # Shape: (batch_size, max_num_slots)
+        # mask unused slots
+        # Sigmoid cross entropy is used because more than one slot can be requested in a single utterance
+        requested_slot_loss = self._criterion_req_slots(
+            logit_req_slot_status.view(-1)[req_slot_mask], requested_slot_status.view(-1)[req_slot_mask]
+        )
+
+        # Categorical slot status
+        # Shape of logit_cat_slot_status: (batch_size, max_num_cat_slots, 3)
+        cat_slot_status_mask = cat_slot_status_mask.view(-1) > 0.5
+        if sum(cat_slot_status_mask) == 0:
+            logging.warning('No active categorical slots in the batch.')
+            cat_slot_status_loss = self._cross_entropy(
+                logit_cat_slot_status.view(-1, 3), torch.argmax(logit_cat_slot_status.view(-1, 3), dim=-1)
+            )
+        else:
+            cat_slot_status_loss = self._cross_entropy(
+                logit_cat_slot_status.view(-1, 3)[cat_slot_status_mask],
+                categorical_slot_status.view(-1)[cat_slot_status_mask],
+            )
+
+        # Categorical slot values.
+        # Shape: (batch_size, max_num_cat_slots, max_num_slot_values).
+        max_num_slot_values = logit_cat_slot_value.size()[-1]
+
+        # Zero out losses for categorical slot value when the slot status is not active.
+        cat_slot_value_mask = (categorical_slot_status == STATUS_ACTIVE).view(-1)
+        # to handle cases with no active categorical slot value
+        cat_slot_value_mask = cat_slot_value_mask.view(-1) > 0.5
+        if sum(cat_slot_value_mask) == 0:
+            logging.warning('No active values for categorical slots in the batch.')
+            cat_slot_value_loss = self._cross_entropy(
+                logit_cat_slot_value.view(-1, max_num_slot_values),
+                torch.argmax(logit_cat_slot_value.view(-1, max_num_slot_values), dim=-1),
+            )
+        else:
+            slot_values_active_logits = logit_cat_slot_value.view(-1, max_num_slot_values)[cat_slot_value_mask]
+            slot_values_active_labels = categorical_slot_values.view(-1)[cat_slot_value_mask]
+            cat_slot_value_loss = self._cross_entropy(slot_values_active_logits, slot_values_active_labels)
+
+        # Non-categorical slot status.
+        # Shape: (batch_size, max_num_noncat_slots, 3).
+        noncat_slot_status_mask = noncat_slot_status_mask.view(-1) > 0.5
+        if sum(noncat_slot_status_mask) == 0:
+            logging.warning('No active non-categorical slots in the batch.')
+            noncat_slot_status_loss = self._cross_entropy(
+                logit_noncat_slot_status.view(-1, 3), torch.argmax(logit_noncat_slot_status.view(-1, 3), dim=-1)
+            )
+        else:
+            noncat_slot_status_loss = self._cross_entropy(
+                logit_noncat_slot_status.view(-1, 3)[noncat_slot_status_mask],
+                noncategorical_slot_status.view(-1)[noncat_slot_status_mask],
+            )
+
+        # Non-categorical slot spans.
+        # Shape: (batch_size, max_num_noncat_slots, max_num_tokens).
+        max_num_tokens = logit_noncat_slot_start.size()[-1]
+        # Zero out losses for non-categorical slot spans when the slot status is not active.
+        non_cat_slot_value_mask = (noncategorical_slot_status == STATUS_ACTIVE).view(-1)
+        # to handle cases with no active non-categorical slot value
+        non_cat_slot_value_mask = non_cat_slot_value_mask.view(-1)
+        if sum(non_cat_slot_value_mask) == 0:
+            logging.warning('No active values for non-categorical slots in the batch.')
+            span_start_loss = self._cross_entropy(
+                logit_noncat_slot_start.view(-1, max_num_tokens),
+                torch.argmax(logit_noncat_slot_start.view(-1, max_num_tokens), dim=-1),
+            )
+            span_end_loss = self._cross_entropy(
+                logit_noncat_slot_end.view(-1, max_num_tokens),
+                torch.argmax(logit_noncat_slot_end.view(-1, max_num_tokens), dim=-1),
+            )
+        else:
+            noncat_slot_start_active_logits = logit_noncat_slot_start.view(-1, max_num_tokens)[non_cat_slot_value_mask]
+            noncat_slot_start_active_labels = noncategorical_slot_value_start.view(-1)[non_cat_slot_value_mask]
+            span_start_loss = self._cross_entropy(noncat_slot_start_active_logits, noncat_slot_start_active_labels)
+
+            noncat_slot_end_active_logits = logit_noncat_slot_end.view(-1, max_num_tokens)[non_cat_slot_value_mask]
+            noncat_slot_end_active_labels = noncategorical_slot_value_end.view(-1)[non_cat_slot_value_mask]
+            span_end_loss = self._cross_entropy(noncat_slot_end_active_logits, noncat_slot_end_active_labels)
+
+        losses = {
+            "intent_loss": intent_loss,
+            "requested_slot_loss": requested_slot_loss,
+            "cat_slot_status_loss": cat_slot_status_loss,
+            "cat_slot_value_loss": cat_slot_value_loss,
+            "noncat_slot_status_loss": noncat_slot_status_loss,
+            "span_start_loss": span_start_loss,
+            "span_end_loss": span_end_loss,
+        }
+
+        total_loss = sum(losses.values())
+        if self.reduction == 'mean':
+            total_loss = total_loss / len(losses)
+        else:
+            batch_size = logit_intent_status.shape[0]
+            total_loss = total_loss / batch_size
+        return total_loss
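The pattern that recurs throughout this loss (flatten the logits, select active positions with a boolean mask, apply cross-entropy, and fall back to a degenerate self-target when the mask is empty) is easy to exercise in isolation. A minimal self-contained sketch, with all shapes invented for illustration:

import torch

# Toy illustration of the masked cross-entropy used above (shapes invented).
batch, slots, num_statuses = 2, 4, 3
logits = torch.randn(batch, slots, num_statuses)
labels = torch.randint(num_statuses, (batch, slots))
mask = torch.tensor([[1, 1, 0, 0], [1, 0, 0, 0]]).view(-1) > 0.5

xent = torch.nn.CrossEntropyLoss(reduction='mean')
if mask.sum() == 0:
    # Fallback mirroring the code above: loss against the argmax of the logits
    # themselves, which keeps the loss well-defined when nothing is active.
    flat = logits.view(-1, num_statuses)
    loss = xent(flat, torch.argmax(flat, dim=-1))
else:
    # Only rows where the mask is True contribute to the loss.
    loss = xent(logits.view(-1, num_statuses)[mask], labels.view(-1)[mask])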
diff --git a/nemo/collections/nlp/nm/trainables/common/__init__.py b/nemo/collections/nlp/nm/trainables/common/__init__.py
index 7ac5338dfe4a..0061462d13fe 100644
--- a/nemo/collections/nlp/nm/trainables/common/__init__.py
+++ b/nemo/collections/nlp/nm/trainables/common/__init__.py
@@ -16,8 +16,14 @@
 from nemo.collections.nlp.nm.trainables.common.common_utils import *
 from nemo.collections.nlp.nm.trainables.common.huggingface import *
-from nemo.collections.nlp.nm.trainables.common.megatron import *
 from nemo.collections.nlp.nm.trainables.common.sequence_classification_nm import *
 from nemo.collections.nlp.nm.trainables.common.sequence_regression_nm import *
 from nemo.collections.nlp.nm.trainables.common.token_classification_nm import *
 from nemo.collections.nlp.nm.trainables.common.transformer import *
+from nemo.utils import logging
+
+try:
+    from nemo.collections.nlp.nm.trainables.common.megatron.megatron_utils import *
+
+except Exception as e:
+    logging.error('Failed to import Megatron utils: `{}` ({})'.format(str(e), type(e)))
diff --git a/nemo/collections/nlp/nm/trainables/common/common_utils.py b/nemo/collections/nlp/nm/trainables/common/common_utils.py
index 318038c50aa4..4964269f65ed 100644
--- a/nemo/collections/nlp/nm/trainables/common/common_utils.py
+++ b/nemo/collections/nlp/nm/trainables/common/common_utils.py
@@ -18,8 +18,16 @@
 from nemo import logging
 from nemo.collections.nlp.nm.trainables.common.huggingface.huggingface_utils import *
-from nemo.collections.nlp.nm.trainables.common.megatron.megatron_bert_nm import MegatronBERT
-from nemo.collections.nlp.nm.trainables.common.megatron.megatron_utils import *
+
+try:
+    __megatron_utils_satisfied = True
+    from nemo.collections.nlp.nm.trainables.common.megatron.megatron_bert_nm import MegatronBERT
+    from nemo.collections.nlp.nm.trainables.common.megatron.megatron_utils import *
+
+except Exception as e:
+    logging.error('Failed to import Megatron Neural Module and utils: `{}` ({})'.format(str(e), type(e)))
+    __megatron_utils_satisfied = False
+
 __all__ = ['get_pretrained_lm_models_list', 'get_pretrained_lm_model']
@@ -28,7 +36,10 @@ def get_pretrained_lm_models_list():
     '''
     Returns the list of support pretrained models
     '''
-    return get_megatron_lm_models_list() + get_huggingface_lm_models_list()
+    if __megatron_utils_satisfied:
+        return get_megatron_lm_models_list() + get_huggingface_lm_models_list()
+    else:
+        return get_huggingface_lm_models_list()


 def get_pretrained_lm_model(pretrained_model_name, config=None, vocab=None, checkpoint=None):
@@ -45,7 +56,7 @@ def get_pretrained_lm_model(pretrained_model_name, config=None, vocab=None, checkpoint=None):
     '''
     if pretrained_model_name in get_huggingface_lm_models_list():
         model = get_huggingface_lm_model(bert_config=config, pretrained_model_name=pretrained_model_name)
-    elif pretrained_model_name in get_megatron_lm_models_list():
+    elif __megatron_utils_satisfied and pretrained_model_name in get_megatron_lm_models_list():
         if pretrained_model_name == 'megatron-bert-cased' or pretrained_model_name == 'megatron-bert-uncased':
             if not (config and checkpoint):
                 raise ValueError(f'Config file and pretrained checkpoint required for {pretrained_model_name}')
diff --git a/nemo/collections/nlp/nm/trainables/common/megatron/__init__.py b/nemo/collections/nlp/nm/trainables/common/megatron/__init__.py
index d82f20067425..34bb64c10941 100644
--- a/nemo/collections/nlp/nm/trainables/common/megatron/__init__.py
+++ b/nemo/collections/nlp/nm/trainables/common/megatron/__init__.py
@@ -14,4 +14,10 @@
 # limitations under the License.
 # =============================================================================

-from nemo.collections.nlp.nm.trainables.common.megatron.megatron_bert_nm import *
+from nemo.utils import logging
+
+try:
+    from nemo.collections.nlp.nm.trainables.common.megatron.megatron_bert_nm import *
+
+except Exception as e:
+    logging.error('Failed to import Megatron Neural Module: `{}` ({})'.format(str(e), type(e)))
diff --git a/nemo/collections/nlp/nm/trainables/common/megatron/megatron_utils.py b/nemo/collections/nlp/nm/trainables/common/megatron/megatron_utils.py
index 13ec1894eb05..558831967207 100644
--- a/nemo/collections/nlp/nm/trainables/common/megatron/megatron_utils.py
+++ b/nemo/collections/nlp/nm/trainables/common/megatron/megatron_utils.py
@@ -30,7 +30,7 @@
     'get_megatron_checkpoint',
 ]

-MEGATRON_CACHE = os.path.join(os.path.dirname(TRANSFORMERS_CACHE), 'megatron')
+MEGATRON_CACHE = os.path.join(os.path.dirname(str(TRANSFORMERS_CACHE)), 'megatron')

 CONFIGS = {'345m': {"hidden-size": 1024, "num-attention-heads": 16, "num-layers": 24, "max-seq-length": 512}}
diff --git a/nemo/collections/nlp/nm/trainables/common/transformer/transformer_modules.py b/nemo/collections/nlp/nm/trainables/common/transformer/transformer_modules.py
index cca8dc6002ba..50e66346e017 100644
--- a/nemo/collections/nlp/nm/trainables/common/transformer/transformer_modules.py
+++ b/nemo/collections/nlp/nm/trainables/common/transformer/transformer_modules.py
@@ -28,11 +28,12 @@

 try:
     from apex.normalization import FusedLayerNorm
-except (AttributeError, ModuleNotFoundError):
-    # this is lie - it isn't fused in this case
-    logging.warning(
-        "Unable to import APEX. Mixed precision, distributed training and FusedLayerNorm are not available."
-    )
+
+    # Try to instantiate FusedLayerNorm - this raises an error when Apex's fused kernels are unavailable,
+    # so the fallback below is triggered.
+    _ = FusedLayerNorm(8, eps=1e-5)
+
+except Exception as e:
+    logging.warning("Unable to import FusedLayerNorm from APEX. Using regular LayerNorm instead.")
     from torch.nn import LayerNorm as FusedLayerNorm
diff --git a/nemo/collections/nlp/nm/trainables/dialogue_state_tracking/__init__.py b/nemo/collections/nlp/nm/trainables/dialogue_state_tracking/__init__.py
index 0eed31eb4973..05f3cde4c1ce 100644
--- a/nemo/collections/nlp/nm/trainables/dialogue_state_tracking/__init__.py
+++ b/nemo/collections/nlp/nm/trainables/dialogue_state_tracking/__init__.py
@@ -13,5 +13,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =============================================================================
-
+from nemo.collections.nlp.nm.trainables.dialogue_state_tracking.sgd import *
 from nemo.collections.nlp.nm.trainables.dialogue_state_tracking.trade_generator_nm import *
diff --git a/nemo/collections/nlp/nm/trainables/dialogue_state_tracking/sgd/__init__.py b/nemo/collections/nlp/nm/trainables/dialogue_state_tracking/sgd/__init__.py
new file mode 100644
index 000000000000..7f7de4a67ec5
--- /dev/null
+++ b/nemo/collections/nlp/nm/trainables/dialogue_state_tracking/sgd/__init__.py
@@ -0,0 +1,18 @@
+# =============================================================================
+# Copyright 2020 NVIDIA. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +from nemo.collections.nlp.nm.trainables.dialogue_state_tracking.sgd.sgd_decoder_nm import * +from nemo.collections.nlp.nm.trainables.dialogue_state_tracking.sgd.sgd_encoder_nm import * diff --git a/nemo/collections/nlp/nm/trainables/dialogue_state_tracking/sgd/sgd_decoder_nm.py b/nemo/collections/nlp/nm/trainables/dialogue_state_tracking/sgd/sgd_decoder_nm.py new file mode 100644 index 000000000000..30f1d5c8758c --- /dev/null +++ b/nemo/collections/nlp/nm/trainables/dialogue_state_tracking/sgd/sgd_decoder_nm.py @@ -0,0 +1,404 @@ +# ============================================================================= +# Copyright 2020 NVIDIA. All Rights Reserved. +# Copyright 2019 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +''' +This file contains code artifacts adapted from the original implementation: +https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/train_and_predict.py +''' + +import math +import sys + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from nemo.backends.pytorch.nm import TrainableNM +from nemo.core import ChannelType, EmbeddedTextType, LogitsType, NeuralType +from nemo.utils.decorators import add_port_docs + +__all__ = ['SGDDecoderNM'] + + +class LogitsAttention(nn.Module): + def __init__(self, num_classes, embedding_dim): + """Get logits for elements by using attention on token embedding. + Args: + num_classes (int): An int containing the number of classes for which logits are to be generated. + embedding_dim (int): hidden size of the BERT + + Returns: + A tensor of shape (batch_size, num_elements, num_classes) containing the logits. 
+ """ + super().__init__() + self.num_attention_heads = 16 + self.attention_head_size = embedding_dim // self.num_attention_heads + self.embedding_dim = embedding_dim + self.num_classes = num_classes + self.dropout = nn.Dropout(0.1) + + self.key = nn.Linear(embedding_dim, embedding_dim) + self.query = nn.Linear(embedding_dim, embedding_dim) + self.value = nn.Linear(embedding_dim, embedding_dim) + self.layer = nn.Linear(embedding_dim, num_classes) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward(self, encoded_utterance, token_embeddings, element_embeddings): + """ + token_embeddings - token hidden states from BERT encoding of the utterance + encoded_utterance - [CLS] token hidden state from BERT encoding of the utterance + element_embeddings: A tensor of shape (batch_size, num_elements, embedding_dim). + """ + _, num_elements, _ = element_embeddings.size() + + query_layer = self.query(element_embeddings) + key_layer = self.key(token_embeddings) + value_layer = self.value(token_embeddings) + + query_layer = self.transpose_for_scores(query_layer) + key_layer = self.transpose_for_scores(key_layer) + value_layer = self.transpose_for_scores(value_layer) + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.embedding_dim) + attention_probs = nn.Softmax(dim=-1)(attention_scores) + attention_probs = self.dropout(attention_probs) + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.embedding_dim,) + context_layer = context_layer.view(*new_context_layer_shape) + + logits = self.layer(context_layer) + return logits + + +class Logits(nn.Module): + def __init__(self, num_classes, embedding_dim): + """Get logits for elements by conditioning on utterance embedding. + Args: + num_classes (int): An int containing the number of classes for which logits are to be generated. + embedding_dim (int): hidden size of the BERT + + Returns: + A tensor of shape (batch_size, num_elements, num_classes) containing the logits. + """ + super().__init__() + self.num_classes = num_classes + self.utterance_proj = nn.Linear(embedding_dim, embedding_dim) + self.activation = F.gelu + + self.layer1 = nn.Linear(2 * embedding_dim, embedding_dim) + self.layer2 = nn.Linear(embedding_dim, num_classes) + + def forward(self, encoded_utterance, token_embeddings, element_embeddings): + """ + token_embeddings - token hidden states from BERT encoding of the utterance + encoded_utterance - [CLS] token hidden state from BERT encoding of the utterance + element_embeddings: A tensor of shape (batch_size, num_elements, embedding_dim). + """ + _, num_elements, _ = element_embeddings.size() + + # Project the utterance embeddings. + utterance_embedding = self.utterance_proj(encoded_utterance) + utterance_embedding = self.activation(utterance_embedding) + + # Combine the utterance and element embeddings. 
+        repeated_utterance_embedding = utterance_embedding.unsqueeze(1).repeat(1, num_elements, 1)
+
+        utterance_element_emb = torch.cat([repeated_utterance_embedding, element_embeddings], axis=2)
+        logits = self.layer1(utterance_element_emb)
+        logits = self.activation(logits)
+        logits = self.layer2(logits)
+        return logits
+
+
+class SGDDecoderNM(TrainableNM):
+    """
+    Baseline model for schema guided dialogue state tracking with option to make schema embeddings learnable
+    """
+
+    @property
+    @add_port_docs()
+    def input_ports(self):
+        """Returns definitions of module input ports.
+        encoded_utterance (float): [CLS] token hidden state from BERT encoding of the utterance
+        token_embeddings (float): BERT encoding of utterance (all tokens)
+        utterance_mask (bool): Mask which takes the value 0 for padded tokens and 1 otherwise
+        cat_slot_values_mask (int): Masks out categorical slots values for slots not used in the service, takes values 0 and 1
+        intent_status_mask (int): Masks out padded intents in the service, takes values 0 and 1
+        service_ids (int): service ids
+        """
+        return {
+            "encoded_utterance": NeuralType(('B', 'T'), EmbeddedTextType()),
+            "token_embeddings": NeuralType(('B', 'T', 'C'), ChannelType()),
+            "utterance_mask": NeuralType(('B', 'T'), ChannelType()),
+            "cat_slot_values_mask": NeuralType(('B', 'T', 'C'), ChannelType()),
+            "intent_status_mask": NeuralType(('B', 'T'), ChannelType()),
+            "service_ids": NeuralType(('B'), ChannelType()),
+        }
+
+    @property
+    @add_port_docs()
+    def output_ports(self):
+        """Returns definitions of module output ports.
+        logit_intent_status (float): output for intent status
+        logit_req_slot_status (float): output for requested slots status
+        logit_cat_slot_status (float): output for categorical slots status
+        logit_cat_slot_value (float): output for categorical slots values
+        logit_noncat_slot_status (float): output for non categorical slots status
+        logit_noncat_slot_start (float): output for non categorical slots values start
+        logit_noncat_slot_end (float): output for non categorical slots values end
+        """
+        return {
+            "logit_intent_status": NeuralType(('B', 'T', 'C'), LogitsType()),
+            "logit_req_slot_status": NeuralType(('B', 'T'), LogitsType()),
+            "logit_cat_slot_status": NeuralType(('B', 'T', 'C'), LogitsType()),
+            "logit_cat_slot_value": NeuralType(('B', 'T', 'C'), LogitsType()),
+            "logit_noncat_slot_status": NeuralType(('B', 'T', 'C'), LogitsType()),
+            "logit_noncat_slot_start": NeuralType(('B', 'T', 'C'), LogitsType()),
+            "logit_noncat_slot_end": NeuralType(('B', 'T', 'C'), LogitsType()),
+        }
+
+    def __init__(self, embedding_dim, schema_emb_processor, head_transform):
+        """Initializes the decoder: builds the per-task prediction heads and the schema embeddings.
+ + Args: + embedding_dim (int): hidden size of the BERT + schema_emb_processor (obj): contains schema embeddings for services and config file + head_transform (str): transformation to use for computing head + """ + super().__init__() + + # Add a trainable vector for the NONE intent + self.none_intent_vector = torch.empty((1, 1, embedding_dim), requires_grad=True).to(self._device) + # TODO truncated norm init + nn.init.normal_(self.none_intent_vector, std=0.02) + self.none_intent_vector = torch.nn.Parameter(self.none_intent_vector).to(self._device) + self.intent_layer = getattr(sys.modules[__name__], head_transform)(1, embedding_dim).to(self._device) + self.requested_slots_layer = getattr(sys.modules[__name__], head_transform)(1, embedding_dim).to(self._device) + + self.cat_slot_value_layer = getattr(sys.modules[__name__], head_transform)(1, embedding_dim).to(self._device) + + # Slot status values: none, dontcare, active. + self.cat_slot_status_layer = getattr(sys.modules[__name__], head_transform)(3, embedding_dim).to(self._device) + self.noncat_slot_layer = getattr(sys.modules[__name__], head_transform)(3, embedding_dim).to(self._device) + + # dim 2 for non_categorical slot - to represent start and end position + self.noncat_layer1 = nn.Linear(2 * embedding_dim, embedding_dim).to(self._device) + self.noncat_activation = F.gelu + self.noncat_layer2 = nn.Linear(embedding_dim, 2).to(self._device) + + config = schema_emb_processor.schema_config + num_services = len(schema_emb_processor.schemas.services) + self.intents_emb = nn.Embedding(num_services, config["MAX_NUM_INTENT"] * embedding_dim) + self.cat_slot_emb = nn.Embedding(num_services, config["MAX_NUM_CAT_SLOT"] * embedding_dim) + self.cat_slot_value_emb = nn.Embedding( + num_services, config["MAX_NUM_CAT_SLOT"] * config["MAX_NUM_VALUE_PER_CAT_SLOT"] * embedding_dim + ) + self.noncat_slot_emb = nn.Embedding(num_services, config["MAX_NUM_NONCAT_SLOT"] * embedding_dim) + self.req_slot_emb = nn.Embedding( + num_services, (config["MAX_NUM_CAT_SLOT"] + config["MAX_NUM_NONCAT_SLOT"]) * embedding_dim + ) + + # initialize schema embeddings from the BERT generated embeddings + schema_embeddings = schema_emb_processor.get_schema_embeddings() + self.intents_emb.weight.data.copy_( + torch.from_numpy(np.stack(schema_embeddings['intent_emb']).reshape(num_services, -1)) + ) + self.cat_slot_emb.weight.data.copy_( + torch.from_numpy(np.stack(schema_embeddings['cat_slot_emb']).reshape(num_services, -1)) + ) + self.cat_slot_value_emb.weight.data.copy_( + torch.from_numpy(np.stack(schema_embeddings['cat_slot_value_emb']).reshape(num_services, -1)) + ) + self.noncat_slot_emb.weight.data.copy_( + torch.from_numpy(np.stack(schema_embeddings['noncat_slot_emb']).reshape(num_services, -1)) + ) + self.req_slot_emb.weight.data.copy_( + torch.from_numpy(np.stack(schema_embeddings['req_slot_emb']).reshape(num_services, -1)) + ) + + if not schema_emb_processor.is_trainable: + self.intents_emb.weight.requires_grad = False + self.cat_slot_emb.weight.requires_grad = False + self.cat_slot_value_emb.weight.requires_grad = False + self.noncat_slot_emb.weight.requires_grad = False + self.req_slot_emb.weight.requires_grad = False + + self.to(self._device) + + def forward( + self, + encoded_utterance, + token_embeddings, + utterance_mask, + cat_slot_values_mask, + service_ids, + intent_status_mask, + ): + batch_size, emb_dim = encoded_utterance.size() + intent_embeddings = self.intents_emb(service_ids).view(batch_size, -1, emb_dim) + cat_slot_emb = 
self.cat_slot_emb(service_ids).view(batch_size, -1, emb_dim)
+        max_number_cat_slots = cat_slot_emb.shape[1]
+        cat_slot_value_emb = self.cat_slot_value_emb(service_ids).view(batch_size, max_number_cat_slots, -1, emb_dim)
+        noncat_slot_emb = self.noncat_slot_emb(service_ids).view(batch_size, -1, emb_dim)
+        req_slot_emb = self.req_slot_emb(service_ids).view(batch_size, -1, emb_dim)
+
+        logit_intent_status = self._get_intents(
+            encoded_utterance, intent_embeddings, intent_status_mask, token_embeddings
+        )
+
+        logit_req_slot_status = self._get_requested_slots(encoded_utterance, req_slot_emb, token_embeddings)
+
+        logit_cat_slot_status, logit_cat_slot_value = self._get_categorical_slot_goals(
+            encoded_utterance, cat_slot_emb, cat_slot_value_emb, cat_slot_values_mask, token_embeddings
+        )
+
+        (
+            logit_noncat_slot_status,
+            logit_noncat_slot_start,
+            logit_noncat_slot_end,
+        ) = self._get_noncategorical_slot_goals(encoded_utterance, utterance_mask, noncat_slot_emb, token_embeddings)
+
+        return (
+            logit_intent_status,
+            logit_req_slot_status,
+            logit_cat_slot_status,
+            logit_cat_slot_value,
+            logit_noncat_slot_status,
+            logit_noncat_slot_start,
+            logit_noncat_slot_end,
+        )
+
+    def _get_intents(self, encoded_utterance, intent_embeddings, intent_status_mask, token_embeddings):
+        """
+        Args:
+            intent_embeddings - BERT schema embeddings
+            encoded_utterance - representation of the utterance
+            intent_status_mask - masks out intents not used for the service
+        """
+        batch_size = intent_embeddings.size()[0]
+
+        # Add a trainable vector for the NONE intent.
+        repeated_none_intent_vector = self.none_intent_vector.repeat(batch_size, 1, 1)
+        intent_embeddings = torch.cat([repeated_none_intent_vector, intent_embeddings], axis=1)
+        logits = self.intent_layer(
+            encoded_utterance=encoded_utterance,
+            token_embeddings=token_embeddings,
+            element_embeddings=intent_embeddings,
+        )
+        logits = logits.squeeze(axis=-1)  # Shape: (batch_size, max_intents + 1)
+
+        # Mask out logits for padded intents
+        negative_logits = self._get_negative_logits(logits)
+        return torch.where(intent_status_mask.to(dtype=torch.bool), logits, negative_logits)
+
+    def _get_requested_slots(self, encoded_utterance, requested_slot_emb, token_embeddings):
+        """Obtain logits for requested slots."""
+
+        logits = self.requested_slots_layer(
+            encoded_utterance=encoded_utterance,
+            token_embeddings=token_embeddings,
+            element_embeddings=requested_slot_emb,
+        )
+        # logits shape: (batch_size, max_num_slots)
+        logits = logits.squeeze(axis=-1)
+        return logits
+
+    def _get_categorical_slot_goals(
+        self, encoded_utterance, cat_slot_emb, cat_slot_value_emb, cat_slot_values_mask, token_embeddings
+    ):
+        """
+        Obtain logits for status and values for categorical slots
+        Slot status values: none, dontcare, active
+        """
+
+        # Predict the status of all categorical slots.
+        status_logits = self.cat_slot_status_layer(
+            encoded_utterance=encoded_utterance, token_embeddings=token_embeddings, element_embeddings=cat_slot_emb
+        )
+
+        # Predict the goal value.
+        # Shape: (batch_size, max_categorical_slots, max_categorical_values, embedding_dim).
+ _, max_num_slots, max_num_values, embedding_dim = cat_slot_value_emb.size() + cat_slot_value_emb_reshaped = cat_slot_value_emb.view(-1, max_num_slots * max_num_values, embedding_dim) + + value_logits = self.cat_slot_value_layer( + encoded_utterance=encoded_utterance, + token_embeddings=token_embeddings, + element_embeddings=cat_slot_value_emb_reshaped, + ) + + # Reshape to obtain the logits for all slots. + value_logits = value_logits.view(-1, max_num_slots, max_num_values) + + # Mask out logits for padded slots and values because they will be softmaxed + negative_value_logits = self._get_negative_logits(value_logits) + value_logits = torch.where(cat_slot_values_mask.to(dtype=torch.bool), value_logits, negative_value_logits) + return status_logits, value_logits + + def _get_noncategorical_slot_goals(self, encoded_utterance, utterance_mask, noncat_slot_emb, token_embeddings): + """ + Obtain logits for status and slot spans for non-categorical slots. + Slot status values: none, dontcare, active + """ + # Predict the status of all non-categorical slots. + max_num_slots = noncat_slot_emb.size()[1] + status_logits = self.noncat_slot_layer( + encoded_utterance=encoded_utterance, token_embeddings=token_embeddings, element_embeddings=noncat_slot_emb + ) + + # Predict the distribution for span indices. + max_num_tokens = token_embeddings.size()[1] + + repeated_token_embeddings = token_embeddings.unsqueeze(1).repeat(1, max_num_slots, 1, 1) + repeated_slot_embeddings = noncat_slot_emb.unsqueeze(2).repeat(1, 1, max_num_tokens, 1) + + # Shape: (batch_size, max_num_slots, max_num_tokens, 2 * embedding_dim). + slot_token_embeddings = torch.cat([repeated_slot_embeddings, repeated_token_embeddings], axis=3) + + # Project the combined embeddings to obtain logits, Shape: (batch_size, max_num_slots, max_num_tokens, 2) + span_logits = self.noncat_layer1(slot_token_embeddings) + span_logits = self.noncat_activation(span_logits) + span_logits = self.noncat_layer2(span_logits) + + # Mask out invalid logits for padded tokens. + utterance_mask = utterance_mask.to(bool) # Shape: (batch_size, max_num_tokens). + repeated_utterance_mask = utterance_mask.unsqueeze(1).unsqueeze(3).repeat(1, max_num_slots, 1, 2) + negative_logits = (torch.finfo(span_logits.dtype).max * -0.7) * torch.ones( + span_logits.size(), device=self._device, dtype=span_logits.dtype + ) + + span_logits = torch.where(repeated_utterance_mask, span_logits, negative_logits) + + # Shape of both tensors: (batch_size, max_num_slots, max_num_tokens). + span_start_logits, span_end_logits = torch.unbind(span_logits, dim=3) + return status_logits, span_start_logits, span_end_logits + + def _get_negative_logits(self, logits): + # returns tensor with negative logits that will be used to mask out unused values + # for a particular service + negative_logits = (torch.finfo(logits.dtype).max * -0.7) * torch.ones( + logits.size(), device=self._device, dtype=logits.dtype + ) + return negative_logits diff --git a/nemo/collections/nlp/nm/trainables/dialogue_state_tracking/sgd/sgd_encoder_nm.py b/nemo/collections/nlp/nm/trainables/dialogue_state_tracking/sgd/sgd_encoder_nm.py new file mode 100644 index 000000000000..13c1887f04b2 --- /dev/null +++ b/nemo/collections/nlp/nm/trainables/dialogue_state_tracking/sgd/sgd_encoder_nm.py @@ -0,0 +1,90 @@ +# ============================================================================= +# Copyright 2020 NVIDIA. All Rights Reserved. +# Copyright 2019 The Google Research Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +''' +This file contains code artifacts adapted from the original implementation: +https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/train_and_predict.py +''' + +from torch import nn + +from nemo.backends.pytorch.nm import TrainableNM +from nemo.collections.nlp.utils.transformer_utils import transformer_weights_init +from nemo.core import ChannelType, EmbeddedTextType, NeuralType +from nemo.utils.decorators import add_port_docs + +__all__ = ['SGDEncoderNM'] + +ACTIVATIONS_F = { + "tanh": nn.Tanh, + "relu": nn.ReLU, +} + + +class SGDEncoderNM(TrainableNM): + """ + Neural module which extracts the first token from the BERT representation of the utterance + followed by a fully connected layer. + + Args: + hidden_size (int): hidden size of the BERT model + activation (str): activation function applied + dropout (float): dropout ratio + """ + + @property + @add_port_docs + def input_ports(self): + """ + Returns definitions of module input ports. + hidden_states (float): BERT representation of the utterance + """ + return {"hidden_states": NeuralType(('B', 'T', 'C'), ChannelType())} + + @property + @add_port_docs + def output_ports(self): + """Returns definitions of module output ports. 
+        logits (float): First token of the BERT representation of the utterance followed by fc and dropout
+        hidden_states (float): BERT representation of the utterance with applied dropout
+        """
+        return {
+            "logits": NeuralType(('B', 'T'), EmbeddedTextType()),
+            "hidden_states": NeuralType(('B', 'T', 'C'), ChannelType()),
+        }
+
+    def __init__(self, hidden_size, activation='tanh', dropout=0.0, use_transformer_pretrained=True):
+        super().__init__()
+        self.fc = nn.Linear(hidden_size, hidden_size).to(self._device)
+
+        if activation not in ACTIVATIONS_F:
+            raise ValueError(f'{activation} is not a supported activation; choose from {list(ACTIVATIONS_F.keys())}')
+
+        self.activation = ACTIVATIONS_F[activation]()
+        self.dropout1 = nn.Dropout(dropout)
+        self.dropout2 = nn.Dropout(dropout)
+
+        if use_transformer_pretrained:
+            self.apply(lambda module: transformer_weights_init(module, xavier=False))
+        # self.to(self._device) # sometimes this is necessary
+
+    def forward(self, hidden_states):
+        first_token_hidden_states = hidden_states[:, 0]
+        logits = self.fc(first_token_hidden_states)
+        logits = self.activation(logits)
+        logits = self.dropout1(logits)
+        return logits, self.dropout2(hidden_states)
diff --git a/nemo/collections/nlp/utils/data_utils.py b/nemo/collections/nlp/utils/data_utils.py
index d57c782fedca..1c3dcf5f0db8 100644
--- a/nemo/collections/nlp/utils/data_utils.py
+++ b/nemo/collections/nlp/utils/data_utils.py
@@ -17,7 +17,9 @@
 import re
 import string

-__all__ = ['get_vocab', 'get_tokens', 'normalize_answer', 'mask_padded_tokens']
+import numpy as np
+
+__all__ = ['get_vocab', 'get_tokens', 'normalize_answer', 'mask_padded_tokens', 'concatenate']


 def get_vocab(file):
@@ -55,3 +57,10 @@ def get_tokens(s):
 def mask_padded_tokens(tokens, pad_id):
     mask = tokens != pad_id
     return mask
+
+
+def concatenate(lists):
+    """
+    Concatenates a list of tensors along the first dimension after moving them to CPU (helper for inference).
+    """
+    return np.concatenate([t.cpu() for t in lists])
diff --git a/nemo/collections/tts/data_layers.py b/nemo/collections/tts/data_layers.py
index d57da99187e3..6d29b4504cc9 100644
--- a/nemo/collections/tts/data_layers.py
+++ b/nemo/collections/tts/data_layers.py
@@ -1,15 +1,13 @@
 # Copyright (c) 2019 NVIDIA Corporation
 import torch

-import nemo
 from .parts.datasets import AudioOnlyDataset
 from nemo.backends.pytorch.nm import DataLayerNM
 from nemo.core import DeviceType
 from nemo.core.neural_types import AudioSignal, LengthsType, NeuralType
+from nemo.utils import logging
 from nemo.utils.decorators import add_port_docs

-logging = nemo.logging

 class AudioDataLayer(DataLayerNM):
     """
diff --git a/nemo/collections/tts/parts/helpers.py b/nemo/collections/tts/parts/helpers.py
index fb5935ba8d84..0819b6398b45 100644
--- a/nemo/collections/tts/parts/helpers.py
+++ b/nemo/collections/tts/parts/helpers.py
@@ -4,9 +4,7 @@
 import numpy as np
 import torch

-import nemo
-
-logging = nemo.logging
+from nemo.utils import logging

 __all__ = [
     "waveglow_log_to_tb_func",
diff --git a/nemo/collections/tts/parts/tacotron2.py b/nemo/collections/tts/parts/tacotron2.py
index a201c1cfdbe3..925251f19f44 100644
--- a/nemo/collections/tts/parts/tacotron2.py
+++ b/nemo/collections/tts/parts/tacotron2.py
@@ -1,15 +1,12 @@
 # Copyright (c) 2019 NVIDIA Corporation
-from math import sqrt

 import torch
 from torch import nn
 from torch.autograd import Variable
 from torch.nn import functional as F

-import nemo
 from nemo.collections.tts.parts.layers import ConvNorm, LinearNorm, get_mask_from_lengths
-
-logging = nemo.logging
+from nemo.utils import logging


 class LocationLayer(nn.Module):
diff --git
a/nemo/collections/tts/tacotron2_modules.py b/nemo/collections/tts/tacotron2_modules.py index 2e4b235b1be1..5485728cd015 100644 --- a/nemo/collections/tts/tacotron2_modules.py +++ b/nemo/collections/tts/tacotron2_modules.py @@ -279,6 +279,42 @@ class Tacotron2DecoderInfer(Tacotron2Decoder): Defaults to 31. """ + def __init__( + self, + n_mel_channels: int, + n_frames_per_step: int = 1, + encoder_embedding_dim: int = 512, + gate_threshold: float = 0.5, + prenet_dim: int = 256, + max_decoder_steps: int = 1000, + decoder_rnn_dim: int = 1024, + p_decoder_dropout: float = 0.1, + p_attention_dropout: float = 0.1, + attention_rnn_dim: int = 1024, + attention_dim: int = 128, + attention_location_n_filters: int = 32, + attention_location_kernel_size: int = 31, + prenet_p_dropout: float = 0.5, + force: bool = False, + ): + super().__init__( + n_mel_channels=n_mel_channels, + n_frames_per_step=n_frames_per_step, + encoder_embedding_dim=encoder_embedding_dim, + gate_threshold=gate_threshold, + prenet_dim=prenet_dim, + max_decoder_steps=max_decoder_steps, + decoder_rnn_dim=decoder_rnn_dim, + p_decoder_dropout=p_decoder_dropout, + p_attention_dropout=p_attention_dropout, + attention_rnn_dim=attention_rnn_dim, + attention_dim=attention_dim, + attention_location_n_filters=attention_location_n_filters, + attention_location_kernel_size=attention_location_kernel_size, + prenet_p_dropout=prenet_p_dropout, + force=force, + ) + @property @add_port_docs() def input_ports(self): @@ -483,6 +519,9 @@ class MakeGate(NonTrainableNM): """MakeGate is a helper Neural Module that makes the target stop value. """ + def __init__(self): + super().__init__() + @property @add_port_docs() def input_ports(self): diff --git a/nemo/constants.py b/nemo/constants.py index 6cd3a1f60ff8..9d6793d7630a 100644 --- a/nemo/constants.py +++ b/nemo/constants.py @@ -47,4 +47,5 @@ # NEMO_ENV_VARNAME_DEBUG_VERBOSITY = "NEMO_DEBUG_VERBOSITY" NEMO_ENV_VARNAME_ENABLE_COLORING = "NEMO_ENABLE_COLORING" NEMO_ENV_VARNAME_REDIRECT_LOGS_TO_STDERR = "NEMO_REDIRECT_LOGS_TO_STDERR" +NEMO_ENV_VARNAME_TESTING = "NEMO_TESTING" # NEMO_ENV_VARNAME_SAVE_LOGS_TO_DIR = "NEMO_SAVE_LOGS_TO_DIR" diff --git a/nemo/core/callbacks.py b/nemo/core/callbacks.py index e465bf5bf95a..12e39a99c25f 100644 --- a/nemo/core/callbacks.py +++ b/nemo/core/callbacks.py @@ -25,7 +25,7 @@ from collections import namedtuple import nemo -from nemo.utils import get_checkpoint_from_dir +from nemo.utils import get_checkpoint_from_dir, logging try: import wandb @@ -34,8 +34,6 @@ except (ImportError, ModuleNotFoundError): _WANDB_AVAILABLE = False -logging = nemo.logging - class ActionCallback(ABC): """Abstract interface for callbacks. 
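A quick orientation note on the new NEMO_TESTING constant introduced above: it is consumed by the logging changes in nemo/utils/nemo_logging.py further down in this patch. A small usage sketch, assuming the variable is set before the nemo logger singleton is created (the exact log prefix comes from the DebugNeMoFormatter added below and is shown here only as an illustration):

# Illustrative use of the new NEMO_TESTING switch: when set, every rank installs
# a stream handler and log records carry a `rank` field in their prefix.
import os

os.environ["NEMO_TESTING"] = "1"  # must happen before nemo's logger is configured

from nemo.utils import logging

logging.info("hello")  # e.g. rendered as `[NeMo I ... module:line rank:0] hello`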
diff --git a/nemo/core/neural_factory.py b/nemo/core/neural_factory.py
index b5c1930d0e06..59b266965669 100644
--- a/nemo/core/neural_factory.py
+++ b/nemo/core/neural_factory.py
@@ -36,10 +36,9 @@
 from ..utils import ExpManager
 from .callbacks import ActionCallback, EvaluatorCallback
 from .neural_types import *
+from nemo.utils import logging
 from nemo.utils.decorators import deprecated

-logging = nemo.logging


 class DeploymentFormat(Enum):
     """Which format to use when exporting a Neural Module for deployment"""
diff --git a/nemo/core/neural_graph.py b/nemo/core/neural_graph.py
index 917af83b117c..4e8e486eb1be 100644
--- a/nemo/core/neural_graph.py
+++ b/nemo/core/neural_graph.py
@@ -477,7 +477,7 @@ def export_to_config(self, config_file: str):
             YAML.dump(to_export, outfile)

         logging.info(
-            "Configuration of graph `{}` ({}) exported to {}".format(self.name, type(self).__name__, abs_path_file)
+            "Configuration of graph `{}` ({}) exported to '{}'".format(self.name, type(self).__name__, abs_path_file)
         )

     def serialize(self) -> Dict[str, Any]:
@@ -874,14 +874,21 @@ def summary(self) -> str:
             A nice, full graph summary.
         """
         # Line "decorator".
-        desc = "\n" + 120 * '=' + "\n"
+        desc = "\n" + 113 * '=' + "\n"
         # 1. general information.
-        desc += "The `{}` Neural Graph:\n".format(self.name)
+        desc += "The `{}` Neural Graph [{}]".format(self.name, self.operation_mode)
+        if self.is_complete():
+            desc += " [COMPLETE]:\n"
+        else:
+            desc += " [INCOMPLETE]:\n"
         # 2. modules.
         desc += " * Modules ({}):\n".format(len(self._modules))
         for key, module in self._modules.items():
-            desc += " * `{}` ({})\n".format(key, type(module).__name__)
+            if module.type == ModuleType.trainable and module.is_frozen():
+                desc += " * `{}` ({}) [FROZEN]\n".format(key, type(module).__name__)
+            else:
+                desc += " * `{}` ({})\n".format(key, type(module).__name__)

         # 3. steps.
         desc += " * Steps ({}):\n".format(len(self._steps))
@@ -912,7 +919,7 @@ def summary(self) -> str:
             for output in outputs["mappings"]:
                 desc += " * {}\n".format(output)
         # Line "decorator".
-        desc += 120 * '='
+        desc += 113 * '='
         # Return the result.
         return desc
@@ -1030,9 +1037,10 @@ def restore_from(self, filename: str, module_names: Optional[List[str]] = None):
             try:
                 # Get module.
                 module = self._modules[name]
-                # Restore module weights
-                set_state_dict(module, chkpt["modules"][name])
-                log_str += " * Module '{}' ({}) params loaded\n".format(module.name, type(module).__name__)
+                if module.type == ModuleType.trainable:
+                    # Restore module weights
+                    set_state_dict(module, chkpt["modules"][name])
+                    log_str += " * Module '{}' ({}) params loaded\n".format(module.name, type(module).__name__)
             except KeyError:
                 log_str += " ! Module '{}' params not found in checkpoint\n".format(name)
                 warning = True
@@ -1042,3 +1050,32 @@ def restore_from(self, filename: str, module_names: Optional[List[str]] = None):
             logging.warning(log_str)
         else:
             logging.info(log_str)
+
+    def is_complete(self) -> bool:
+        """
+        Method checks whether the graph is "complete". Here "complete" means that the graph has:
+            * exactly one DataLayer
+            * zero bound input ports
+
+        In short, it means that the graph is self-contained and can be executed on its own.
+
+        Returns:
+            True or False.
+        """
+        has_datalayer = False
+        # Iterate through the modules one by one.
+        for module in self._modules.values():
+            # Get module.
+            if module.type == ModuleType.datalayer:
+                if has_datalayer:
+                    # More than one DL is not acceptable.
+                    return False
+                else:
+                    has_datalayer = True

+        # Now check the ports.
+        if len(self._inputs) != 0:
+            return False
+
+        # Finally, a complete graph must actually contain a DataLayer.
+        return has_datalayer
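A small usage sketch of the new completeness check; graph construction is elided, and `training_graph` is assumed to be a NeuralGraph built elsewhere with a data layer bound inside it:

# Hypothetical guard before running a graph (names assumed for illustration):
if not training_graph.is_complete():
    raise RuntimeError("Graph has unbound input ports or lacks a DataLayer; cannot run standalone.")
# summary() now also reports the operation mode and [COMPLETE]/[INCOMPLETE] status.
print(training_graph.summary())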
diff --git a/nemo/core/neural_modules.py b/nemo/core/neural_modules.py
index 163db3ea3513..decb2a0acd35 100644
--- a/nemo/core/neural_modules.py
+++ b/nemo/core/neural_modules.py
@@ -121,10 +121,10 @@ def __extract_init_params(self) -> Dict[str, Any]:

         # Get the frame "call context".
         for frame in stack()[1:]:
-            # Get the call arguments.
+            # Get the current call arguments.
             localvars = getargvalues(frame[0])

-            # Fill the parameters with call_args.
+            # Fill the parameters with call arguments.
             for key in to_set_params:
                 if key in localvars.args:
                     init_params[key] = localvars.locals[key]
@@ -142,7 +142,7 @@ def __extract_init_params(self) -> Dict[str, Any]:
         if len(to_set_params) != 0:
             raise ValueError(
                 "Could not collect all the signature params! "
-                F"Please file a bug on GitHub with the current stacktrace so that it can be resolved."
+                f"Please file a bug on GitHub with the current stack trace so that it can be reproduced."
             )

         # print("! init_params of {}: {}\n".format(type(self).__name__, init_params))
@@ -228,7 +228,7 @@ def export_to_config(self, config_file: str):
             YAML.dump(to_export, outfile)

         logging.info(
-            "Configuration of module `{}` ({}) exported to {}".format(self.name, type(self).__name__, abs_path_file)
+            "Configuration of module `{}` ({}) exported to '{}'".format(self.name, type(self).__name__, abs_path_file)
         )

     def serialize(self) -> Dict[str, Any]:
diff --git a/nemo/utils/decorators/deprecated.py b/nemo/utils/decorators/deprecated.py
index 80e330c4be56..d738c8a18031 100644
--- a/nemo/utils/decorators/deprecated.py
+++ b/nemo/utils/decorators/deprecated.py
@@ -22,7 +22,7 @@

 from nemo.utils import logging

-# logging = nemo.logging
+# from nemo.utils import logging

 # Remember which deprecation warnings have been printed already.
 _PRINTED_WARNING = {}
diff --git a/nemo/utils/exp_logging.py b/nemo/utils/exp_logging.py
index 7f9a3ad9b0fa..fd3a0540ffe2 100644
--- a/nemo/utils/exp_logging.py
+++ b/nemo/utils/exp_logging.py
@@ -9,11 +9,11 @@

 from nemo.utils.decorators import deprecated

-# logging = nemo.logging
+# from nemo.utils import logging


 @deprecated(
     version=0.11,
     explanation=(
-        "Please use nemo.logging instead by using logging = nemo.logging and logging.info(), "
+        "Please use logging from nemo.utils instead, i.e. `from nemo.utils import logging` and then logging.info(), "
         "logging.warning() , etc."
), ) diff --git a/nemo/utils/formatters/base.py b/nemo/utils/formatters/base.py index 6b844877b185..12500477b9c8 100644 --- a/nemo/utils/formatters/base.py +++ b/nemo/utils/formatters/base.py @@ -126,3 +126,9 @@ def format(self, record): class BaseNeMoFormatter(BaseFormatter): DEFAULT_FORMAT = "%(color)s[NeMo %(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s" + + +class DebugNeMoFormatter(BaseFormatter): + DEFAULT_FORMAT = ( + "%(color)s[NeMo %(levelname)1.1s %(asctime)s %(module)s:%(lineno)d rank:%(rank)d]%(end_color)s %(message)s" + ) diff --git a/nemo/utils/helpers.py b/nemo/utils/helpers.py index aa2cca686aea..b21b7200b58f 100644 --- a/nemo/utils/helpers.py +++ b/nemo/utils/helpers.py @@ -13,7 +13,7 @@ import nemo from nemo.utils import logging -# logging = nemo.logging +# from nemo.utils import logging def rgetattr(obj, attr, *args): diff --git a/nemo/utils/nemo_logging.py b/nemo/utils/nemo_logging.py index 1551acf84839..8a2bd06040d6 100644 --- a/nemo/utils/nemo_logging.py +++ b/nemo/utils/nemo_logging.py @@ -20,9 +20,9 @@ from contextlib import contextmanager # from nemo.constants import NEMO_ENV_VARNAME_SAVE_LOGS_TO_DIR -from nemo.constants import NEMO_ENV_VARNAME_REDIRECT_LOGS_TO_STDERR +from nemo.constants import NEMO_ENV_VARNAME_REDIRECT_LOGS_TO_STDERR, NEMO_ENV_VARNAME_TESTING from nemo.utils.env_var_parsing import get_envbool, get_envint -from nemo.utils.formatters.base import BaseNeMoFormatter +from nemo.utils.formatters.base import BaseNeMoFormatter, DebugNeMoFormatter from nemo.utils.metaclasses import Singleton __all__ = ["Logger", "LogMode"] @@ -88,7 +88,17 @@ def _define_logger(self): self._logger = _logging.getLogger("nemo_logger") # By default, silence all loggers except the logger for rank 0 self.remove_stream_handlers() - if get_envint("RANK", 0) == 0: + if get_envbool(NEMO_ENV_VARNAME_TESTING, False): + old_factory = _logging.getLogRecordFactory() + + def record_factory(*args, **kwargs): + record = old_factory(*args, **kwargs) + record.rank = get_envint("RANK", 0) + return record + + _logging.setLogRecordFactory(record_factory) + self.add_stream_handlers(formatter=DebugNeMoFormatter) + elif get_envint("RANK", 0) == 0: self.add_stream_handlers() finally: @@ -112,7 +122,7 @@ def remove_stream_handlers(self): except KeyError: pass - def add_stream_handlers(self): + def add_stream_handlers(self, formatter=BaseNeMoFormatter): if self._logger is None: raise RuntimeError("Impossible to set handlers if the Logger is not predefined") @@ -127,8 +137,6 @@ def add_stream_handlers(self): self._handlers["stream_stderr"] = _logging.StreamHandler(sys.stderr) self._handlers["stream_stderr"].addFilter(lambda record: record.levelno > _logging.INFO) - formatter = BaseNeMoFormatter - self._handlers["stream_stdout"].setFormatter(formatter()) self._logger.addHandler(self._handlers["stream_stdout"]) @@ -138,9 +146,9 @@ def add_stream_handlers(self): except KeyError: pass - def reset_stream_handler(self): + def reset_stream_handler(self, formatter=BaseNeMoFormatter): self.remove_stream_handlers() - self.add_stream_handlers() + self.add_stream_handlers(formatter=formatter) def add_file_handler(self, log_file): if self._logger is None: diff --git a/nemo/utils/neural_graph/graph_outputs.py b/nemo/utils/neural_graph/graph_outputs.py index 6c494d986b7f..6f14c6848cb8 100644 --- a/nemo/utils/neural_graph/graph_outputs.py +++ b/nemo/utils/neural_graph/graph_outputs.py @@ -75,12 +75,12 @@ def __init__(self, tensors_ref): # Tensors[step][output_port_name] passed from the 
external neural graph object.
         self._tensors_ref = tensors_ref

-        # This dictionary stores the output tensors collected during the "default" tensor recording.
+        # This dictionary stores the bound outputs collected during the "default" recording of produced tensors.
         # As they are using the default port names, the second/next tensor published on the same port
         # will generate a new unique name following the (step_number.module.port_name) pattern.
         self._default_outputs = {}

-        # This dictionary stores list of output tensors of module "manually" indicated by the user.
+        # This dictionary stores list of outputs of modules "manually" bound by the user.
         # In this case trying to overwrite the existing ports with new tensors will be forbidden (Exception).
         self._manual_outputs = {}
diff --git a/nemo/utils/neural_graph/neural_graph_manager.py b/nemo/utils/neural_graph/neural_graph_manager.py
index b016b57dc3b8..b8b2e1deeb1f 100644
--- a/nemo/utils/neural_graph/neural_graph_manager.py
+++ b/nemo/utils/neural_graph/neural_graph_manager.py
@@ -45,11 +45,14 @@ def summary(self) -> str:
         Returns:
             A summary of the graphs on the list.
         """
-        # TODO: a nicer summary. ;)
-        desc = "List of graphs:"
+        # Line "decorator".
+        summary = "\n" + 113 * '=' + "\n"
+        summary += "Registry of {}s:\n".format(self._base_type_name)
         for graph in self:
-            desc = desc + "`{}`: {}\n".format(graph.name, graph)
-        return desc
+            summary += " * {} ({}) [{}]\n".format(graph.name, len(graph), graph.operation_mode)
+        # Line "decorator".
+        summary += 113 * '='
+        return summary

     @property
     def active_graph(self) -> "NeuralGraph":
diff --git a/nemo/utils/neural_graph/object_registry.py b/nemo/utils/neural_graph/object_registry.py
index 8e861e529944..464cda92f219 100644
--- a/nemo/utils/neural_graph/object_registry.py
+++ b/nemo/utils/neural_graph/object_registry.py
@@ -137,7 +137,11 @@ def summary(self) -> str:
         Returns:
             A summary of the objects on the list.
         """
-        summary = "Registry of {}s:\n".format(self._base_type_name)
+        # Line "decorator".
+        summary = "\n" + 113 * '=' + "\n"
+        summary += "Registry of {}s:\n".format(self._base_type_name)
         for obj in self:
             summary += " * {} ({})\n".format(obj.name, type(obj).__name__)
+        # Line "decorator".
diff --git a/requirements/requirements.txt b/requirements/requirements.txt
index 204ed8dbee7f..5d46fee518a4 100644
--- a/requirements/requirements.txt
+++ b/requirements/requirements.txt
@@ -11,3 +11,4 @@ wget
 wrapt
 ruamel.yaml
 sklearn
+scipy
diff --git a/requirements/requirements_nlp.txt b/requirements/requirements_nlp.txt
index f923e8869ea9..f3cbda69415c 100644
--- a/requirements/requirements_nlp.txt
+++ b/requirements/requirements_nlp.txt
@@ -8,5 +8,8 @@ unidecode
 youtokentome
 numpy
 tqdm
+sklearn
+rapidfuzz
 gdown
 megatron-lm
+inflect
diff --git a/requirements/requirements_simple_gan.txt b/requirements/requirements_simple_gan.txt
index 8f59cf99bbac..6ccafc3f904b 100644
--- a/requirements/requirements_simple_gan.txt
+++ b/requirements/requirements_simple_gan.txt
@@ -1,2 +1 @@
 matplotlib
-torchvision
\ No newline at end of file
diff --git a/requirements/requirements_tts.txt b/requirements/requirements_tts.txt
index 61ff985cc778..3d5ac563c873 100644
--- a/requirements/requirements_tts.txt
+++ b/requirements/requirements_tts.txt
@@ -1,5 +1,3 @@
-librosa
 matplotlib
 pypinyin
-scipy
-attrdict
\ No newline at end of file
+attrdict
diff --git a/scripts/convert_wav_to_g711wav.py b/scripts/convert_wav_to_g711wav.py
new file mode 100644
index 000000000000..f882e5fc64cc
--- /dev/null
+++ b/scripts/convert_wav_to_g711wav.py
@@ -0,0 +1,93 @@
+# Copyright 2020 NVIDIA. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# USAGE:
+#   python convert_wav_to_g711wav.py \
+#     --data_dir= \
+#     --dest_dir=
+#
+# Converts all wav audio files to PCM u-law wav files (8 kHz, 8-bit).
+# Requires sox to be installed.
+import argparse
+import concurrent.futures
+import glob
+import logging
+import os
+import subprocess
+
+from tqdm import tqdm
+
+parser = argparse.ArgumentParser(description='Convert wav audio to pcm mulaw wav')
+parser.add_argument(
+    "--data_dir", default=None, type=str, required=True, help="The path to the input directory with .wav files.",
+)
+parser.add_argument(
+    "--dest_dir", default=None, type=str, required=True, help="Path to the destination directory.",
+)
+args = parser.parse_args()
+
+
+def __convert_audio(in_path, out_path):
+    """
+    Helper function that's called per process; converts a wav file to a G.711 wav file.
+    Args:
+        in_path: source wav file to convert
+        out_path: destination for the G.711 wav file
+    """
+    cmd = ["sox", in_path, "-r", "8000", "-c", "1", "-e", "u-law", out_path]
+    subprocess.run(cmd)
+
+
+def __process_set(data_dir, dst_root):
+    """
+    Finds and converts all wav audio files in the given directory to pcm_mulaw.
+    Args:
+        data_dir: source directory with wav files to convert
+        dst_root: where G.711 (pcm_mulaw) wav files will be stored
+    """
+    wav_list = glob.glob(data_dir)
+
+    if not os.path.exists(dst_root):
+        os.makedirs(dst_root)
+
+    # Set up and execute concurrent audio conversion
+    tp = concurrent.futures.ProcessPoolExecutor(max_workers=64)
+    futures = []
+
+    for wav_path in tqdm(wav_list, desc="Submitting wav futures", unit="file"):
+        audio_id = os.path.basename(wav_path)
+        out_path = os.path.join(dst_root, audio_id)
+        futures.append(tp.submit(__convert_audio, wav_path, out_path))
+
+    pbar = tqdm(total=len(wav_list), desc="Converting wav files", unit="file")
+    count = 0
+    for f in concurrent.futures.as_completed(futures):
+        count += 1
+        pbar.update()
+    tp.shutdown()
+    pbar.close()
+
+
+def main():
+    data_dir = args.data_dir
+    dest_dir = args.dest_dir
+
+    logging.info("\n\nConverting audio in %s", data_dir)
+    __process_set(
+        os.path.join(data_dir, "*.wav"), dest_dir,
+    )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/scripts/export_jasper_to_onnx.py b/scripts/export_jasper_to_onnx.py
index daa9459394a0..6df997e11c86 100644
--- a/scripts/export_jasper_to_onnx.py
+++ b/scripts/export_jasper_to_onnx.py
@@ -7,8 +7,7 @@
 import nemo
 import nemo.collections.asr as nemo_asr
-
-logging = nemo.logging
+from nemo.utils import logging


 def get_parser():
diff --git a/tests/integration/test_asr_gradient_step_and_eval.py b/tests/integration/test_asr_gradient_step_and_eval.py
index d68898c076b6..9b883c4dfa4a 100644
--- a/tests/integration/test_asr_gradient_step_and_eval.py
+++ b/tests/integration/test_asr_gradient_step_and_eval.py
@@ -24,14 +24,13 @@
 import pytest
 from ruamel.yaml import YAML

-import nemo
 import nemo.collections.asr as nemo_asr
-
-logging = nemo.logging
+from nemo.core import EvaluatorCallback, SimpleLossLoggerCallback
+from nemo.utils import logging


 @pytest.mark.usefixtures("neural_factory")
-class TestASRPytorch(TestCase):
+class TestASRIntegrationPytorch(TestCase):
     labels = [
         " ",
         "a",
@@ -148,7 +147,7 @@ def test_jasper_training(self):
         )

         loss_list = []
-        callback = nemo.core.SimpleLossLoggerCallback(
+        callback = SimpleLossLoggerCallback(
             tensors=[loss], print_func=partial(self.print_and_log_loss, loss_log_list=loss_list), step_freq=1
         )
@@ -200,7 +199,7 @@ def test_quartznet_training(self):
         )

         loss_list = []
-        callback = nemo.core.SimpleLossLoggerCallback(
+        callback = SimpleLossLoggerCallback(
             tensors=[loss], print_func=partial(self.print_and_log_loss, loss_log_list=loss_list), step_freq=1
         )
@@ -258,7 +257,7 @@ def test_contextnet_ctc_training(self):
         )

         loss_list = []
-        callback = nemo.core.SimpleLossLoggerCallback(
+        callback = SimpleLossLoggerCallback(
             tensors=[loss], print_func=partial(self.print_and_log_loss, loss_log_list=loss_list), step_freq=1
         )
@@ -315,7 +314,7 @@ def test_stft_conv_training(self):
         )

         loss_list = []
-        callback = nemo.core.SimpleLossLoggerCallback(
+        callback = SimpleLossLoggerCallback(
             tensors=[loss], print_func=partial(self.print_and_log_loss, loss_log_list=loss_list), step_freq=1
         )
@@ -373,7 +372,7 @@ def test_jasper_evaluation(self):
             process_evaluation_epoch,
         )

-        eval_callback = nemo.core.EvaluatorCallback(
+        eval_callback = EvaluatorCallback(
             eval_tensors=[loss, predictions, transcript, transcript_len],
             user_iter_callback=lambda x, y: process_evaluation_batch(x, y, labels=self.labels),
             user_epochs_done_callback=process_evaluation_epoch,
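Since the new script delegates the actual transcoding to `sox`, a converted file can be sanity-checked independently of NeMo. A hypothetical invocation plus a quick verification sketch (the paths are illustrative, and the check assumes the `soundfile` package, which is not among the requirements added in this change):

# Hypothetical invocation of the new script:
#   python scripts/convert_wav_to_g711wav.py \
#       --data_dir=/data/LibriSpeech/dev-clean-wav \
#       --dest_dir=/data/LibriSpeech/dev-clean-g711
#
# Verify one output file really is 8 kHz mono u-law.
import soundfile as sf

info = sf.info("/data/LibriSpeech/dev-clean-g711/sample.wav")
print(info.samplerate)  # expected: 8000
print(info.channels)    # expected: 1
print(info.subtype)     # expected: 'ULAW'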
diff --git a/tests/integration/test_integration_multidataset.py b/tests/integration/test_integration_multidataset.py
index 892d3e08bcb4..4eee92058e8b 100644
--- a/tests/integration/test_integration_multidataset.py
+++ b/tests/integration/test_integration_multidataset.py
@@ -26,8 +26,7 @@
 import nemo
 from nemo.backends.pytorch.common import DataCombination
 from nemo.core import ChannelType, NeuralType
-
-logging = nemo.logging
+from nemo.utils import logging


 @pytest.mark.usefixtures("neural_factory")
diff --git a/tests/integration/test_speaker_recognition_gradient_step.py b/tests/integration/test_speaker_recognition_gradient_step.py
index ab062ddbad81..cf2535e9c9af 100644
--- a/tests/integration/test_speaker_recognition_gradient_step.py
+++ b/tests/integration/test_speaker_recognition_gradient_step.py
@@ -25,8 +25,7 @@
 import nemo
 import nemo.collections.asr as nemo_asr
-
-logging = nemo.logging
+from nemo.utils import logging


 @pytest.mark.usefixtures("neural_factory")
diff --git a/tests/integration/test_speechcommands_gradient_step_and_eval.py b/tests/integration/test_speechcommands_gradient_step_and_eval.py
index 2f6dcf3b2be2..c997ca98ad94 100644
--- a/tests/integration/test_speechcommands_gradient_step_and_eval.py
+++ b/tests/integration/test_speechcommands_gradient_step_and_eval.py
@@ -26,8 +26,7 @@
 import nemo
 import nemo.collections.asr as nemo_asr
-
-logging = nemo.logging
+from nemo.utils import logging


 @pytest.mark.usefixtures("neural_factory")
diff --git a/tests/integration/test_tts_gradient_step.py b/tests/integration/test_tts_gradient_step.py
index 8ffa18fb6269..8b8e500d81f4 100644
--- a/tests/integration/test_tts_gradient_step.py
+++ b/tests/integration/test_tts_gradient_step.py
@@ -25,11 +25,11 @@
 import numpy as np
 import pytest

-import nemo
 import nemo.collections.asr as nemo_asr
 import nemo.collections.tts as nemo_tts
-
-logging = nemo.logging
+from nemo.backends.pytorch.actions import PtActions
+from nemo.core import SimpleLossLoggerCallback
+from nemo.utils import logging


 @pytest.mark.usefixtures("neural_factory")
@@ -158,11 +158,11 @@ def test_tacotron2_training(self):
         )

         loss_list = []
-        callback = nemo.core.SimpleLossLoggerCallback(
+        callback = SimpleLossLoggerCallback(
             tensors=[loss_t], print_func=partial(self.print_and_log_loss, loss_log_list=loss_list), step_freq=1
         )

         # Instantiate an optimizer to perform `train` action
-        optimizer = nemo.backends.pytorch.actions.PtActions()
+        optimizer = PtActions()
         optimizer.train(
             [loss_t], callbacks=[callback], optimizer="sgd", optimization_params={"max_steps": 3, "lr": 0.01}
         )
@@ -212,11 +212,11 @@ def test_waveglow_training(self):
         loss_t = waveglow_loss(z=z, log_s_list=log_s_list, log_det_W_list=log_det_W_list)

         loss_list = []
-        callback = nemo.core.SimpleLossLoggerCallback(
+        callback = SimpleLossLoggerCallback(
             tensors=[loss_t], print_func=partial(self.print_and_log_loss, loss_log_list=loss_list), step_freq=1
         )

         # Instantiate an optimizer to perform `train` action
-        optimizer = nemo.backends.pytorch.actions.PtActions()
+        optimizer = PtActions()
         optimizer.train(
             [loss_t], callbacks=[callback], optimizer="sgd", optimization_params={"max_steps": 3, "lr": 0.01}
         )
@@ -314,11 +314,11 @@ def test_fastspeech(self):
         )

         loss_list = []
-        callback = nemo.core.SimpleLossLoggerCallback(
+        callback = SimpleLossLoggerCallback(
             tensors=[loss_t], print_func=partial(self.print_and_log_loss, loss_log_list=loss_list), step_freq=1
         )

         # Instantiate an optimizer to perform `train` action
-        optimizer = nemo.backends.pytorch.actions.PtActions()
+        optimizer = PtActions()
         optimizer.train(
optimizer="sgd", optimization_params={"max_steps": 3, "lr": 0.0003} ) diff --git a/tests/unit/core/test_weight_share.py b/tests/unit/core/test_weight_share.py index 53c6dad81356..165db51f923b 100644 --- a/tests/unit/core/test_weight_share.py +++ b/tests/unit/core/test_weight_share.py @@ -34,8 +34,7 @@ from nemo.collections.nlp.nm.trainables.common import TokenClassifier from nemo.core import WeightShareTransform from nemo.core.neural_types import * - -logging = nemo.logging +from nemo.utils import logging @pytest.mark.usefixtures("neural_factory") diff --git a/tests/unit/test_unit_asr.py b/tests/unit/test_unit_asr.py index a664ac03fd23..ff6cc6985878 100644 --- a/tests/unit/test_unit_asr.py +++ b/tests/unit/test_unit_asr.py @@ -29,15 +29,13 @@ import nemo.collections.asr as nemo_asr from nemo.collections.asr.parts import AudioDataset, WaveformFeaturizer, collections, parsers from nemo.core import DeviceType - -logging = nemo.logging - +from nemo.utils import logging freq = 16000 @pytest.mark.usefixtures("neural_factory") -class TestASRPytorch(TestCase): +class TestUnitASRPytorch(TestCase): labels = [ " ", "a", diff --git a/tests/unit/test_unit_multidataset.py b/tests/unit/test_unit_multidataset.py index 9d8384df8ac4..1ef74caeadaf 100644 --- a/tests/unit/test_unit_multidataset.py +++ b/tests/unit/test_unit_multidataset.py @@ -26,8 +26,7 @@ import nemo from nemo.backends.pytorch.common import DataCombination from nemo.core import ChannelType, NeuralType - -logging = nemo.logging +from nemo.utils import logging @pytest.mark.usefixtures("neural_factory") diff --git a/tests/unit/test_unit_speech_commands.py b/tests/unit/test_unit_speech_commands.py index d8563ceafd3a..3077c08708b1 100644 --- a/tests/unit/test_unit_speech_commands.py +++ b/tests/unit/test_unit_speech_commands.py @@ -29,9 +29,7 @@ import nemo.collections.asr as nemo_asr from nemo.collections.asr.parts import AudioLabelDataset, WaveformFeaturizer, collections, parsers, perturb from nemo.core import DeviceType - -logging = nemo.logging - +from nemo.utils import logging freq = 16000 diff --git a/tests/unit/utils/test_deprecated.py b/tests/unit/utils/test_deprecated.py index 2ae3e5cb156f..4f1c9490e60f 100644 --- a/tests/unit/utils/test_deprecated.py +++ b/tests/unit/utils/test_deprecated.py @@ -30,7 +30,7 @@ class DeprecatedTest(TestCase): NEMO_ERR_MSG_FORMAT = re.compile( - r"\[NeMo W [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} deprecated:[0-9]*\] " + r"\[NeMo W [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} deprecated:[0-9]+( rank:[0-9]+)?\] " ) @pytest.mark.unit