From 494498e83b072b6190674c2a6aec0383ce5517e8 Mon Sep 17 00:00:00 2001 From: Rahul Huilgol Date: Tue, 13 Aug 2019 23:48:09 -0700 Subject: [PATCH] One Repo to rule them all (#72) * WIP, added tf and core * WIP * add code from all repos, and fix imports * fix more imports, add tests * add docs, examples * fix imports in examples * fix setup.py and CI * fix test invoker * Reload a step directory when it was last seen as empty (#117) * fix imports * fix new imports * unskip test * Add setup.py * undo end of training merge * remove import * Add training end code * add frameworks * fix function used * update setup to use append * fixing small errors (#74) * testing * testing * testing * testing * testing * testing * testing * trigger ci * trigger ci * trigger ci * trigger ci * testing * testing * testing * testing * testing * testing * testing * testing * testing * testing * testing * testing * testing * testing * testing * testing * testing * testing * testing * testing * uploading test reports to s3 * uploading test reports to s3 * uploading test reports to s3 * uploading test reports to s3 * changes * changes * docs * Add subpackages in core * docs and examples * provides trials and rules as part of main namescope * move rules and trials outside * fix training end tests, and update setup.py * new readme for whole repo * fix setup.py * update packages * make the mxnet tests faster * reduce lenght of integration tests * add script to build binaries * update argument * change num steps and frequency * delete path * add boto3 * fix training end tests * changes * move exceptions to its own module * fix links * update version string in setup.py * uncommented test * making the pytorch stuff up to date (#79) * making the pytorch stuff up to date * reverting util.py * fixing the hook imports * fixing test imports * fix increment of step * training_has_ended fix for pytorch (#80) * making the pytorch stuff up to date * Revert "making the pytorch stuff up to date" This reverts 
commit f87f9560b5351f135553072c495f2123964b9f3c. * changing to training_has_ended --- README.md | 30 +- bin/build_binaries.sh | 10 + config/buildspec.yml | 20 +- config/configure_branch_for_test.txt | 5 - config/get-branch.sh | 100 +- config/protoc_downloader.sh | 3 +- config/tests.sh | 32 +- docs/analysis/README.md | 457 +++++ docs/mxnet/README.md | 631 +++++++ docs/mxnet/api.md | 201 +++ docs/pytorch/README.md | 527 ++++++ docs/pytorch/api.md | 173 ++ docs/tensorflow/README.md | 526 ++++++ docs/tensorflow/api.md | 186 ++ .../notebooks/NNRecipe/NNRecipes.ipynb | 377 ++++ examples/analysis/notebooks/NNRecipe/mnist.py | 127 ++ examples/analysis/scripts/README.md | 28 + examples/analysis/scripts/check_grads.py | 18 + .../analysis/scripts/similar_across_runs.py | 22 + .../analysis/scripts/weight_update_ratio.py | 18 + .../mnist/SimpleInteractiveAnalysis.ipynb | 964 ++++++++++ .../scripts/mnist_gluon_all_zero_demo.py | 151 ++ .../scripts/mnist_gluon_basic_hook_demo.py | 146 ++ .../mnist_gluon_block_input_output_demo.py | 148 ++ .../mnist_gluon_model_input_output_demo.py | 135 ++ .../scripts/mnist_gluon_save_all_demo.py | 123 ++ examples/mxnet/scripts/mnist_gluon_vg_demo.py | 148 ++ .../PyTorch-SimpleInteractiveAnalysis.ipynb | 1567 +++++++++++++++++ .../pytorch/scripts/pytorch_hook_demos.py | 161 ++ examples/pytorch/scripts/simple.py | 107 ++ examples/pytorch/scripts/torch_resnet.py | 98 ++ .../Loss_Accuracy.ipynb | 199 +++ .../sentiment-analysis.ipynb | 974 ++++++++++ .../sentiment_keras.py | 107 ++ .../training_scripts/mnist/README.md | 77 + .../training_scripts/mnist/mnist.py | 123 ++ .../training_scripts/resnet50/README.md | 161 ++ .../resnet50/train_imagenet_resnet_hvd.py | 1211 +++++++++++++ .../training_scripts/simple/README.md | 120 ++ .../training_scripts/simple/simple.py | 71 + setup.py | 107 +- .../tfrecord => tests}/__init__.py | 0 tests/analysis/__init__.py | 0 tests/analysis/config.yaml | 193 ++ tests/analysis/conftest.py | 3 + 
tests/analysis/exceptions/__init__.py | 0 tests/analysis/exceptions/test_exceptions.py | 65 + tests/analysis/integration_testing_rules.py | 171 ++ tests/analysis/invoker.py | 86 + tests/analysis/rules/__init__.py | 0 tests/analysis/rules/test_invoker.py | 35 + tests/analysis/rules/test_rule_no_refresh.py | 59 + tests/analysis/tensors/__init__.py | 0 tests/analysis/tensors/test_refresh.py | 54 + tests/analysis/trials/__init__.py | 0 tests/analysis/trials/test_create.py | 25 + tests/analysis/trials/test_local.py | 25 + tests/analysis/trials/test_modes.py | 75 + tests/analysis/trials/test_refresh.py | 98 ++ tests/analysis/trials/test_s3.py | 75 + tests/analysis/utils.py | 30 + tests/core/__init__.py | 0 tests/core/test_collections.py | 6 +- tests/core/test_handler.py | 20 +- tests/core/test_index.py | 10 +- tests/core/test_modes.py | 6 +- tests/core/test_numpy.py | 4 +- tests/core/test_reduction_config.py | 2 +- tests/core/test_save_config.py | 2 +- tests/core/test_training_end.py | 11 +- tests/core/test_utils.py | 2 +- tests/mxnet/__init__.py | 0 tests/mxnet/mnist_gluon_model.py | 100 ++ tests/mxnet/test_hook.py | 17 + tests/mxnet/test_hook_all_zero.py | 36 + tests/mxnet/test_hook_custom_collection.py | 17 + tests/mxnet/test_hook_reduce_config.py | 61 + tests/mxnet/test_hook_save_all.py | 15 + tests/mxnet/test_hook_save_config.py | 22 + tests/mxnet/test_modes.py | 22 + tests/mxnet/test_training_end.py | 33 + tests/pytorch/__init__.py | 0 tests/pytorch/test_reduce_config.py | 104 ++ tests/pytorch/test_simple_write.py | 201 +++ tests/tensorflow/__init__.py | 0 tests/tensorflow/hooks/__init__.py | 0 .../tensorflow/hooks/test_estimator_modes.py | 171 ++ tests/tensorflow/hooks/test_reductions.py | 79 + tests/tensorflow/hooks/test_save_all_full.py | 57 + tests/tensorflow/hooks/test_save_config.py | 35 + .../tensorflow/hooks/test_save_reductions.py | 63 + tests/tensorflow/hooks/test_simple_include.py | 110 ++ tests/tensorflow/hooks/test_training_end.py | 20 + 
.../hooks/test_weights_gradients.py | 29 + tests/tensorflow/hooks/test_when_nan.py | 32 + tests/tensorflow/hooks/test_write.py | 88 + tests/tensorflow/hooks/utils.py | 49 + tests/tensorflow/test_tf_collections.py | 18 + tornasole/__init__.py | 4 + tornasole/analysis/__init__.py | 0 tornasole/analysis/utils.py | 39 + tornasole/core/__init__.py | 0 .../core}/access_layer/__init__.py | 0 .../core}/access_layer/base.py | 4 +- .../core}/access_layer/file.py | 1 + .../core}/access_layer/s3.py | 3 +- .../core}/access_layer/s3handler.py | 2 +- .../core}/access_layer/utils.py | 7 +- tornasole/core/actions/__init__.py | 2 + tornasole/core/actions/action_base.py | 8 + tornasole/core/actions/terminate_smjob.py | 20 + .../core}/collection.py | 0 .../core}/collection_manager.py | 0 .../core}/indexutils.py | 0 {tornasole_core => tornasole/core}/modes.py | 1 + {tornasole_core => tornasole/core}/reader.py | 4 +- .../core}/reduction_config.py | 0 tornasole/core/reductions.py | 32 + .../core}/sagemaker_utils.py | 3 +- .../core}/save_config.py | 0 .../core}/save_manager.py | 0 tornasole/core/tensor.py | 225 +++ .../core}/tfevent/__init__.py | 0 .../core}/tfevent/attr_value.proto | 6 +- .../core}/tfevent/event.proto | 2 +- .../core}/tfevent/event_file_reader.py | 10 +- .../core}/tfevent/event_file_writer.py | 11 +- .../core}/tfevent/graph.proto | 4 +- .../core}/tfevent/index_file_writer.py | 6 +- .../core}/tfevent/node_def.proto | 2 +- .../core}/tfevent/resource_handle.proto | 0 .../core}/tfevent/summary.proto | 2 +- .../core}/tfevent/summary_to_event.txt | 0 .../core}/tfevent/tensor.proto | 6 +- .../core}/tfevent/tensor_shape.proto | 0 .../core}/tfevent/types.proto | 0 .../core}/tfevent/util.py | 4 +- .../core}/tfevent/versions.proto | 0 tornasole/core/tfrecord/__init__.py | 0 .../core}/tfrecord/_crc32c.py | 0 .../core}/tfrecord/record_reader.py | 8 +- .../core}/tfrecord/record_writer.py | 7 +- .../core}/tfrecord/tensor_reader.py | 12 +- {tornasole_core => tornasole/core}/utils.py | 
12 +- {tornasole_core => tornasole/core}/writer.py | 5 +- tornasole/exceptions.py | 54 + tornasole/mxnet/__init__.py | 5 + tornasole/mxnet/hook.py | 264 +++ tornasole/mxnet/mxnet_collection.py | 85 + tornasole/mxnet/util.py | 50 + tornasole/pytorch/__init__.py | 8 + tornasole/pytorch/hook.py | 273 +++ tornasole/pytorch/setup.py | 24 + tornasole/pytorch/torch_collection.py | 90 + tornasole/pytorch/util.py | 44 + tornasole/rules/__init__.py | 2 + tornasole/rules/rule.py | 111 ++ tornasole/rules/rule_invoker.py | 66 + tornasole/tensorflow/__init__.py | 10 + tornasole/tensorflow/collection.py | 153 ++ tornasole/tensorflow/hook.py | 374 ++++ tornasole/tensorflow/keras.py | 152 ++ tornasole/tensorflow/optimizer.py | 13 + tornasole/tensorflow/reductions.py | 37 + tornasole/tensorflow/save_manager.py | 45 + tornasole/tensorflow/utils.py | 51 + tornasole/trials/__init__.py | 4 + tornasole/trials/local_trial.py | 138 ++ tornasole/trials/s3_trial.py | 146 ++ tornasole/trials/trial.py | 263 +++ tornasole/trials/trial_catalog.py | 45 + tornasole/trials/utils.py | 15 + tornasole_core/__init__.py | 4 - 173 files changed, 15536 insertions(+), 267 deletions(-) create mode 100644 bin/build_binaries.sh delete mode 100755 config/configure_branch_for_test.txt create mode 100644 docs/analysis/README.md create mode 100644 docs/mxnet/README.md create mode 100644 docs/mxnet/api.md create mode 100644 docs/pytorch/README.md create mode 100644 docs/pytorch/api.md create mode 100644 docs/tensorflow/README.md create mode 100644 docs/tensorflow/api.md create mode 100644 examples/analysis/notebooks/NNRecipe/NNRecipes.ipynb create mode 100644 examples/analysis/notebooks/NNRecipe/mnist.py create mode 100644 examples/analysis/scripts/README.md create mode 100644 examples/analysis/scripts/check_grads.py create mode 100644 examples/analysis/scripts/similar_across_runs.py create mode 100644 examples/analysis/scripts/weight_update_ratio.py create mode 100644 
examples/mxnet/notebook/mnist/SimpleInteractiveAnalysis.ipynb create mode 100644 examples/mxnet/scripts/mnist_gluon_all_zero_demo.py create mode 100644 examples/mxnet/scripts/mnist_gluon_basic_hook_demo.py create mode 100644 examples/mxnet/scripts/mnist_gluon_block_input_output_demo.py create mode 100644 examples/mxnet/scripts/mnist_gluon_model_input_output_demo.py create mode 100644 examples/mxnet/scripts/mnist_gluon_save_all_demo.py create mode 100644 examples/mxnet/scripts/mnist_gluon_vg_demo.py create mode 100644 examples/pytorch/notebooks/PyTorch-SimpleInteractiveAnalysis.ipynb create mode 100644 examples/pytorch/scripts/pytorch_hook_demos.py create mode 100644 examples/pytorch/scripts/simple.py create mode 100644 examples/pytorch/scripts/torch_resnet.py create mode 100644 examples/tensorflow/notebooks/tf-sentiment-script-mode/Loss_Accuracy.ipynb create mode 100644 examples/tensorflow/notebooks/tf-sentiment-script-mode/sentiment-analysis.ipynb create mode 100644 examples/tensorflow/notebooks/tf-sentiment-script-mode/sentiment_keras.py create mode 100644 examples/tensorflow/training_scripts/mnist/README.md create mode 100644 examples/tensorflow/training_scripts/mnist/mnist.py create mode 100644 examples/tensorflow/training_scripts/resnet50/README.md create mode 100644 examples/tensorflow/training_scripts/resnet50/train_imagenet_resnet_hvd.py create mode 100644 examples/tensorflow/training_scripts/simple/README.md create mode 100644 examples/tensorflow/training_scripts/simple/simple.py rename {tornasole_core/tfrecord => tests}/__init__.py (100%) create mode 100644 tests/analysis/__init__.py create mode 100644 tests/analysis/config.yaml create mode 100644 tests/analysis/conftest.py create mode 100644 tests/analysis/exceptions/__init__.py create mode 100644 tests/analysis/exceptions/test_exceptions.py create mode 100644 tests/analysis/integration_testing_rules.py create mode 100644 tests/analysis/invoker.py create mode 100644 tests/analysis/rules/__init__.py 
create mode 100644 tests/analysis/rules/test_invoker.py create mode 100644 tests/analysis/rules/test_rule_no_refresh.py create mode 100644 tests/analysis/tensors/__init__.py create mode 100644 tests/analysis/tensors/test_refresh.py create mode 100644 tests/analysis/trials/__init__.py create mode 100644 tests/analysis/trials/test_create.py create mode 100644 tests/analysis/trials/test_local.py create mode 100644 tests/analysis/trials/test_modes.py create mode 100644 tests/analysis/trials/test_refresh.py create mode 100644 tests/analysis/trials/test_s3.py create mode 100644 tests/analysis/utils.py create mode 100644 tests/core/__init__.py create mode 100644 tests/mxnet/__init__.py create mode 100644 tests/mxnet/mnist_gluon_model.py create mode 100644 tests/mxnet/test_hook.py create mode 100644 tests/mxnet/test_hook_all_zero.py create mode 100644 tests/mxnet/test_hook_custom_collection.py create mode 100644 tests/mxnet/test_hook_reduce_config.py create mode 100644 tests/mxnet/test_hook_save_all.py create mode 100644 tests/mxnet/test_hook_save_config.py create mode 100644 tests/mxnet/test_modes.py create mode 100644 tests/mxnet/test_training_end.py create mode 100644 tests/pytorch/__init__.py create mode 100644 tests/pytorch/test_reduce_config.py create mode 100644 tests/pytorch/test_simple_write.py create mode 100644 tests/tensorflow/__init__.py create mode 100644 tests/tensorflow/hooks/__init__.py create mode 100644 tests/tensorflow/hooks/test_estimator_modes.py create mode 100644 tests/tensorflow/hooks/test_reductions.py create mode 100644 tests/tensorflow/hooks/test_save_all_full.py create mode 100644 tests/tensorflow/hooks/test_save_config.py create mode 100644 tests/tensorflow/hooks/test_save_reductions.py create mode 100644 tests/tensorflow/hooks/test_simple_include.py create mode 100644 tests/tensorflow/hooks/test_training_end.py create mode 100644 tests/tensorflow/hooks/test_weights_gradients.py create mode 100644 tests/tensorflow/hooks/test_when_nan.py create 
mode 100644 tests/tensorflow/hooks/test_write.py create mode 100644 tests/tensorflow/hooks/utils.py create mode 100644 tests/tensorflow/test_tf_collections.py create mode 100644 tornasole/__init__.py create mode 100644 tornasole/analysis/__init__.py create mode 100644 tornasole/analysis/utils.py create mode 100644 tornasole/core/__init__.py rename {tornasole_core => tornasole/core}/access_layer/__init__.py (100%) rename {tornasole_core => tornasole/core}/access_layer/base.py (87%) rename {tornasole_core => tornasole/core}/access_layer/file.py (99%) rename {tornasole_core => tornasole/core}/access_layer/s3.py (97%) rename {tornasole_core => tornasole/core}/access_layer/s3handler.py (99%) rename {tornasole_core => tornasole/core}/access_layer/utils.py (91%) create mode 100644 tornasole/core/actions/__init__.py create mode 100644 tornasole/core/actions/action_base.py create mode 100644 tornasole/core/actions/terminate_smjob.py rename {tornasole_core => tornasole/core}/collection.py (100%) rename {tornasole_core => tornasole/core}/collection_manager.py (100%) rename {tornasole_core => tornasole/core}/indexutils.py (100%) rename {tornasole_core => tornasole/core}/modes.py (99%) rename {tornasole_core => tornasole/core}/reader.py (96%) rename {tornasole_core => tornasole/core}/reduction_config.py (100%) create mode 100644 tornasole/core/reductions.py rename {tornasole_core => tornasole/core}/sagemaker_utils.py (98%) rename {tornasole_core => tornasole/core}/save_config.py (100%) rename {tornasole_core => tornasole/core}/save_manager.py (100%) create mode 100644 tornasole/core/tensor.py rename {tornasole_core => tornasole/core}/tfevent/__init__.py (100%) rename {tornasole_core => tornasole/core}/tfevent/attr_value.proto (94%) rename {tornasole_core => tornasole/core}/tfevent/event.proto (97%) rename {tornasole_core => tornasole/core}/tfevent/event_file_reader.py (95%) rename {tornasole_core => tornasole/core}/tfevent/event_file_writer.py (97%) rename {tornasole_core => 
tornasole/core}/tfevent/graph.proto (95%) rename {tornasole_core => tornasole/core}/tfevent/index_file_writer.py (89%) rename {tornasole_core => tornasole/core}/tfevent/node_def.proto (98%) rename {tornasole_core => tornasole/core}/tfevent/resource_handle.proto (100%) rename {tornasole_core => tornasole/core}/tfevent/summary.proto (98%) rename {tornasole_core => tornasole/core}/tfevent/summary_to_event.txt (100%) rename {tornasole_core => tornasole/core}/tfevent/tensor.proto (94%) rename {tornasole_core => tornasole/core}/tfevent/tensor_shape.proto (100%) rename {tornasole_core => tornasole/core}/tfevent/types.proto (100%) rename {tornasole_core => tornasole/core}/tfevent/util.py (96%) rename {tornasole_core => tornasole/core}/tfevent/versions.proto (100%) create mode 100644 tornasole/core/tfrecord/__init__.py rename {tornasole_core => tornasole/core}/tfrecord/_crc32c.py (100%) rename {tornasole_core => tornasole/core}/tfrecord/record_reader.py (94%) rename {tornasole_core => tornasole/core}/tfrecord/record_writer.py (95%) rename {tornasole_core => tornasole/core}/tfrecord/tensor_reader.py (86%) rename {tornasole_core => tornasole/core}/utils.py (92%) rename {tornasole_core => tornasole/core}/writer.py (96%) create mode 100644 tornasole/exceptions.py create mode 100644 tornasole/mxnet/__init__.py create mode 100644 tornasole/mxnet/hook.py create mode 100644 tornasole/mxnet/mxnet_collection.py create mode 100644 tornasole/mxnet/util.py create mode 100644 tornasole/pytorch/__init__.py create mode 100644 tornasole/pytorch/hook.py create mode 100644 tornasole/pytorch/setup.py create mode 100644 tornasole/pytorch/torch_collection.py create mode 100644 tornasole/pytorch/util.py create mode 100644 tornasole/rules/__init__.py create mode 100644 tornasole/rules/rule.py create mode 100644 tornasole/rules/rule_invoker.py create mode 100644 tornasole/tensorflow/__init__.py create mode 100644 tornasole/tensorflow/collection.py create mode 100644 tornasole/tensorflow/hook.py 
create mode 100644 tornasole/tensorflow/keras.py create mode 100644 tornasole/tensorflow/optimizer.py create mode 100644 tornasole/tensorflow/reductions.py create mode 100644 tornasole/tensorflow/save_manager.py create mode 100644 tornasole/tensorflow/utils.py create mode 100644 tornasole/trials/__init__.py create mode 100644 tornasole/trials/local_trial.py create mode 100644 tornasole/trials/s3_trial.py create mode 100644 tornasole/trials/trial.py create mode 100644 tornasole/trials/trial_catalog.py create mode 100644 tornasole/trials/utils.py delete mode 100644 tornasole_core/__init__.py diff --git a/README.md b/README.md index d494e63bb8..e261b082e8 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,31 @@ -## Tornasole_core +## Tornasole -This repo is a part of code base of Tornasole: debugger and profile for Deep Learning training jobs and online/offline inference workloads. +Tornasole is an upcoming AWS service designed to be a debugger +for machine learning models. It lets you go beyond just looking +at scalars like losses and accuracies during training and +gives you full visibility into all tensors 'flowing through the graph' +during training or inference. -## License +Using Tornasole is a two step process: + +### Saving tensors + +This needs the `tornasole` package built for the appropriate framework. +It allows you to collect the tensors you want at the frequency +that you want, and save them for analysis. +Please follow the appropriate Readme page to install the correct version. + + +#### [Tornasole TensorFlow](docs/tensorflow/README.md) +#### [Tornasole MXNet](docs/mxnet/README.md) +#### [Tornasole PyTorch](docs/pytorch/README.md) +### Analysis +Please refer **[this page](docs/analysis/README.md)** for more details about how to analyze. +The analysis of these tensors can be done on a separate machine in parallel with the training job. + +## ContactUs +We would like to hear from you. 
If you have any questions or feedback, please reach out to us at tornasole-users@amazon.com + +## License This library is licensed under the Apache 2.0 License. diff --git a/bin/build_binaries.sh b/bin/build_binaries.sh new file mode 100644 index 0000000000..148825a3dc --- /dev/null +++ b/bin/build_binaries.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +VERSION='0.2.1' + +for FRAMEWORK in tensorflow mxnet pytorch +do + CAPITALIZED_FRAMEWORK=`echo "$FRAMEWORK" | tr '[a-z]' '[A-Z]'` + env TORNASOLE_WITH_$CAPITALIZED_FRAMEWORK=1 python setup.py bdist_wheel --universal + # aws s3 cp dist/tornasole-$VERSION-py2.py3-none-any.whl s3://tornasole-binaries-use1/tornasole_$FRAMEWORK/py3/ +done \ No newline at end of file diff --git a/config/buildspec.yml b/config/buildspec.yml index 3d04a42bec..6a622443b3 100755 --- a/config/buildspec.yml +++ b/config/buildspec.yml @@ -4,14 +4,13 @@ version: 0.2 phases: install: commands: - - . config/get-branch.sh #EXPORTS BRANCHES FOR OTHER REPOS AND CURRENT REPO. + - . config/get-branch.sh #don't need this - su && apt-get update - apt-get install sudo - sudo apt-get update - sudo apt-get install unzip - cd $CODEBUILD_SRC_DIR && chmod +x config/protoc_downloader.sh && ./config/protoc_downloader.sh - - pip install pytest - - pip install wheel + - pip install pytest wheel pyYaml pytest-html tensorflow mxnet torch - pip uninstall -y boto3 && pip uninstall -y aiobotocore && pip uninstall -y botocore pre_build: @@ -20,18 +19,11 @@ phases: build: commands: - cd $CODEBUILD_SRC_DIR && python setup.py bdist_wheel --universal && pip install dist/*.whl && cd .. - - if [ $framework = "tensorflow" ] ; then cd $CODEBUILD_SRC_DIR_tornasole_tf && git checkout $TF_BRANCH && python setup.py bdist_wheel --universal && pip install dist/*.whl && cd .. ; fi - - if [ $framework = "mxnet" ] ; then cd $CODEBUILD_SRC_DIR_tornasole_mxnet && git checkout $MXNET_BRANCH && python setup.py bdist_wheel --universal && pip install dist/*.whl && cd .. 
; fi - - if [ $framework = "pytorch" ] ; then cd $CODEBUILD_SRC_DIR_tornasole_tf && git checkout $TF_BRANCH && cd tornasole_pytorch && python setup.py bdist_wheel --universal && pip install dist/*.whl && cd .. ; fi - - cd $CODEBUILD_SRC_DIR_tornasole_rules && git checkout $RULES_BRANCH && python setup.py bdist_wheel --universal && pip install dist/*.whl && cd .. - - cd $CODEBUILD_SRC_DIR && chmod +x config/tests.sh && ./config/tests.sh && mkdir -p $CURRENT_COMMIT_PATH && cp ./dist/*.whl $CURRENT_COMMIT_PATH && cd .. - - cd $CODEBUILD_SRC_DIR_tornasole_rules && git checkout $RULES_BRANCH && chmod +x config/tests.sh && ./config/tests.sh && mkdir -p $RULES_PATH && cp ./dist/*.whl $RULES_PATH && cd .. - - if [ $framework = "tensorflow" ] ; then cd $CODEBUILD_SRC_DIR_tornasole_tf && git checkout $TF_BRANCH && chmod +x config/tests.sh && ./config/tests.sh && mkdir -p $TF_PATH && cp ./dist/*.whl $TF_PATH && cd .. ; fi - - if [ $framework = "mxnet" ] ; then cd $CODEBUILD_SRC_DIR_tornasole_mxnet && git checkout $MXNET_BRANCH && chmod +x config/tests.sh && ./config/tests.sh && mkdir -p $MXNET_PATH && cp ./dist/*.whl $MXNET_PATH && cd .. ; fi - - if [ $framework = "pytorch" ] ; then cd $CODEBUILD_SRC_DIR_tornasole_tf && git checkout $TF_BRANCH && cd tornasole_pytorch && chmod +x config/tests.sh && ./config/tests.sh && mkdir -p $TF_PATH && cp ./dist/*.whl $TF_PATH && cd .. ; fi - - if [ "$CODEBUILD_GIT_BRANCH" = "master" ] && [ "$CODEBUILD_WEBHOOK_EVENT" = "PUSH" ] ; then aws s3 cp $CODEBUILD_SRC_DIR/wheels s3://tornasolecodebuildtest/ --recursive ; fi - + - cd $CODEBUILD_SRC_DIR && chmod +x config/tests.sh && ./config/tests.sh && mkdir -p upload/$CURRENT_COMMIT_PATH/wheels && cp ./dist/*.whl upload/$CURRENT_COMMIT_PATH/wheels && cd .. 
+ - aws s3 cp $CODEBUILD_SRC_DIR/upload s3://tornasolecodebuildtest/ --recursive +#if [ "$CODEBUILD_GIT_BRANCH" = "master" ] && [ "$CODEBUILD_WEBHOOK_EVENT" = "PUSH" ] ; then post_build: commands: - if [ "$CODEBUILD_BUILD_SUCCEEDING" -eq 0 ]; then echo "ERROR BUILD FAILED , ACCESS BUILD LOGS THROUGH GITHUB OR TROUGH THE LINK:$CODEBUILD_BUILD_URL" ; fi - if [ "$CODEBUILD_BUILD_SUCCEEDING" -eq 1 ]; then echo "INFO BUILD SUCCEEDED !!! , ACCESS BUILD LOGS THROUGH GITHUB OR TROUGH THE LINK:$CODEBUILD_BUILD_URL" ; fi + diff --git a/config/configure_branch_for_test.txt b/config/configure_branch_for_test.txt deleted file mode 100755 index 8faef499d2..0000000000 --- a/config/configure_branch_for_test.txt +++ /dev/null @@ -1,5 +0,0 @@ -tornasole_core:default -tornasole_tf:default -tornasole_mxnet:default -tornasole_rules:default -tornasole_pytorch:default diff --git a/config/get-branch.sh b/config/get-branch.sh index ce7f8481e1..8ed0977c42 100755 --- a/config/get-branch.sh +++ b/config/get-branch.sh @@ -1,114 +1,16 @@ -#$CODEBUILD_WEBHOOK_BASE_REF IS DESTINATION BRANCH FOR PR. -#$CODEBUILD_GIT_BRANCH IS CURRENT BRANCH FOR THE REPO WHICH TRIGGERED BUILD. 
-core_repo="tornasole_core" -rules_repo="tornasole_rules" -tf_repo="tornasole_tf" -mxnet_repo="tornasole_mxnet" - - - -if [ -z "${CODEBUILD_BUILD_IMAGE##*tensorflow*}" ] ; then export framework="tensorflow"; -elif [ -z "${CODEBUILD_BUILD_IMAGE##*mxnet*}" ] ; then export framework="mxnet"; -elif [ -z "${CODEBUILD_BUILD_IMAGE##*pytorch*}" ] ; then export framework="pytorch"; -fi - export CODEBUILD_GIT_BRANCH="$(git symbolic-ref HEAD --short 2>/dev/null)" if [ "$CODEBUILD_GIT_BRANCH" = "" ] ; then CODEBUILD_GIT_BRANCH="$(git branch -a --contains HEAD | sed -n 2p | awk '{ printf $1 }')"; export CODEBUILD_GIT_BRANCH=${CODEBUILD_GIT_BRANCH#remotes/origin/}; fi -SUBSTRING=$(echo $CODEBUILD_WEBHOOK_BASE_REF| cut -d'/' -f 3) -BRANCH='' -if [ "$CODEBUILD_WEBHOOK_EVENT" = "PULL_REQUEST_CREATED" ] || [ "$CODEBUILD_WEBHOOK_EVENT" = "PULL_REQUEST_REOPENED" ] || [ "$CODEBUILD_WEBHOOK_EVENT" = "PULL_REQUEST_UPDATED" ] || [ "$CODEBUILD_WEBHOOK_EVENT" = "PULL_REQUEST_MERGED" ] && [ "$CODEBUILD_WEBHOOK_EVENT" != "PUSH" ]; then - BRANCH=$SUBSTRING - -elif [ "$CODEBUILD_WEBHOOK_EVENT" != "PULL_REQUEST_CREATED" ] && [ "$CODEBUILD_WEBHOOK_EVENT" != "PULL_REQUEST_REOPENED" ] && [ "$CODEBUILD_WEBHOOK_EVENT" != "PULL_REQUEST_UPDATED" ] && [ "$CODEBUILD_WEBHOOK_EVENT" != "PULL_REQUEST_MERGED" ] && [ "$CODEBUILD_GIT_BRANCH" != "alpha" ] && [ "$CODEBUILD_GIT_BRANCH" != "master" ] ; then - cd $CODEBUILD_SRC_DIR && git checkout $CODEBUILD_GIT_BRANCH - if [ $(git merge-base --is-ancestor $CODEBUILD_GIT_BRANCH "alpha" ; echo $?) -eq 1 ]; then - BRANCH='alpha' - - elif [ $(git merge-base --is-ancestor $CODEBUILD_GIT_BRANCH "alpha" ; echo $?) -eq 0 ]; then - BRANCH='master' - - fi - cd .. 
- -else BRANCH=$CODEBUILD_GIT_BRANCH -fi - -TF_BRANCH=$BRANCH ; -CORE_BRANCH=$BRANCH ; -RULES_BRANCH=$BRANCH ; -MXNET_BRANCH=$BRANCH ; - - -if [ "$CODEBUILD_GIT_BRANCH" != "alpha" ] && [ "$CODEBUILD_GIT_BRANCH" != "master" ] && [ "$CODEBUILD_WEBHOOK_EVENT" != "PUSH" ] ; then - file="config/configure_branch_for_test.txt" - while IFS=: read -r repo_name default_or_branchname - do - if [ "$repo_name" = "$tf_repo" ] && [ "$default_or_branchname" != "default" ]; then - TF_BRANCH=$default_or_branchname - elif [ "$repo_name" = "$mxnet_repo" ] && [ "$default_or_branchname" != "default" ] ; then - MXNET_BRANCH=$default_or_branchname - elif [ "$repo_name" = "$rules_repo" ] && [ "$default_or_branchname" != "default" ] ; then - RULES_BRANCH=$default_or_branchname - elif [ "$repo_name" = "$core_repo" ] && [ "$default_or_branchname" != "default" ] ; then - CORE_BRANCH=$default_or_branchname - fi - - done <"$file" -fi cd $CODEBUILD_SRC_DIR && git checkout $CODEBUILD_GIT_BRANCH export CURRENT_COMMIT_HASH=$(git log -1 --pretty=%h); export CURRENT_COMMIT_DATE="$(git show -s --format=%ci | cut -d' ' -f 1)_$(git show -s --format=%ci | cut -d' ' -f 2)"; export CURRENT_REPO_NAME=$(basename `git rev-parse --show-toplevel`) ; -export CURRENT_COMMIT_PATH="$CODEBUILD_SRC_DIR/wheels/$CURRENT_COMMIT_DATE/$CURRENT_REPO_NAME/$CURRENT_COMMIT_HASH" +export CURRENT_COMMIT_PATH="$CURRENT_COMMIT_DATE/$CURRENT_REPO_NAME/$CURRENT_COMMIT_HASH" cd .. -if [ "$CURRENT_REPO_NAME" != "$core_repo" ]; then - cd $CODEBUILD_SRC_DIR_tornasole_core && git checkout $CORE_BRANCH - export CORE_REPO_NAME=$(basename `git rev-parse --show-toplevel`) ; - export CORE_COMMIT_HASH=$(git log -1 --pretty=%h); - export CORE_COMMIT_DATE="$(git show -s --format=%ci | cut -d' ' -f 1)_$(git show -s --format=%ci | cut -d' ' -f 2)"; - export CORE_PATH="$CODEBUILD_SRC_DIR/wheels/$CORE_COMMIT_DATE/$CORE_REPO_NAME/$CORE_COMMIT_HASH" - cd .. 
-fi - -if [ "$CURRENT_REPO_NAME" != "$rules_repo" ]; then - cd $CODEBUILD_SRC_DIR_tornasole_rules && git checkout $RULES_BRANCH - export RULES_REPO_NAME=$(basename `git rev-parse --show-toplevel`) ; - export RULES_COMMIT_HASH=$(git log -1 --pretty=%h); - export RULES_COMMIT_DATE="$(git show -s --format=%ci | cut -d' ' -f 1)_$(git show -s --format=%ci | cut -d' ' -f 2)"; - export RULES_PATH="$CODEBUILD_SRC_DIR/wheels/$RULES_COMMIT_DATE/$RULES_REPO_NAME/$RULES_COMMIT_HASH" - cd .. -fi - -if [ "$CURRENT_REPO_NAME" != "$mxnet_repo" ]; then - cd $CODEBUILD_SRC_DIR_tornasole_mxnet && git checkout $MXNET_BRANCH - export MXNET_REPO_NAME=$(basename `git rev-parse --show-toplevel`) ; - export MXNET_COMMIT_HASH=$(git log -1 --pretty=%h); - export MXNET_COMMIT_DATE="$(git show -s --format=%ci | cut -d' ' -f 1)_$(git show -s --format=%ci | cut -d' ' -f 2)"; - export MXNET_PATH="$CODEBUILD_SRC_DIR/wheels/$MXNET_COMMIT_DATE/$MXNET_REPO_NAME/$MXNET_COMMIT_HASH" - cd .. -fi - -if [ "$CURRENT_REPO_NAME" != "$tf_repo" ]; then - cd $CODEBUILD_SRC_DIR_tornasole_tf && git checkout $TF_BRANCH - export TF_REPO_NAME=$(basename `git rev-parse --show-toplevel`) ; - export TF_COMMIT_HASH=$(git log -1 --pretty=%h); - export TF_COMMIT_DATE="$(git show -s --format=%ci | cut -d' ' -f 1)_$(git show -s --format=%ci | cut -d' ' -f 2)"; - export TF_PATH="$CODEBUILD_SRC_DIR/wheels/$TF_COMMIT_DATE/$TF_REPO_NAME/$TF_COMMIT_HASH" - cd .. 
-fi - -export TF_BRANCH ; -export CORE_BRANCH ; -export RULES_BRANCH ; -export MXNET_BRANCH ; - - - export CODEBUILD_ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text) export CODEBUILD_PROJECT=${CODEBUILD_BUILD_ID%:$CODEBUILD_LOG_PATH} diff --git a/config/protoc_downloader.sh b/config/protoc_downloader.sh index 2fd904af3f..61d39db131 100644 --- a/config/protoc_downloader.sh +++ b/config/protoc_downloader.sh @@ -5,5 +5,4 @@ PROTOC_ZIP=protoc-3.7.1-linux-x86_64.zip curl -OL https://github.com/google/protobuf/releases/download/v3.7.1/$PROTOC_ZIP unzip -o $PROTOC_ZIP -d /usr/local bin/protoc unzip -o $PROTOC_ZIP -d /usr/local include/* -rm -f $PROTOC_ZIP - +rm -f $PROTOC_ZIP \ No newline at end of file diff --git a/config/tests.sh b/config/tests.sh index 6f03f2d52c..c81e18f6ab 100644 --- a/config/tests.sh +++ b/config/tests.sh @@ -1,28 +1,8 @@ -if [ -z "$framework" ] - then - echo "framework is not mentioned" - exit 1 -fi +#!/usr/bin/env bash -if [ "$framework" = "tensorflow" ] - then - echo "Launching testing job using $framework framework" +#export TORNASOLE_LOG_LEVEL=debug +TORNASOLE_LOG_LEVEL=debug python -m pytest --html=upload/$CURRENT_COMMIT_PATH/reports/report.html --self-contained-html tests/ +TORNASOLE_LOG_LEVEL=debug python -m pytest --html=upload/$CURRENT_COMMIT_PATH/reports/test_rules_tensorflow.html --self-contained-html -s tests/analysis/integration_testing_rules.py::test_test_rules --mode tensorflow --path_to_config ./tests/analysis/config.yaml +TORNASOLE_LOG_LEVEL=debug python -m pytest --html=upload/$CURRENT_COMMIT_PATH/reports/test_rules_mxnet.html --self-contained-html -s tests/analysis/integration_testing_rules.py::test_test_rules --mode mxnet --path_to_config ./tests/analysis/config.yaml +TORNASOLE_LOG_LEVEL=debug python -m pytest --html=upload/$CURRENT_COMMIT_PATH/reports/test_rules_pytorch.html --self-contained-html -s tests/analysis/integration_testing_rules.py::test_test_rules --mode pytorch --path_to_config 
./tests/analysis/config.yaml - -elif [ "$framework" = "mxnet" ] - then - echo "Launching testing job using $framework framework" - -elif [ "$framework" = "pytorch" ] - then - echo "Launching testing job using $framework framework" - - -else - echo "$framework framework not supported!!!" - exit 1 - -fi - -export TORNASOLE_LOG_LEVEL=debug -python -m pytest tests/ diff --git a/docs/analysis/README.md b/docs/analysis/README.md new file mode 100644 index 0000000000..eedc02bdd3 --- /dev/null +++ b/docs/analysis/README.md @@ -0,0 +1,457 @@ +# Tornasole Analysis +Tornasole is an upcoming AWS service designed to be a debugger for machine learning models. +It lets you go beyond just looking at scalars like losses and accuracies during training and gives you +full visibility into all tensors 'flowing through the graph' during training or inference. + +Tornasole's analysis module helps you analyze tensors saved from machine learning jobs. +It allows you to run Rules on these tensors as well as anything else you might want to do with +access to raw tensors such as inspection or visualization. It provides access to the tensors in the form of numpy arrays. + +## The Programming Model +The library is organized using the following constructs. + +### Trial +Trial the construct which lets you query for tensors for a given Tornasole run, specified by the path in which Tornasole artifacts are being saved or were saved. +You can pass a path which holds data for a past run (which has ended) as well as a path for a current run (to which tensors are being written). +Trial is capable of loading new tensors as and when they become available at the given location. + +There are two types of trials you can create: LocalTrial or S3Trial. +We provide a wrapper method to create the appropriate trial. + +The parameters you have to provide are: +- `name`: name can be any string. It is to help you manage different trials. +Make sure to give it a unique name to prevent confusion. 
+- `path`: path can be a local path or an S3 path of the form `s3://bucket/prefix`. This path should be where Tornasole hooks (TF or MXNet) save data to. +You should see the directory `events` and the file `collections.ts` in this path. + +##### Creating local trial +``` +from tornasole.trials import create_trial +trial = create_trial(path='/home/ubuntu/tornasole_outputs/train', + name='resnet_training_run') +``` +##### Creating S3 trial +``` +from tornasole.trials import create_trial +trial = create_trial(path='s3://tornasole-testing-bucket/outputs/resnet', + name='resnet_training_run') +``` +###### Restricting analysis to a range of steps +To any of these methods you can optionally pass `range_steps` to restrict your analysis to a certain range of steps. +Note that if you do so, Trial will not load data from other steps. + +*Examples* +- `range_steps=(100, None)`: This will load all steps after 100 +- `range_steps=(None, 100)`: This will load all steps before 100 +- `range_steps=(100, 200)` : This will load steps between 100 and 200 +- `range_steps=None`: This will load all steps + +``` +lt = create_trial(path='ts_outputs/resnet', name='resnet_training', + range_steps=(100, 200)) +``` + + +### Mode +A machine learning job can be executing steps in multiple modes, such as training, evaluating, or predicting. +Tornasole provides you the construct of a `mode` to keep data from these modes separate +and make it easy for analysis. To leverage this functionality you have to +call the `set_mode` function of hook such as the following call `hook.set_mode(modes.TRAIN)`. +The different modes available are `modes.TRAIN`, `modes.EVAL` and `modes.PREDICT`. + +When you set a mode, steps in that mode have a sequence. We refer to these numbers +as `mode_step`. Each `mode_step` has a global step number associated with it, which represents the +sequence of steps across all modes executed by the job. 
+ +For example, your job executes 10 steps, out of which the first 4 are training steps, 5th is evaluation step, 6-9 are training steps, and 10th is evaluation step. +Please note that indexing starts from 0. +In such a case, when you query for the global steps as below: +``` +trial.available_steps() +``` +you will see `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]`. + +If you query for training steps as below: +``` +from tornasole_rules import modes +trial.available_steps(modes.TRAIN) +``` + you will see `[0, 1, 2, 3, 4, 5, 6, 7, 8]` because there were 8 training step. +The training step with mode_step 4 here refers to the global step number 5. +You can query this as follows: +``` +trial.global_step(mode=modes.TRAIN, mode_step=4) +``` + +If you did not explicitly set a mode during the running of the job, +the steps are global steps, and are in the `modes.GLOBAL` mode. +In such a case, `global_step` is the same as `mode_step` where mode is `modes.GLOBAL`. + +Below, we describe the above functions and others that the Trial API provides. + +#### Trial API +Once you have a trial object you can do the following +**See names of all tensors available** +``` +trial.tensors() +``` +This returns tensors seen for any mode if mode was set during the machine learning job. + +**See all steps seen by the Trial for a particular mode** + +The returned list is the step number within that mode. +Each of these mode steps has a global step number associated with it. +The global step represents the sequence of steps across all modes executed by the job. +``` +from tornasole import modes +trial.available_steps(mode=modes.TRAIN) +``` +**See all global steps seen by the Trial** + +This is the list of steps across all modes. 
+``` +trial.available_steps() +``` + +**Get the mode and step number within mode for a given global step** +You can get the `mode` of `global_step` 100 as follows: +``` +mode = trial.mode(global_step=100) +``` +You can get the `mode_step` for `global_step` 100 as follows: +``` +mode_step = trial.mode_step(global_step=100) +``` +**Know the global step number for a given mode step** +``` +from tornasole import modes +global_step_num = trial.global_step(modes.TRAIN, mode_step=10) +``` +**See all modes for which the trial has data** +``` +trial.modes() +``` +**Access a particular tensor** +A tensor is identified by a string which represents its name. +``` +trial.tensor('relu_activation:0') +``` +**See the global steps for which tensor's value was saved** +``` +trial.tensor('relu_activation:0').steps() +``` + +**See the steps for a given mode when tensor's value was saved** +This returns the mode steps for those steps when this tensor's value was saved for this mode. +``` +from tornasole import modes +trial.tensor('relu_activation:0').steps(mode=modes.TRAIN) +``` + +**Get the value of the tensor at a global step** +This returns the tensor value as a numpy array for the 10th global step. +``` +trial.tensor('relu_activation:0').value(10) +``` +Please note that this can raise exceptions if the step is not available. +Please see [this section](#when-a-tensor-is-not-available-during-rule-execution) for more details on the different exceptions that can be raised. +**Get the value of the tensor at a step number for a given mode** +This returns the tensor value as a numpy array for the 10th training step. +``` +from tornasole import modes +trial.tensor('relu_activation:0').value(10, mode=modes.TRAIN) +``` +Please note that this can raise exceptions if the step is not available. +Please see [this section](#when-a-tensor-is-not-available-during-rule-execution) for more details on the different exceptions that can be raised. 
+**Get reduction value of a tensor at a step** + +Tornasole provides a few reductions out of the box that you can query with the following API. +This below returns the mean of the absolute values at step 10. +``` +trial.tensor('relu:0').reduction_value(10, 'mean', abs=True) +``` +The different reductions you can query for are the same as what are allowed in [ReductionConfig](https://github.com/awslabs/tornasole_tf/blob/master/docs/api.md) when saving tensors. +This API thus allows you to access the reduction you might have saved instead of the full tensor. +If you had saved the full tensor, it will calculate the requested reduction now and cache it. + +- `min`, `max`, `mean`, `prod`, `std`, `sum`, `variance` +- `l1`, `l2` norms + +Each of these can be retrieved for the absolute value of the tensor or the original tensor. +Above was an example to get the mean of the absolute value of the tensor. +`abs` can be set to `False` if you want to see the `mean` of the actual tensor. + +*Note that if you had only saved a particular reduction, you will not be able +to access the full tensor value or any other reduction during analysis. +This also applies to the `abs` flag, meaning that if you had saved the +`mean` of `abs` values of the tensor you can not query for the non absolute values mean. +If you do so, Tornasole will return `None`.* + +If you had saved the tensor without any reduction, then you can retrieve the actual tensor +as a numpy array and compute any function you might be interested in. + +Please note that this can raise exceptions if the step is not available. +Please see [this section](#when-a-tensor-is-not-available-during-rule-execution) for more details on the different exceptions that can be raised. + +**Get names of tensors matching regex** + +This method takes a regex pattern or a list of regex patterns. +Each regex pattern is a python style regex pattern string. 
trial.tensors_matching_regex(['relu_activation*'])
Note that when you see this exception, it means that this tensor can never become available +for this step in the future. + +- `TensorUnavailable` : This means that this tensor is not being saved or has not been saved by Tornasole. This means +that this tensor will never be seen for any step in Tornasole. + +- `StepUnavailable`: This means that the step was not saved and Tornasole has no data from the step. + +- `StepNotYetAvailable`: This means that the step has not yet been seen by Tornasole. It may be available in the future if the training is still going on. +Tornasole automatically loads new data as and when it becomes available. + +- `NoMoreData` : This will be raised when the training ends. Once you see this, you will know that there will be no more steps +and no more tensors saved. This is **WIP**. TornasoleRules can not currently do this. +For now, we expect you to stop the rule execution after the job is finished yourself. + +### Rules +Rules are the medium by which Tornasole executes a certain piece of code regularly on different steps of the jobs. +A rule is assigned to a trial and can be invoked at each new step of the trial. +It can also access other trials for its execution. +You can evaluate a rule using tensors from the current step or any step before the current step. +Please ensure your logic respects these semantics, else you will get a `TensorUnavailableForStep` +exception as the data would not yet be available. + +#### Writing a rule +Writing a rule involves implementing the [Rule interface](../../tornasole/rules/rule.py). + + +##### Constructor +Creating a rule involves first inheriting from the base Rule class Tornasole provides. +For this rule here we do not need to look at any other trials, so we set `other_trials` to None. 
+``` +from tornasole.rules import Rule + +class VanishingGradientRule(Rule): + def __init__(self, base_trial, threshold=0.0000001): + super().__init__(base_trial, other_trials=None) + self.threshold = threshold +``` +##### RequiredTensors + +Next you need to implement a method which lets Tornasole know what tensors you +are interested in for invocation at a given step. This is the `required_tensors` method. +It should return a list of instances of the class `RequiredTensors`, one for each trial +that this rule needs tensors from. + +As mentioned above, for this rule we need tensors from only one trial. +Hence we pass a list with one instance of RequiredTensors. +Before we look at how to define this method, let us look at the API for RequiredTensors + +###### API +[RequiredTensors](../../tornasole/rules/rule.py) is a class which takes one argument to instantiate, namely +- `trial` : The trial whose tensors we need, to execute the rule. + +``` +req_tensors = RequiredTensors(self.base_trial) +``` +To add a given tensor name as required, we do the following: +``` +# the steps we need this tensor from to execute the rule at one step, step_num +steps = [step_num] + +# need_tensor takes the name of the tensor (as string) and a list of steps (each represented by `int` step number) +req_tensors.need_tensor(tname, steps) + +# you can also pass a tensorname regex to need_tensor as follows +# this will match the regex mentioned as tname and fetch all tensors matching this regex +# for each step in steps +req_tensors.need_tensor(tname, steps, should_match_regex=True) +``` + +###### Declare required tensors +Now the required tensors method looks as follows if we need all the gradients for this step `step_num`. 
+``` + def required_tensors(self, step_num): + req_tensors = RequiredTensors(self.base_trial) + for tname in self.base_trial.tensors_in_collection('gradients'): + steps = [step_num] + req_tensors.need(tname, steps) + return [req_tensors] +``` + +This function will be used by the rule execution engine to fetch all the +required tensors from local disk or S3 before it executes the rule. +If you try to retrieve the value of a tensor which was not mentioned as part of `required_tensors`, +it might not be fetched from the trial path. +In such a case you might see one of the exceptions +`TensorUnavailableForStep` or `TensorUnavailable` or get the tensor object as None. +This is because the rule invoker executes the rule with `no_refresh` mode. Refer discussion above on this for more. + +##### Function to invoke at a given step +In this function you can implement the core logic of what you want to do with these tensors. +It should return a boolean value `True` or `False`. +This can be used to define actions that you might want to take based on the output of the rule. +A simplified version of the actual invoke function for `VanishingGradientRule` is below: +``` + def invoke_at_step(self, step): + for tname in self.base_trial.tensors_in_collection('gradients'): + t = self.base_trial.tensor(tname) + abs_mean = t.reduction_value(s, 'mean', abs=True) + if abs_mean < self.threshold: + return True + else: + return False +``` +#### Executing a rule +Now that you have written a rule, here's how you can execute it. We provide a function to invoke rules easily. +Refer [tornasole/rules/rule_invoker.py](../../tornasole/rules/rule_invoker.py) +The invoke function has the following syntax. +It takes a instance of a Rule and invokes it for a series of steps one after the other. 
+``` +invoke(rule_obj, start_step=0, end_step=None) +``` + +An example of invoking the VanishingGradientRule we described above is +[examples/analysis/scripts/check_grads.py](../../examples/analysis/scripts/check_grads.py). +``` +trial_obj = create_trial(trial_dir) +vr = VanishingGradientRule(base_trial=trial_obj, threshold=0.0000001) +invoke(vr) +``` + +For first party Rules (see below) that we provide, and that you want to run with default arguments, +you can run them as follows +``` +python -m tornasole.rules.rule_invoker --trial-dir ~/ts_outputs/vanishing_gradients --rule-name VanishingGradient +``` + +#### First party rules +We provide a few rules which we built. These are supposed to be general purpose rules that you can use easily. +We also hope these serve as examples for you to build your own rules. + +##### VanishingGradient +This rule helps you identify if you are running into a situation where your gradients vanish, i.e. have a +really low or zero magnitude. + +Here's how you import and instantiate this rule. +Note that it takes two parameters, `base_trial` the trial whose execution will invoke the rule, and a `threshold` which is +used to determine whether the gradient is `vanishing`. Gradients whose mean of absolute values are lower than this threshold +will return True when we invoke this rule. +``` +from tornasole.rules.generic import VanishingGradient +r = VanishingGradient(base_trial, threshold=0.0000001) +``` +##### ExplodingTensor +This rule helps you identify if you are running into a situation where any tensor has non finite values. +By default this rule treats `nan` and `infinity` as exploding. +If you want to check only for nan, pass the `only_nan` flag as True + +Here's how you import and instantiate this rule. +Note that it takes two parameters, `base_trial` the trial whose execution will invoke the rule and +`only_nan` which can be set to True if you only want to monitor for `nan` and not for `infinity`. 
+ +``` +from tornasole.rules.generic import ExplodingTensor +r = ExplodingTensor(base_trial) +``` + +##### SimilarAcrossRules +This rule helps you compare tensors across runs. Note that this rule takes two trials as inputs. First trial is the `base_trial` whose +execution will invoke the rule, and the `other_trial` is what is used to compare this trial's tensors with. +The third argument is a regex pattern which can be used to restrict this comparision to certain tensros. +It returns `True` if tensors are different at a given step between the two trials. +``` +from tornasole.rules.generic import SimilarAcrossRuns +r = SimilarAcrossRuns(base_trial, other_trial, include=None) +``` + +##### WeightUpdateRatio +This rule helps you keep track of the ratio of the updates to weights during training. It takes as inputs three arguments. +First, is the `base_trial` as usual. Second and third are `large_threshold` and `small_threshold`. +This returns True if the ratio of updates to weights is larger than `large_threshold` +or when this ratio is smaller than `small_threshold`. + +It is a good sign for training when the updates are in a good scale +compared to the gradients. Very large updates can push weights away from optima, and very small updates mean slow convergence. +``` +from tornasole.rules.generic import WeightUpdateRatio +wur = WeightUpdateRatio(base_trial, large_threshold, small_threshold) +``` + +##### AllZero +This rule helps to identify whether the tensors contain all zeros. It takes following arguments + +- `base_trial`: The trial whose execution will invoke the rule. The rule will inspect the tensors gathered during this trial. +- `collection_names`: The list of collection names. The rule will inspect the tensors that belong to these collections. +- `tensor_regex`: The list of regex patterns. The rule will inspect the tensors that match the regex patterns specified in this list. 
+ +For this rule, users must specify either the `collection_names` or `tensor_regex` parameter. If both the parameters are specified the rule will inspect union on tensors. + +``` +from tornasole.rules.generic import AllZero +collections = ['weights', 'bias'] +tensor_regex = ['input*'] +allzero = AllZero(base_trial=trial_obj, collection_names=collections, tensor_regex=tensor_regex) +``` + +## Examples +We have a few example scripts and notebooks to help you get started. Please go to the `examples` folder. + +We also have an end-to-end flow example from saving tensors to plotting using saved tensors for MXNet at +`examples/mxnet/notebook/mnist`. + +## ContactUs +We would like to hear from you. If you have any question or feedback, +please reach out to us tornasole-users@amazon.com + +## License +This library is licensed under the Apache 2.0 License. diff --git a/docs/mxnet/README.md b/docs/mxnet/README.md new file mode 100644 index 0000000000..69a9637e11 --- /dev/null +++ b/docs/mxnet/README.md @@ -0,0 +1,631 @@ +# Tornasole for MXNet +Tornasole is an upcoming AWS service designed to be a debugger +for machine learning models. It lets you go beyond just looking +at scalars like losses and accuracies during training and +gives you full visibility into all tensors 'flowing through the graph' +during training or inference. + +Using Tornasole is a two step process: + +**Saving tensors** +This needs the `tornasole` package built for the appropriate framework. This package lets you collect the tensors you want at the frequency +that you want, and save them for analysis. +Please follow the appropriate Readme page to install the correct version. This page is for using Tornasole with MXNet. + +**Analysis** +Please refer to [this page](../analysis/README.md) for more details about how to analyze. +That said, we do provide a few example analysis commands below +so as to provide an end to end flow. 
+The analysis of these tensors can be done on a separate machine +in parallel with the training job. + +## Installation +#### Prerequisites +- **Python 3.6** +- Tornasole can work in local mode or remote(s3) mode. You can skip this, if you want to try [local mode example](#tornasole-local-mode-example). +This is necessary to setup if you want to try [s3 mode example](#tornasole-s3-mode-example). +For running in S3 mode, you need to make sure that instance you are using has proper credentials set to have S3 write access. +Try the below command - +``` + aws s3 ls +``` +If you see errors, then most probably your credentials are not properly set. +Please follow [FAQ on S3](#s3access) to make sure that your instance has proper S3 access. + +- We recommend using the `mxnet_p36` conda environment on EC2 machines launched with the AWS Deep Learning AMI. +You can activate this by doing: `source activate mxnet_p36`. + +- If you are not using the above environment, please ensure that you have the MXNet framework installed. + +#### Instructions +**Make sure that your aws account is whitelisted for Tornasole. [ContactUs](#contactus)**. + +Once your account is whitelisted, you should be able to install the `tornasole` package built for MXNet as follows: + +``` +aws s3 cp s3://tornasole-binaries-use1/tornasole_mxnet/py3/tornasole-0.2.1-py2.py3-none-any.whl . +pip install tornasole-0.2.1-py2.py3-none-any.whl +``` + +**Please note** : If, while installing tornasole, you get a version conflict issue between botocore and boto3, +you might need to run the following +``` +pip uninstall -y botocore boto3 aioboto3 aiobotocore && pip install botocore==1.12.91 boto3==1.9.91 aiobotocore==0.10.2 aioboto3==6.4.1 +``` + +## Quickstart +If you want to quickly run some examples, you can jump to [examples](#examples) section. You can also see this [mnist notebook example](/examples/mxnet/notebook/mnist/SimpleInteractiveAnalysis.ipynb) to see tornasole working. 
+ +Integrating Tornasole into the training job can be accomplished by following steps below. + +### Import the tornasole_hook package +Import the TornasoleHook class along with other helper classes in your training script as shown below + +``` +from tornasole.mxnet.hook import TornasoleHook +from tornasole.mxnet import SaveConfig, Collection +``` + +### Instantiate and initialize tornasole hook + +``` + # Create SaveConfig that instructs engine to log graph tensors every 10 steps. + save_config = SaveConfig(save_interval=10) + # Create a hook that logs tensors of weights, biases and gradients while training the model. + output_s3_uri = 's3://my_mxnet_training_debug_bucket/12345678-abcd-1234-abcd-1234567890ab' + hook = TornasoleHook(out_dir=output_s3_uri, save_config=save_config) +``` + +Using the _Collection_ object and/or _include\_regex_ parameter of TornasoleHook , users can control which tensors will be stored by the TornasoleHook. +The section [How to save tensors](#how-to-save-tensors) explains various ways users can create _Collection_ object to store the required tensors. + +The _SaveConfig_ object controls when these tensors are stored. The tensors can be stored for specific steps or after certain interval of steps. If the save\_config parameter is not specified, the TornasoleHook will store tensors after every 100 steps. + +For additional details on TornasoleHook, SaveConfig and Collection please refer to the [API documentation](api.md) + +### Register Tornasole hook to the model before starting of the training. + +#### NOTE: The tornasole hook can only be registered to Gluon Non-hybrid models. + +After creating or loading the desired model, users can register the hook with the model as shown below. + +``` +net = create_gluon_model() + # Apply hook to the model (e.g. 
instruct engine to recognize hook configuration + # and enable mode in which engine will log graph tensors +hook.register_hook(net) +``` + +#### Set the mode +Set the mode you are running the job in. This helps you group steps by mode, +for easier analysis. +If you do not specify this, it saves steps under a `default` mode. +``` +hook.set_mode(ts.modes.TRAIN) +``` + +## Examples +#### Simple CPU training + +##### Tornasole local mode example +The example [mnist\_gluon\_vg\_demo.py](../../examples/mxnet/scripts/mnist_gluon_vg_demo.py) is implemented to show how Tornasole is useful in detecting the vanishing gradient problem. The learning_rate and momentum in this example are set in a such way that the training will encounter the vanishing gradient issue. + +``` +python examples/mxnet/scripts/mnist_gluon_vg_demo.py --output-uri ~/tornasole-testing/vg-demo/trial-one +``` + +You can monitor the job for vanishing gradients by doing the following: + +``` +python -m tornasole.rules.rule_invoker --trial-dir ~/tornasole-testing/vg-demo/trial-one --rule-name VanishingGradient +``` + +Note: You can also try some further analysis on tensors saved by following [programming model](../analysis/README.md#the-programming-model) section of our analysis README. + +##### Tornasole S3 mode example + +``` +python examples/mxnet/scripts/mnist_gluon_vg_demo.py --output-uri s3://tornasole-testing/vg-demo/trial-one +``` + +You can monitor the job for vanishing gradients by doing the following: + +``` +python -m tornasole.rules.rule_invoker --trial-dir s3://tornasole-testing/vg-demo/trial-one --rule-name VanishingGradient +``` +Note: You can also try some further analysis on tensors saved by following [programming model](../analysis/README.md#the-programming-model) section of our analysis README. 
+ +## API +Please refer to [this document](api.md) for description of all the functions and parameters that our APIs support + +#### Hook +TornasoleHook is the entry point for Tornasole into your program. +Some key parameters to consider when creating the TornasoleHook are the following: + +- `out_dir`: This represents the path to which the outputs of tornasole will be written to under a directory with the name `out_dir`. This can be a local path or an S3 prefix of the form `s3://bucket_name/prefix`. +- `save_config`: This is an object of [SaveConfig](#saveconfig). The SaveConfig allows user to specify when the tensors are to be stored. User can choose to specify the number of steps or the intervals of steps when the tensors will be stored. If not specified, it defaults to a SaveConfig which saves every 100 steps. +- `include_collections`: This represents the [collections](#collection) to be saved. With this parameter, user can control which tensors are to be saved. +- `include_regex`: This represents the regex patterns of names of tensors to save. With this parameter, user can control which tensors are to be saved. + +**Examples** + +- Save weights and gradients every 100 steps to an S3 location + +``` +import tornasole.mxnet as tm +tm.TornasoleHook(out_dir='s3://tornasole-testing/trial_job_dir', + save_config=tm.SaveConfig(save_interval=100), + include_collections=['weights', 'gradients']) +``` + +- Save custom tensors by regex pattern to a local path + +``` +import tornasole.mxnet as tm +tm.TornasoleHook(out_dir='/home/ubuntu/tornasole-testing/trial_job_dir', + include_regex=['relu*']) +``` + +Refer [API](api.md) for all parameters available and detailed descriptions. + +### Mode +A machine learning job can be executing steps in multiple modes, such as training, evaluating, or predicting. +Tornasole provides you the construct of a `mode` to keep data from these modes separate +and make it easy for analysis. 
To leverage this functionality you have to +call the `set_mode` function of hook such as the following call `hook.set_mode(modes.TRAIN)`. +The different modes available are `modes.TRAIN`, `modes.EVAL` and `modes.PREDICT`. + +If the mode was not set, all steps will be available together. + +#### Collection +Collection object helps group tensors for easier handling of tensors being saved. +A collection has its own list of tensors, include regex patterns, [reduction config](#reductionconfig) and [save config](#saveconfig). +This allows setting of different save and reduction configs for different tensors. +These collections are then also available during analysis. +Tornasole will save the value of tensors in collection, if the collection is included in `include_collections` param of the [hook](#hook). + +Refer [API](api.md) for all methods available when using collections such +as setting SaveConfig, +ReductionConfig for a specific collection, or retrieving all collections. + +Please refer to [creating a collection](#creating-a-collection) to get overview of how to +create collection and adding tensors to collection. + +#### SaveConfig +SaveConfig class allows you to customize the frequency of saving tensors. +The hook takes a SaveConfig object which is applied as +default to all tensors included. +A collection can also have its own SaveConfig object which is applied +to the tensors belonging to that collection. + +SaveConfig also allows you to save tensors when certain tensors become nan. +This list of tensors to watch for is taken as a list of strings representing names of tensors. + +The parameters taken by SaveConfig are: + +- `save_interval`: This allows you to save tensors every `n` steps +- `save_steps`: Allows you to pass a list of step numbers at which tensors should be saved + +Refer [API](api.md) for all parameters available and detailed descriptions for them, as well as example SaveConfig objects. 
+ +#### ReductionConfig +ReductionConfig allows the saving of certain reductions of tensors instead +of saving the full tensor. By reduction here we mean an operation that converts the tensor to a scalar. The motivation here is to reduce the amount of data +saved, and increase the speed in cases where you don't need the full tensor. +The reduction operations which are computed in the training process and then saved. +During analysis, these are available as reductions of the original tensor. +**Please note that using reduction config means that you will not have +the full tensor available during analysis, so this can restrict what you can do with the tensor saved.** +The hook takes a ReductionConfig object which is applied as default to all tensors included. +A collection can also have its own ReductionConfig object which is applied +to the tensors belonging to that collection. + +**Examples** + +- ```ReductionConfig(abs_reductions=['min','max','mean'])``` Save min, max, mean on absolute values of the tensors included + +- ```ReductionConfig(reductions=['min','max','mean'])``` Save min, max, mean of the tensors included + +- ```ReductionConfig(norms=['l1'])``` Saves l1 norm of the tensors included + + +These reduction config instances can be passed to the hook as follows + +``` + import tornasole.mxnet as tm + global_reduce_config = tm.ReductionConfig(reductions=["max", "mean"]) + hook = tm.TornasoleHook(out_dir=out_dir, save_config=global_save_config,reduction_config=global_reduce_config) +``` + +Or ReductionConfig can be specified for an individual collection as follows + +``` +import tornasole.mxnet as tm +tm.get_collection("ReluActivation").include(["relu*"]) +tm.get_collection("ReluActivation").set_save_config(SaveConfig(save_steps=[4,5,6])) +tm.get_collection("ReluActivation").set_reduction_config(ReductionConfig(reductions=["min"], abs_reductions=["max"])) +... 
+tm.get_collection("flatten").include(["flatten*"]) +tm.get_collection("flatten").set_save_config(SaveConfig(save_steps=[4,5,6])) +tm.get_collection("flatten").set_reduction_config(ReductionConfig(norms=["l1"], abs_norms=["l2"])) +hook = TornasoleHook(out_dir=out_dir, include_collections=['weights', 'bias','gradients', + 'default', 'ReluActivation', 'flatten']) +``` + +Refer [API](api.md) for a list of the reductions available as well as examples. + + +### How to save tensors + +There are different ways to save tensors when using Tornasole. +Tornasole provides easy ways to save certain standard tensors by way of default collections (a Collection represents a group of tensors). +Examples of such collections are 'weights', 'gradients', 'bias' and 'default'. +Besides the tensors in above default collections, you can save tensors by name or regex patterns on those names. +Users can also specify a certain block in the model to save the inputs and outputs of that block. +This section will take you through these ways in more detail. + +#### Saving the tensors with _include\_regex_ +The TornasoleHook API supports _include\_regex_ parameter. The users can specify a regex pattern with this pattern. The TornasoleHook will store the tensors that match with the specified regex pattern. With this approach, users can store the tensors without explicitly creating a Collection object. The specified regex pattern will be associated with 'default' Collection and the SaveConfig object that is associated with the 'default' collection. + +#### Default Collections +Currently, the tornasole\_mxnet hook creates Collection objects for 'weights', 'gradients', 'bias' and 'default'. These collections contain the regex pattern that match with tensors of type weights, gradient and bias. The regex pattern for the 'default' collection is set when user specifies _include\_regex_ with TornasoleHook or sets the _SaveAll=True_. 
These collections use the SaveConfig parameter provided with the TornasoleHook initialization. The TornasoleHook will store the related tensors, if user does not specify any special collection with _include\_collections_ parameter. If user specifies a collection with _include\_collections_ the above default collections will not be in effect. + +#### Custom Collections +You can also create any other customized collection yourself. +You can create new collections as well as modify existing collections + +##### Creating a collection +Each collection should have a unique name (which is a string). You can create collections by invoking helper methods as described in the [API](api.md) documentation + +``` +import tornasole.mxnet as tm +tm.get_collection('weights').include(['weight']) +``` + +##### Adding tensors +Tensors can be added to a collection by either passing an include regex parameter to the collection. +If you don't know the name of the tensors you want to add, you can also add the tensors to the collection +by the variables representing the tensors in code. The following sections describe these two scenarios. + +###### Adding tensors by regex +If you know the name of the tensors you want to save and can write regex +patterns to match those tensornames, you can pass the regex patterns to the collection. +The tensors which match these patterns are included and added to the collection. + +``` +import tornasole.mxnet as tm +tm.get_collection('ReluActivation').include(["relu*", "input_*"]) +``` + +###### Adding tensors from Gluon block +If users want to log the inputs and outputs of a particular block in the Gluon model. They can do so by creating a collection as shown below. + +``` +import tornasole.mxnet as tm +tm.get_collection('Conv2DBlock').add_block_tensors(conv2d, inputs=True, outputs=True) +``` + +For creating this collection, users must have access to the block object whose inputs and outputs are to be logged. 
+ +#### Saving All Tensors +Tornasole makes it easy to save all the tensors in the model. You just need to set the flag `save_all=True` when creating the hook. This creates a collection named 'all' and saves all the tensors under that collection. +**NOTE : Storing all the tensors will slow down the training and will increase the storage consumption.** + + +### More Examples +| Example Type | Logging Weights and Gradients | Logging inputs and outputs of the model | Logging inputs and outputs of a block in the model. | Saving all tensors. | Vanishing Gradient demo | +| --------------- | ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | +| Link to Example | [mnist\_gluon\_basic\_hook\_demo.py](../../examples/mxnet/scripts/mnist_gluon_basic_hook_demo.py) | [mnist\_gluon\_model\_input\_output\_demo.py](../../examples/mxnet/scripts/mnist_gluon_model_input_output_demo.py) | [mnist\_gluon\_block\_input\_output\_demo.py](../../examples/mxnet/scripts/mnist_gluon_block_input_output_demo.py) | [mnist\_gluon\_save\_all\_demo.py](../../examples/mxnet/scripts/mnist_gluon_save_all_demo.py) | [mnist\_gluon\_vg\_demo.py](../../examples/mxnet/scripts/mnist_gluon_vg_demo.py) | + +#### Logging the weights and gradients of the model + +The [mnist\_gluon\_basic\_hook\_demo.py](../../examples/mxnet/scripts/mnist_gluon_basic_hook_demo.py) shows end to end example of how to create and register Tornasole hook that can log tensors of model weights and their gradients. + +Here is how to create a hook for this purpose. + +``` + # Create a tornasole hook. The initialization of hook determines which tensors + # are logged while training is in progress. + # Following function shows the default initialization that enables logging of + # weights, biases and gradients in the model. 
+def create_tornasole_hook(output_s3_uri): + # With the following SaveConfig, we will save tensors for steps 1, 2 and 3 + # (indexing starts with 0). + save_config = SaveConfig(save_steps=[1, 2, 3]) + # Create a hook that logs weights, biases and gradients while training the model. + hook = TornasoleHook(out_dir=output_s3_uri, save_config=save_config) + return hook +``` + +Here is how to register the hook + +``` + # Create a model using gluon API. The tornasole hook is currently + # supports MXNet gluon models only. +def create_gluon_model(): + # Create Model in Gluon + net = nn.HybridSequential() + net.add(nn.Conv2D(channels=6, kernel_size=5, activation='relu'), + nn.MaxPool2D(pool_size=2, strides=2), +... + nn.Dense(10)) + net.initialize(init=init.Xavier(), ctx=mx.cpu()) + return net +... + # Create a Gluon Model. + net = create_gluon_model() +... + # Create a Gluon Model. + net = create_gluon_model() + hook = create_tornasole_hook(output_s3_uri) + hook.register(net) +``` + +The example can be invoked as shown below. **Ensure that the s3 bucket specified in command line is accessible for read and write operations** + +``` +python examples/mxnet/scripts/mnist_gluon_basic_hook_demo.py --output-uri s3://tornasole-testing/basic-mxnet-hook --trial-id trial-one +``` + +For detail command line help run + +``` +python examples/mxnet/scripts/mnist_gluon_basic_hook_demo.py --help +``` + +#### Logging the inputs and output of a model along with weights and gradients +The [mnist\_gluon\_model\_input\_output\_demo.py](../../examples/mxnet/scripts/mnist_gluon_model_input_output_demo.py) shows how to create and register the tornasole hook that can log the inputs and output of the model in addition to weights and gradients tensors. 
+In order to achieve this we would need to create a collection as follows + +``` + # The names of input and output tensors of a block are in following format + # Inputs : _input_, and + # Output : _output + # In order to log the inputs and output of a model, we will create a collection as follows: + tm.get_collection('TopBlock').add_block_tensors(block, inputs=True, outputs=True) +``` + +The name of the Collection is "TopBlock". We have created it around top level block of the model which represents the whole complete model itself to this collection. As a result this collection will contain tensors that were inputs and outputs of this block (e.g. model itself) at corresponding training steps. +Following code shows how to initialize the hook with the above collection. + +``` + # Create a tornasole hook. The initialization of hook determines which tensors + # are logged while training is in progress. + # Following function shows the hook initialization that enables logging of + # weights, biases and gradients in the model along with the inputs and outputs of the model. +def create_tornasole_hook(output_s3_uri, block): + # Create a SaveConfig that determines tensors from which steps are to be stored. + # With the following SaveConfig, we will save tensors for steps 1, 2 and 3. + save_config = SaveConfig(save_steps=[1, 2, 3]) + # The names of input and output tensors of a block are in following format + # Inputs : _input_, and + # Output : _output + # In order to log the inputs and output of a model, we will create a collection as follows: + tm.get_collection('TopBlock').add_block_tensors(block, inputs=True, outputs=True) + # Create a hook that logs weights, biases, gradients and inputs outputs of model while training. + hook = TornasoleHook(out_dir=output_s3_uri, save_config=save_config, include_collections=['weights', 'gradients', 'bias','TopBlock']) + return hook +``` + +Here is how to register the above hook. + +``` + # Create a model using gluon API. 
The tornasole hook currently
+ dense_2 = nn.Dense(10) + child_blocks.append(dense_2) + net.add(conv2d_0, maxpool2d_0, conv2d_1, maxpool2d_1, flatten_0, dense_0, dense_1, dense_2) + net.initialize(init=init.Xavier(), ctx=mx.cpu()) + return net, child_blocks +``` + +We can then create a collection to log the input output tensors of one of the child_blocks. For example, child_block[0] is passed to *create_tornasole_hook* function as 'block' and the function creates collection for that block as shown below + +``` + # Create a tornasole hook. The initialization of hook determines which tensors + # are logged while training is in progress. + # Following function shows the hook initialization that enables logging of + # weights, biases and gradients in the model along with the inputs and output of the given + # child block. +def create_tornasole_hook(output_s3_uri, block): + # Create a SaveConfig that determines tensors from which steps are to be stored. + # With the following SaveConfig, we will save tensors for steps 1, 2 and 3. + save_config = SaveConfig(save_steps=[1, 2, 3]) + # The names of input and output tensors of a block are in following format + # Inputs : _input_, and + # Output : _output + # In order to log the inputs and output of a model, we will create a collection as follows + tm.get_collection(block.name).add_block_tensors(block, inputs=True, outputs=True) + # Create a hook that logs weights, biases, gradients and inputs outputs of model while training. + hook = TornasoleHook(out_dir=output_s3_uri, save_config=save_config, include_collections=[ + 'weights', 'gradients', 'bias', block.name]) + return hook +``` + +The name of the Collection is kept same as the name of block. +We have created it around a block in the model. +As a result this collection will contain tensors that were inputs and outputs of +this block (e.g. Conv2D block) at corresponding training steps. + + +Here is how to register the above hook. + +``` + # Create a Gluon Model. 
+ net,child_blocks = create_gluon_model() + # Create a tornasole hook for logging the desired tensors. + # The output_s3_uri is a the URI for the s3 bucket where the tensors will be saved. + # The trial_id is used to store the tensors from different trials separately. + output_s3_uri=opt.output_s3_uri + # For creating a tornasole hook that can log inputs and output of the specific child block in the model, + # we will pass the desired block object to the create_tornasole_hook function. + # In the following case, we are attempting log inputs and output of the first Conv2D block. + hook = create_tornasole_hook(output_s3_uri, child_blocks[0]) + # Register the hook to the top block. + hook.register_hook(net) +``` + +The example can be invoked as shown below. +**Ensure that the s3 bucket specified in command line is accessible for read and write operations** + +``` +python examples/mxnet/scripts/mnist_gluon_block_input_output_demo.py --output-s3-uri s3://tornasole-testing/block-io-mxnet-hook/trial-one +``` + +For detail command line help run + +``` +python examples/mxnet/scripts/mnist_gluon_block_input_output_demo.py --help +``` + +#### Saving all tensors in the model +The [mnist\_gluon\_save_all\_demo.py](../../examples/mxnet/scripts/mnist_gluon_save_all_demo.py) shows how to store every tensor in the model. +As mentioned above, for saving all the tensors users not required to create a special collection. Users can set _save_all_ flag while creating TornasoleHook as shown below. + +``` + # Create a tornasole hook. The initialization of hook determines which tensors + # are logged while training is in progress. + # Following function shows the initialization of tornasole hook that enables logging of + # all the tensors in the model. +def create_tornasole_hook(output_s3_uri): + # Create a SaveConfig that determines tensors from which steps are to be stored. + # With the following SaveConfig, we will save tensors for steps 1, 2 and 3. 
+ save_config = SaveConfig(save_steps=[1, 2, 3]) + # Create a hook that logs all the tensors seen while training the model. + hook = TornasoleHook(out_dir=output_s3_uri, save_config=save_config, save_all=True) + return hook +``` + +Here is how to register the above hook. + +``` + # Create a model using gluon API. The tornasole hook is currently + # supports MXNet gluon models only. +def create_gluon_model(): + # Create Model in Gluon + net = nn.HybridSequential() + net.add(nn.Conv2D(channels=6, kernel_size=5, activation='relu'), + nn.MaxPool2D(pool_size=2, strides=2), +... + nn.Dense(10)) + net.initialize(init=init.Xavier(), ctx=mx.cpu()) + return net +... +net = create_gluon_model() +hook = create_tornasole_hook(output_s3_uri) +hook.register(net) +``` + +The example can be invoked as shown below. **Ensure that the s3 bucket specified in command line is accessible for read and write operations** + +``` +python examples/mxnet/scripts/mnist_gluon_save_all_demo.py --output-s3-uri s3://tornasole-testing/saveall-mxnet-hook/trial-one +``` +For detail command line help run + +``` +python examples/mxnet/scripts/mnist_gluon_save_all_demo.py --help +``` + +#### Example demonstrating the vanishing gradient +The example [mnist\_gluon\_vg\_demo.py](../../examples/mxnet/scripts/mnist_gluon_vg_demo.py) is implemented to show how Tornasole is useful in detecting the vanishing gradient problem. The learning_rate and momentum in this example are set in a such way that the training will encounter the vanishing gradient issue. +The example can be invoked as follows + +``` +python examples/mxnet/scripts/mnist_gluon_vg_demo.py --output-uri s3://tornasole-testing/vg-demo/trial-one +``` + +## Analyzing the Results + +This library enables users to collect the desired tensors at desired frequency while MXNet job is running. +The tensor data generated during this job can be analyzed with various +rules that check for vanishing gradients, exploding gradients, etc. 
+For example, the [mnist\_gluon\_vg\_demo.py](../../examples/mxnet/scripts/mnist_gluon_vg_demo.py) +has the vanishing gradient issue. When the tensors generated by this example are +analyzed by 'VanishingGradient' rule, it shows in which steps the model encounters the vanishing gradient issue. + +``` +python -m tornasole.rules.rule_invoker --trial-dir s3://tornasole-testing/vg-demo/trial-one --rule-name VanishingGradient +``` + +For details regarding how to analyze the tensor data, usage of existing rules or writing new rules, +please refer to [analysis documentation](../analysis/README.md). + + +## FAQ +#### Logging +You can control the logging from Tornasole by setting the appropriate +level for the python logger `tornasole` using either of the following approaches. + +**In Python code** +``` +import logging +logging.getLogger('tornasole').setLevel = logging.INFO +``` + +**Using environment variable** +You can also set the environment variable `TORNASOLE_LOG_LEVEL` as below + +``` +export TORNASOLE_LOG_LEVEL=INFO +``` +Log levels available are 'INFO', 'DEBUG', 'WARNING', 'ERROR', 'CRITICAL', 'OFF'. + +#### S3Access +The instance running tornasole in s3 mode needs to have s3 access. There are different ways to provide an instance to your s3 account. +- If you using EC2 instance, you should launch your instance with proper iam role to access s3. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +- If you are using mac or other machine, you can create a IAM user for your account to have s3 access by following this guide (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html) and then configure your instance to use your AWS_ACCESS_KEY_ID AND AWS_SECRET_KEY_ID by using doc here https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html +- Once you are done configuring, please verify that below is working and buckets returned are from the account and region you want to use. 
+``` +aws s3 ls +``` + +## ContactUs +We would like to hear from you. If you have any question or feedback, please reach out to us tornasole-users@amazon.com + +## License +This library is licensed under the Apache 2.0 License. diff --git a/docs/mxnet/api.md b/docs/mxnet/api.md new file mode 100644 index 0000000000..498ae01b58 --- /dev/null +++ b/docs/mxnet/api.md @@ -0,0 +1,201 @@ +## API + +Tornasole MXNet provides the following constructs: +### Hook +TornasoleHook is the entry point for Tornasole into your program. + +``` + + class TornasoleHook + """ + A class used to represent the hook which gets attached to the + training process. + + + + Attributes + ---------- + out_dir : str + represents a path to which the outputs of tornasole will be written to. + This can be a local path or + an S3 prefix of the form s3://bucket_name/prefix + + dry_run : bool + when dry_run is set to True, behavior is only described in the log file. + The tensors are not actually saved. + + worker: str + name of worker in a multi process training job + outputs and tensors are organized by this name during retrieval. + + save_config: SaveConfig object + SaveConfig allows you to customize when tensors are saved. + Hook takes SaveConfig object which is applied as + default for all included tensors. + A collection can optionally have its own SaveConfig object + which overrides this for its tensors. + Refer to documentation for SaveConfig. + + reduction_config: ReductionConfig object + ReductionConfig allows you to save tensors as their reductions + instead of saving full tensors. + If ReductionConfig is passed then the chosen reductions are applied + as default for all tensors included. + A collection can optionally have its own ReductionConfig object + which overrides this for its tensors. + + include_regex: list of str + takes as input the list of string representing regular expressions. Tensors whose names match + these regular expressions will be saved. 
These tensors will be available as part of the `default` + collection. + + include_collections: list of str + takes as input the names of collections which should be saved. + by default, ['weights','gradients', 'bias', 'default'] are passed to include_collections. + + save_all: bool + a shortcut for saving all tensors in the model. + tensors are all grouped into the `default` collection + + def __init__(self, + out_dir, + dry_run=False, + worker='worker0', + reduction_config=None, + save_config=SaveConfig(save_interval=100), + include_regex=None, + include_collections=['weights', 'gradients', 'bias', 'default'], + save_all=False, + ): +``` + + +It also has an important method which can be used to set the appropriate mode. +Modes can refer to 'training', 'evaluation' or 'prediction'. They can be set as follows: + +```hook.set_mode(ts.modes.TRAIN)```, + +```hook.set_mode(ts.modes.EVAL)``` or + +```hook.set_mode(ts.modes.PREDICT)```. + +This allows you to group steps by mode which allows for clearer analysis. Tornasole +also allows you to see a global ordering of steps which makes it clear after how many training +steps did a particular evaluation step happen. If you do not set this mode, all steps are saved under +a `default` mode. + +The _save\_config_ parameter is optional. If not specified, the TornasoleHook will use a default SaveConfig that stores tensors with step_interval=100. That is, the tensors will be saved every 100th step. + +The _reduction\_config_ is optional. If not specified, the reductions are not applied to the stored tensors. + + +### Collection + +Collection object helps group tensors for easier handling of tensors being saved. +A collection has its own list of tensors, include/exclude regex patterns, reduction config and save config. +This allows setting of different save and reduction configs for different tensors. +These collections are then also available during analysis with `tornasole_rules`. 
+ +#### Creating or accessing a collection + +``` +import tornasole.mxnet as ts +``` + + +| Function | Behavior | +|----|----| +| ```ts.get_collection(collection_name)``` | Returns the collection with the given name. Creates the collection if it doesn't already exist | +| ```ts.get_collections()``` | Returns all collections as a dictionary with the keys being names of the collections | +| ```ts.add_to_collection(collection_name, args)``` | Equivalent to calling `coll.add(args)` on the collection with name `collection_name` | +| ```ts.add_to_default_collection(args)``` | Equivalent to calling `coll.add(args)` on the collection with the name `default`| +| ```ts.reset_collections()``` | Clears all collections | + +#### Methods +The following methods can be called on a collection object. + + +| Method | Behavior | +|----|----| +| ```coll.include(t)``` | Takes a regex or a list of regex to match tensors to be included to the collection | +| ```coll.add_block_tensors(block, input=False, output=False)``` | Takes an instance Gluon block, input and output flags. Users can use this Collection to log input/output tensors for a specific block | +| ```coll.get_include_regex()``` | Returns include_regex for the collection | +| ```coll.get_save_config()``` | Returns save config for the collection | +| ```coll.set_save_config(s)``` | Sets save config for the collection | +| ```coll.get_reduction_config()``` | Returns reduction config for the collection | +| ```coll.set_reduction_config()``` | Sets reduction config for the collection | + +### SaveConfig +SaveConfig class allows you to customize the frequency of saving tensors. +The hook takes a SaveConfig object which is applied as +default to all tensors included. +A collection can also have its own SaveConfig object which is applied +to the tensors belonging to that collection. + +SaveConfig also allows you to save tensors when certain tensors become nan. 
+This list of tensors to watch for is taken as a list of strings representing names of tensors. + +``` + + class SaveConfig: + + Attributes + ---------- + + save_interval: int + allows you to save every n steps by passing n to save_interval + + skip_num_steps: int + allows you to avoid saving for the first n steps of the job. + it defaults to 0, i.e. don't skip any steps in the beginning. + + save_steps: list of int + save at all the steps given in this list. + if this is given, it ignores the save_interval. + + when_nan: list of str representing name of tensor + saves the tensors to which this saveConfig is attached + whenever any of the tensors in this list become nan or infinite. + This means that if your save_interval is set to 10, and 'loss' is in when_nan + your tensors will be saved whenever save_interval is multiple of 10 as well as + whenever loss becomes nan or infinite. +``` + +The default value of _save\_interval_ is 100. The TornasoleHook that uses a default SaveConfig object will store the tensors every 100th step. + + +### ReductionConfig +ReductionConfig allows the saving of certain reductions of tensors instead +of saving the full tensor. The motivation here is to reduce the amount of data +saved, and increase the speed in cases where you don't need the full +tensor. The reduction operations which are computed in the training process +and then saved. +During analysis, these are available as reductions of the original tensor. +Please note that using reduction config means that you will not have +the full tensor available during analysis, so this can restrict what you can do with the tensor saved. +The hook takes a ReductionConfig object which is applied as default to all tensors included. +A collection can also have its own ReductionConfig object which is applied +to the tensors belonging to that collection. + +``` + + Attributes + ---------- + + reductions: list of str + takes list of names of reductions to be computed. 
+ should be one of 'min', 'max', 'median', 'mean', 'std', 'sum', 'prod' + + abs_reductions: list of str + takes list of names of reductions to be computed after converting the tensor + to abs(tensor) i.e. reductions are applied on the absolute values of tensor. + should be one of 'min', 'max', 'median', 'mean', 'std', 'sum', 'prod' + + norms: list of str + takes names of norms to be computed of the tensor. + should be one of 'l1', 'l2' + + abs_norms: list of str + takes names of norms to be computed of the tensor after taking absolute value + should be one of 'l1', 'l2' +``` diff --git a/docs/pytorch/README.md b/docs/pytorch/README.md new file mode 100644 index 0000000000..eb652662e0 --- /dev/null +++ b/docs/pytorch/README.md @@ -0,0 +1,527 @@ +# Tornasole for Pytorch +Tornasole is an upcoming AWS service designed to be a debugger +for machine learning models. It lets you go beyond just looking +at scalars like losses and accuracies during training and +gives you full visibility into all tensors 'flowing through the graph' +during training or inference. + +Using Tornasole is a two step process: + +**Saving tensors** +This needs the `tornasole` package built for the appropriate framework. This package lets you collect the tensors you want at the frequency +that you want, and save them for analysis. +Please follow the appropriate Readme page to install the correct version. This page is for using Tornasole with Pytorch. + +**Analysis** +Please refer to [this page](../analysis/README.md) for more details about how to analyze. +That said, we do provide a few example analysis commands below +so as to provide an end to end flow. +The analysis of these tensors can be done on a separate machine +in parallel with the training job. + +## Installation +#### Prerequisites +- **Python 3.6** +- Tornasole can work in local mode or remote(s3) mode. You can skip this, if you want to try [local mode example](#tornasole-local-mode-example). 
+This is necessary to setup if you want to try [s3 mode example](#tornasole-s3-mode-example). +For running in S3 mode, you need to make sure that instance you are using has proper credentials set to have S3 write access. +Try the below command - +``` + aws s3 ls +``` +If you see errors, then most probably your credentials are not properly set. +Please follow [FAQ on S3](#s3access) to make sure that your instance has proper S3 access. + +- We recommend using the `pytorch_p36` conda environment on EC2 machines launched with the AWS Deep Learning AMI. +You can activate this by doing: `source activate pytorch_p36`. + +- If you are not using the above environment, please ensure that you have the PyTorch framework installed. + +#### Instructions +**Make sure that your aws account is whitelisted for Tornasole. [ContactUs](#contactus)**. + +Once your account is whitelisted, you should be able to install the `tornasole` package built for PyTorch as follows: + +``` +aws s3 cp s3://tornasole-binaries-use1/tornasole_pytorch/py3/tornasole-0.2.1-py2.py3-none-any.whl . +pip install tornasole-0.2.1-py2.py3-none-any.whl +``` + +**Please note** : If, while installing tornasole, you get a version conflict issue between botocore and boto3, +you might need to run the following +``` +pip uninstall -y botocore boto3 aioboto3 aiobotocore && pip install botocore==1.12.91 boto3==1.9.91 aiobotocore==0.10.2 aioboto3==6.4.1 +``` + +## Quickstart +If you want to quickly run some examples, you can jump to [examples](#examples) section. +You can also see this [pytorch notebook example](../.../../examples/pytorch/notebooks/PyTorch-SimpleInteractiveAnalysis.ipynb) +to see tornasole working. + +Integrating Tornasole into the training job can be accomplished by following steps below. 
+ +### Import the tornasole_hook package +Import the TornasoleHook class along with other helper classes in your training script as shown below + +``` +from tornasole.pytorch import TornasoleHook +from tornasole.pytorch import Collection +from tornasole import SaveConfig +import tornasole.pytorch as ts +``` + +### Instantiate and initialize tornasole hook + +``` + # Create SaveConfig that instructs engine to log graph tensors every 10 steps. + save_config = SaveConfig(save_interval=10) + # Create a hook that logs tensors of weights, biases and gradients while training the model. + output_s3_uri = 's3://my_pytorch_training_debug_bucket' + trial_id = '12345678-abcd-1234-abcd-1234567890ab' # ID to easily identify training job (e.g. trial) + hook = TornasoleHook(out_dir=output_s3_uri, save_config=save_config) +``` + +For additional details on TornasoleHook, SaveConfig and Collection please refer to the [API documentation](api.md) + +### Register Tornasole hook to the model before starting of the training. + +Here is a sample PyTorch model you may use if you wish (this is enclosed in the +``` +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.add_module('fc1', nn.Linear(20,500)) + self.add_module('relu1', nn.ReLU()) + self.add_module('fc2', nn.Linear(500, 10)) + self.add_module('relu2', nn.ReLU()) + self.add_module('fc3', nn.Linear(10, 4)) + def forward(self, x_in): + fc1_out = self.fc1(x_in) + relu1_out = self.relu1(fc1_out) + fc2_out = self.fc2(relu1_out) + relu2_out = self.relu2(fc2_out) + fc3_out = self.fc3(relu2_out) + out = F.log_softmax(fc3_out, dim=1) + return out + +def create_model(): + device = torch.device("cpu") + return Net().to(device) +``` +After creating or loading the desired model, users can register the hook with the model as shown below. + +``` +net = create_model() +# Apply hook to the model (e.g. 
instruct engine to recognize hook configuration +# and enable mode in which engine will log graph tensors +hook.register_hook(net) +``` + +#### Set the mode +Set the mode you are running the job in. This helps you group steps by mode, +for easier analysis. +If you do not specify this, it saves steps under a `default` mode. +``` +hook.set_mode(ts.modes.TRAIN) +``` + +## Examples +#### Simple CPU training + +##### Tornasole local mode example +The example [pytorch/demos/pytorch\_hook\_demos.py](../.../../examples/pytorch/scripts/pytorch_hook_demos.py) is implemented to show how Tornasole is useful in detecting the vanishing gradient and exploding tensor problem. Using the rule_type flag, our scripts will set the learning_rate and momentum in this example in a such way that the training will encounter the vanishing gradient/exploding tensor issue. +For Vanishing Gradient, generate data using the following command +``` +python examples/pytorch/scripts/pytorch_hook_demos.py --output-uri ./tornasole-testing/demo_vg/ --rule_type vanishing_grad +``` + +You can monitor the vanishing gradient by doing the following: + +``` +python -m tornasole.rules.rule_invoker --trial-dir ./tornasole-testing/demo_vg/ --rule-name VanishingGradient +``` + +You can execute a similar command to generate data for the exploding tensor example + +``` +python examples/pytorch/scripts/pytorch_hook_demos.py --output-uri ./tornasole-testing/demo_eg/ --rule_type exploding_tensor +``` + +It can be analyzed through the following command. 
+``` +python -m tornasole.rules.rule_invoker --trial-dir ./tornasole-testing/demo_eg/ --rule-name ExplodingTensor +``` + +**Note: You can also try some further analysis on tensors saved by following [programming model](../analysis/README.md#the-programming-model) section of analysis README.** + +##### Tornasole S3 mode example +Vanishing Gradient: +``` +python examples/pytorch/scripts/pytorch_hook_demos.py --output-uri s3://tornasole-testing/vg-demo --rule_type vanishing_grad +``` +Exploding Tensor: +``` +python examples/pytorch/scripts/pytorch_hook_demos.py --output-uri s3://tornasole-testing/eg-demo --rule_type exploding_tensor +``` + +You can monitor the tensors for vanishing gradients by doing the following +``` +python -m tornasole.rules.rule_invoker --trial-dir s3://tornasole-testing/vg-demo --rule-name VanishingGradient +``` +And exploding tensors by doing the following +``` +python -m tornasole.rules.rule_invoker --trial-dir s3://tornasole-testing/eg-demo --rule-name ExplodingTensor +``` +**Note: You can also try some further analysis on tensors saved by following [programming model](../analysis/README.md#the-programming-model) section of analysis README.** + +## API +Please refer to [this document](api.md) for description of all the functions and parameters that our APIs support + +#### Hook +TornasoleHook is the entry point for Tornasole into your program. +Some key parameters to consider when creating the TornasoleHook are the following: + +- `outdir`: This represents the path to which the outputs of tornasole will be written to. This can be a local path or an S3 prefix of the form s3://bucket_name/prefix. +- `save_config`: This is an object of [SaveConfig](#saveconfig). The SaveConfig allows user to specify when the tensors are to be stored. User can choose to specify the number of steps or the intervals of steps when the tensors will be stored. +- `include_collections`: This represents the [collections](#collection) to be saved. 
Each collection can have its own SaveConfig item. + +Refer [API](api.md) for all parameters available and detailed descriptions. + +### Mode +A machine learning job can be executing steps in multiple modes, such as training, evaluating, or predicting. +Tornasole provides you the construct of a `mode` to keep data from these modes separate +and make it easy for analysis. To leverage this functionality you have to +call the `set_mode` function of hook such as the following call `hook.set_mode(modes.TRAIN)`. +The different modes available are `modes.TRAIN`, `modes.EVAL` and `modes.PREDICT`. + +If the mode was not set, all steps will be available together. + +#### Collection +Collection object helps group tensors for easier handling of tensors being saved. +A collection has its own list of tensors, include regex patterns, [reduction config](#reductionconfig) and [save config](#saveconfig). +This allows setting of different save and reduction configs for different tensors. +These collections are then also available during analysis. +Tornasole will save the value of tensors in collection, if the collection is included in `include_collections` param of the [hook](#hook). + +Refer [API](api.md) for all methods available when using collections such as setting SaveConfig, +ReductionConfig for a specific collection, or retrieving all collections. + +Please refer to [creating a collection](#creating-a-collection) to get overview of how to create collection and adding tensors to collection. + +#### SaveConfig +SaveConfig class allows you to customize the frequency of saving tensors. +The hook takes a SaveConfig object which is applied as +default to all tensors included. +A collection can also have its own SaveConfig object which is applied +to the tensors belonging to that collection. + +SaveConfig also allows you to save tensors when certain tensors become nan. +This list of tensors to watch for is taken as a list of strings representing names of tensors. 
+ +The parameters taken by SaveConfig are: + +- `save_interval`: This allows you to save tensors every `n` steps +- `save_steps`: Allows you to pass a list of step numbers at which tensors should be saved + +Refer [API](api.md) for all parameters available and detailed descriptions for them, as well as example SaveConfig objects. + +#### ReductionConfig +ReductionConfig allows the saving of certain reductions of tensors instead +of saving the full tensor. By reduction here we mean an operation that converts the tensor to a scalar. The motivation here is to reduce the amount of data +saved, and increase the speed in cases where you don't need the full tensor. +The reduction operations which are computed in the training process and then saved. +During analysis, these are available as reductions of the original tensor. +**Please note that using reduction config means that you will not have +the full tensor available during analysis, so this can restrict what you can do with the tensor saved.** +The hook takes a ReductionConfig object which is applied as default to all tensors included. +A collection can also have its own ReductionConfig object which is applied +to the tensors belonging to that collection. + +**Examples** +- ```ReductionConfig(abs_reductions=['min','max','mean'])``` Save min, max, mean on absolute values of the tensors included + +- ```ReductionConfig(reductions=['min','max','mean'])``` Save min, max, mean of the tensors included + +- ```ReductionConfig(norms=['l1'])``` Saves l1 norm of the tensors included + +These reduction config instances can be passed to the hook as follows +``` +import tornasole.pytorch as ts +hook = ts.TornasoleHook(..., reduction_config=ts.ReductionConfig(norms=['l1']), ...) +``` +Refer [API](api.md) for a full list of the reductions available. + + +### How to save tensors + +There are different ways to save tensors when using Tornasole. 
+Tornasole provides easy ways to save certain standard tensors by way of default collections (a Collection represents a group of tensors). +Examples of such collections are 'weights', 'gradients'. +Besides these tensors, you can save tensors by name or regex patterns on those names. +Users can also specify a certain module in the model to save the inputs and outputs of that module. +This section will take you through these ways in more detail. + +#### Default Collections +Currently, Tornasole creates Collection objects for 'weights' and 'gradients' by default for every run. +These collections store the tensors that are corresponding trainable parameters and their gradients. + +#### Custom Collections +You can also create any other customized collection yourself. +You can create new collections as well as modify existing collections + +##### Creating a collection +Each collection should have a unique name (which is a string). Users can create or retrieve the collection by name as follows. + +``` +weight_collection = ts.get_collection('weight') +``` + +##### Adding tensors +Tensors can be added to a collection by either passing an include regex parameter to the collection. +If you don't know the name of the tensors you want to add, you can also add the tensors to the collection +by the variables representing the tensors in code. The following sections describe these two scenarios. + +###### Adding tensors by regex +If you know the name of the tensors you want to save and can write regex +patterns to match those tensornames, you can pass the regex patterns to the collection. +The tensors which match these patterns are included and added to the collection. + +``` +custom_collect = ts.get_collection("ReluActivation") +custom_collect.include(["relu*", "input_*"]) +``` + +###### Adding tensors from torch.nn Module +If users want to log the inputs and outputs of a particular module, they can do so by creating a collection as shown below. 
For the example below, assume conv2d is the module we wish to log the inputs and outputs of + +``` +module_collection = ts.get_collection('Conv2DModule') +module_collection.add_module_tensors(conv2d, inputs=True, outputs=True) +``` + +For creating this collection, users must have access to the module object whose inputs and outputs are to be logged. + +#### Saving All Tensors +Tornasole makes it easy to save all the tensors in the model. You just need to set the flag `save_all=True` when creating the hook. +This creates a collection named 'all' and saves all the tensors under that collection. +**NOTE : Storing all the tensors will slow down the training and will increase the storage consumption.** + + +### More Examples +| Example Type | Logging Weights and Gradients | Logging inputs and outputs of the model | Saving all tensors. | Vanishing Gradient demo | +| --------------- | ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | +| Link to Example | [pytorch\_hook\_demos.py](../../examples/pytorch/scripts/pytorch_hook_demos.py) | [pytorch\_hook\_demos.py](../../examples/pytorch/scripts/pytorch_hook_demos.py) | [pytorch\_hook\_demos.py](../../examples/pytorch/scripts/pytorch_hook_demos.py) | [pytorch\_hook\_demos.py](../../examples/pytorch/scripts/pytorch_hook_demos.py) | + +All script examples are in the pytorch_hook_demos.py file. The appropriate flags to set for each example are described below + + +#### Logging the weights, biases, and gradients of the model + +The [pytorch\_hook\_demos.py](../../examples/pytorch/scripts/pytorch_hook_demos.py) shows an end to end +example of how to create and register Tornasole hook that can log tensors of model weights and their gradients. +Simply pass in the appropriate argument (`--hook-type weights-bias-gradients`) to the script when running it. + +Here is how to create a hook for this purpose. + +``` + # Create Tornasole hook. 
The initializations of hook determines which tensors + # are logged while training is in progress. + # Following function shows the default initilization that enables logging of + # weights, biases and gradients in the model. + def create_tornasole_hook(output_dir): + # Create a SaveConfig that determines tensors from which steps are to be stored. + # With the following SaveConfig, we will save tensors for steps 1, 2 and 3. + save_config = SaveConfig(save_steps=[1, 2, 3]) + # Create a hook that logs ONLY weights, biases, and gradients while training the model. + hook = TornasoleHook(out_dir=output_dir, save_config=save_config) + return hook +``` + +Here is how to register the hook + +``` +# Assume your model is called net +hook = create_tornasole_hook(output_dir) +hook.register_hook(net) +``` + +The example can be invoked as shown below. You may replace the local URI with an S3 one instead, but you must **ensure that the s3 bucket specified in command line is accessible for read and write operations** + +``` +python examples/pytorch/scripts/pytorch_hook_demos.py --output-uri ./tornasole-testing/demo/ --hook-type weights-bias-gradients +``` + +For detailed command line help run + +``` +python examples/pytorch/scripts/pytorch_hook_demos.py --help +``` + +#### Logging the inputs and output of a model along with weights and gradients +The [pytorch\_hook\_demos.py](../../examples/pytorch/scripts/pytorch_hook_demos.py) also shows how to create and register the tornasole hook that can log the inputs and output of the model in addition to weights and gradients tensors. +In order to achieve this we would need to create a collection as follows + +``` +# In order to log the inputs and output of a module, we will create a collection as follows: +get_collection('l_mod').add_module_tensors(module, inputs=True, outputs=True) +``` + +The name of the Collection is "l_mod". 
We have created it around the top level module of the model which represents the whole complete model itself to this collection. As a result this collection will contain tensors that were inputs and outputs of this module (e.g. the model itself) at corresponding training steps. +The following code shows how to initialize the hook with the above collection. + +``` +def create_tornasole_hook(output_dir, module): + # The names of input and output tensors of a module are in following format + # Inputs : _input_, and + # Output : _output + # In order to log the inputs and output of a module, we will create a collection as follows: + assert module is not None + get_collection('l_mod').add_module_tensors(module, inputs=True, outputs=True) + + # Create a hook that logs weights, biases, gradients and inputs outputs of model while training. + hook = TornasoleHook(out_dir=output_dir, save_config=SaveConfig(save_steps=[i * 10 for i in range(5)]), + include_collections=['weights', 'gradients', 'bias','l_mod']) +``` + +Here is how to register the above hook. + +``` +# Assume your model is called net +hook = create_tornasole_hook(output_dir=output_dir, module=net) +hook.register_hook(net) +``` + +The example can be invoked as shown below. 
You may replace the local URI with an S3 one instead,
+but you must **ensure that the s3 bucket specified in command line is accessible for read and write operations**
+
+```
+python examples/pytorch/scripts/pytorch_hook_demos.py --output-uri ./tornasole-testing/demo/ --hook-type module-input-output
+```
+
+For detailed command line help run
+
+```
+python examples/pytorch/scripts/pytorch_hook_demos.py --help
+```
+
+#### Logging the inputs and output of a module in the model along with weights and gradients
+The [pytorch\_hook\_demos.py](../../examples/pytorch/scripts/pytorch_hook_demos.py) also shows how to
+create and register the tornasole hook that can log the inputs and output of a particular module in the
+model in addition to weights and gradients tensors. Follow the same procedure as above; just pass
+in the appropriate module into `create_tornasole_hook`.
+
+
+#### Saving all tensors in the model
+The [pytorch\_hook\_demos.py](../../examples/pytorch/scripts/pytorch_hook_demos.py) also shows how to store every tensor in the model.
+As mentioned above, for saving all the tensors users are not required to create a special collection.
+Users can set the _save_all_ flag while creating a TornasoleHook object in the manner shown below.
+
+```
+ # Create Tornasole hook. The initialization of the hook determines which tensors
+ # are logged while training is in progress.
+ # Following function shows the default initialization that enables logging of
+ # weights, biases and gradients in the model.
+ def create_tornasole_hook(output_dir):
+ # Create a SaveConfig that determines tensors from which steps are to be stored.
+ # With the following SaveConfig, we will save tensors for steps 1, 2 and 3.
+ save_config = SaveConfig(save_steps=[1, 2, 3])
+ # Create a hook that logs weights, biases, gradients, module inputs, and module outputs of all layers while training the model. 
+
+ hook = TornasoleHook(out_dir=output_dir, save_config=save_config, save_all=True)
+ return hook
+```
+
+Here is how to register the hook
+
+```
+# Assume your model is called net
+hook = create_tornasole_hook(output_dir)
+hook.register_hook(net)
+```
+
+The example can be invoked as shown below. You may replace the local URI with an S3 one instead,
+but you must **ensure that the s3 bucket specified in command line is accessible for read and write operations**
+
+```
+python examples/pytorch/scripts/pytorch_hook_demos.py --output-uri ./tornasole-testing/demo/ --hook-type saveall
+```
+
+For detailed command line help run
+
+```
+python examples/pytorch/scripts/pytorch_hook_demos.py --help
+```
+#### Example demonstrating the vanishing gradient
+The example [torch\_vg\_demo](../../examples/pytorch/scripts/pytorch_hook_demos.py) is implemented
+to show how Tornasole is useful in detecting the vanishing gradient problem.
+The learning_rate and momentum in this example are set in such a way that the training will
+encounter the vanishing gradient issue.
+The example can be invoked as follows (the `--rule_type` argument lets our example set the appropriate learning rate and momentum)
+
+```
+python examples/pytorch/scripts/pytorch_hook_demos.py --output-uri s3://tornasole-testing/vg-demo --rule_type vanishing_grad
+```
+
+#### Example demonstrating the exploding tensor
+The example [torch\_exploding\_demo](../../examples/pytorch/scripts/pytorch_hook_demos.py)
+is implemented to show how Tornasole is useful in detecting the exploding tensor problem.
+The learning_rate and momentum in this example are set in such a way that the training will encounter the exploding tensor issue. 
+The example can be invoked as follows (the `--rule_type` argument lets our example set the appropriate learning rate and momentum)
+
+```
+python examples/pytorch/scripts/pytorch_hook_demos.py --output-uri s3://tornasole-testing/eg-demo --rule_type exploding_tensor
+```
+
+## Analyzing the Results
+
+This library enables users to collect the desired tensors at desired frequency while the PyTorch job is running.
+The tensor data generated during this job can be analyzed with various rules
+that check for vanishing gradients, exploding gradients, etc.
+For example, the [torch\_vg\_demo](../../examples/pytorch/scripts/pytorch_hook_demos.py) has the vanishing gradient issue.
+When the tensors generated by the VanishingGradient example are analyzed by
+'VanishingGradient' rule, it shows in which steps the model encounters the vanishing gradient issue.
+
+```
+python -m tornasole.rules.rule_invoker --trial-dir s3://tornasole-testing/vg-demo --rule-name VanishingGradient
+```
+You can execute a similar command to analyze the tensors generated by the ExplodingTensor example
+```
+python -m tornasole.rules.rule_invoker --trial-dir s3://tornasole-testing/eg-demo --rule-name ExplodingTensor
+```
+
+For details regarding how to analyze the tensor data, usage of existing rules or
+writing new rules, please refer to [analysis documentation](../analysis/README.md).
+
+
+## FAQ
+#### Logging
+You can control the logging from Tornasole by setting the appropriate
+level for the python logger `tornasole` using either of the following approaches.
+
+**In Python code**
+```
+import logging
+logging.getLogger('tornasole').setLevel(logging.INFO)
+```
+
+**Using environment variable**
+You can also set the environment variable `TORNASOLE_LOG_LEVEL` as below
+
+```
+export TORNASOLE_LOG_LEVEL=INFO
+```
+Log levels available are 'INFO', 'DEBUG', 'WARNING', 'ERROR', 'CRITICAL', 'OFF'.
+
+#### S3Access
+The instance running tornasole in s3 mode needs to have s3 access. 
There are different ways to provide s3 access to your instance.
+- If you are using an EC2 instance, you should launch your instance with a proper IAM role to access s3. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+- If you are using a Mac or another machine, you can create an IAM user for your account to have s3 access by following this guide (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html) and then configure your instance to use your AWS_ACCESS_KEY_ID AND AWS_SECRET_KEY_ID by using doc here https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html
+- Once you are done configuring, please verify that the command below works and buckets returned are from the account and region you want to use.
+```
+aws s3 ls
+```
+
+## ContactUs
+We would like to hear from you. If you have any question or feedback, please reach out to us at tornasole-users@amazon.com
+
+## License
+This library is licensed under the Apache 2.0 License.
diff --git a/docs/pytorch/api.md b/docs/pytorch/api.md
new file mode 100644
index 0000000000..ee2a4b213b
--- /dev/null
+++ b/docs/pytorch/api.md
@@ -0,0 +1,173 @@
+## API
+
+Tornasole PyTorch provides the following constructs:
+
+### Hook
+TornasoleHook is the entry point for Tornasole into your program.
+
+
+```
+class TornasoleHook:
+
+A class used to represent the hook which gets attached to the
+ training process.
+
+ Attributes
+ ----------
+ out_dir : str
+ represents a path to which the outputs of tornasole will be written to
+ under a directory with the name `run_id`. This can be a local path or
+ an S3 prefix of the form s3://bucket_name/prefix
+ dry_run : bool
+ when dry_run is set to True, behavior is only described in the log file.
+ The tensors are not actually saved.
+
+ save_config: SaveConfig object
+ SaveConfig allows you to customize when tensors are saved.
+ The hook takes a SaveConfig object which is applied as
+ default for all included tensors. 
+ A collection can optionally have its own SaveConfig object + which overrides this for its tensors. + Refer to documentation for SaveConfig. + + include_regex: list of (str or tensor variables) + these strings can be regex expressions or simple tensor names. + if given includes the tensors matched by the expression, + or the tensors themselves which were passed. + if it is empty, does not include any tensor. + note that exclude takes precedence over include. + note also that this is for tensors not in any collection. + tensors in collections are handled through + include_collections, exclude_collections + + reduction_config: ReductionConfig object + ReductionConfig allows you to save tensors as their reductions + instead of saving full tensors. + If ReductionConfig is passed then the chosen reductions are applied + as default for all tensors included. + A collection can optionally have its own ReductionConfig object + which overrides this for its tensors. + include_regex: + include_collections: list of str + takes as input the names of collections which should be saved. + by default, collections for weights and gradients are created by the hook. + save_all: bool + a shortcut for saving all tensors in the model + + def __init__(self, + out_dir, + dry_run=False, + worker=DEFAULT_WORKER_NAME, + reduction_config=None, + save_config=default_save_config(), + include_regex=None, + include_collections=['weights', 'bias', 'gradients', 'default'], + save_all=False): +``` +### Collection + +Collection object helps group tensors for easier handling of tensors being saved. +A collection has its own list of tensors, include/exclude regex patterns, reduction config and save config. +This allows setting of different save and reduction configs for different tensors. +These collections are then also available during analysis with `tornasole_rules`. 
+ +#### Creating or accessing a collection +``` +import tornasole.pytorch as ts +``` + +| Function | Behavior | +|---|---| +| ```ts.get_collection(collection_name)``` | Returns the collection with the given name. Creates the collection if it doesn't already exist | +| ```ts.get_collections()``` | Returns all collections as a dictionary with the keys being names of the collections | +| ```ts.add_to_collection(collection_name, args)``` | Equivalent to calling `coll.add(args)` on the collection with name `collection_name` | +| ```ts.add_to_default_collection(args)``` | Equivalent to calling `coll.add(args)` on the collection with the name `default`| +| ```ts.reset_collections()``` | Clears all collections | + +#### Methods +The following methods can be called on a collection object. + +| Method | Behavior | +|---|---| +| ```coll.include(t)``` | Takes a regex or a list of regex to match tensors to be included to the collection | +| ```coll.add(t)``` | Takes an instance or list or set of tf.Operation/tf.Variable/tf.Tensor to add to the collection | +| ```coll.get_include_regex()``` | Returns include_regex for the collection | +| ```coll.get_save_config()``` | Returns save config for the collection | +| ```coll.set_save_config(s)``` | Sets save config for the collection | +| ```coll.get_reduction_config()``` | Returns reduction config for the collection | +| ```coll.set_reduction_config()``` | Sets reduction config for the collection | +| ```coll.add_module_tensors(module, input=False, output=False)``` | Takes an instance of a module, along with input and output flags. Users can use this Collection to log input/output tensors for a specific module | + + + + +### SaveConfig +SaveConfig class allows you to customize the frequency of saving tensors. +The hook takes a SaveConfig object which is applied as +default to all tensors included. A collection can also have its own SaveConfig object which is applied +to the tensors belonging to that collection. 
+ +SaveConfig also allows you to save tensors when certain tensors become nan. +This list of tensors to watch for is taken as a list of strings representing names of tensors. + +``` +class SaveConfig: + + """ + + Attributes + ---------- + + save_interval: int + save every n steps + + skip_num_steps: int + start saving after n steps + + save_steps: list of int + save at all the steps given in this list. + if this is given, it ignores the save_interval + """ +``` + +#### Examples +- ```SaveConfig(save_interval=10)``` Saving every 10 steps + +- ```SaveConfig(skip_num_steps=1000, save_interval=10)``` Save every 10 steps after skipping the first 1000 steps + +- ```SaveConfig(save_steps=[10, 500, 10000, 20000])``` Saves only at the supplied steps + +### ReductionConfig +ReductionConfig allows the saving of certain reductions of tensors instead +of saving the full tensor. The motivation here is to reduce the amount of data +saved, and increase the speed in cases where you don't need the full +tensor. The reduction operations which are computed in the training process +and then saved. +During analysis, these are available as reductions of the original tensor. +Please note that using reduction config means that you will not have +the full tensor available during analysis, so this can restrict what you can do with the tensor saved. +The hook takes a ReductionConfig object which is applied as default to all tensors included. +A collection can also have its own ReductionConfig object which is applied +to the tensors belonging to that collection. + +``` + Attributes + ---------- + + reductions: list of str + takes list of names of reductions to be computed. + should be one of 'min', 'max', 'median', 'mean', 'std', 'variance', 'sum', 'prod' + + abs_reductions: list of str + takes list of names of reductions to be computed after converting the tensor + to abs(tensor) i.e. reductions are applied on the absolute values of tensor. 
+ should be one of 'min', 'max', 'median', 'mean', 'std', 'variance', 'sum', 'prod' + + norms: list of str + takes names of norms to be computed of the tensor. + should be one of 'l1', 'l2' + + abs_norms: list of str + takes names of norms to be computed of the tensor after taking absolute value + should be one of 'l1', 'l2' + """ diff --git a/docs/tensorflow/README.md b/docs/tensorflow/README.md new file mode 100644 index 0000000000..d55531aded --- /dev/null +++ b/docs/tensorflow/README.md @@ -0,0 +1,526 @@ +# Tornasole for TensorFlow +Tornasole is an upcoming AWS service designed to be a debugger +for machine learning models. It lets you go beyond just looking +at scalars like losses and accuracies during training and +gives you full visibility into all tensors 'flowing through the graph' +during training or inference. + +Using Tornasole is a two step process: + +**Saving tensors** +This needs the `tornasole` package built for the appropriate framework. This package lets you collect the tensors you want at the frequency +that you want, and save them for analysis. +Please follow the appropriate Readme page to install the correct version. This page is for using Tornasole with TensorFlow. + +**Analysis** +Please refer to [this page](../analysis/README.md) for more details about how to analyze. +That said, we do provide a few example analysis commands below +so as to provide an end to end flow. +The analysis of these tensors can be done on a separate machine +in parallel with the training job. + +## Installation +#### Prerequisites +- **Python 3.6** +- Tornasole can work in local mode or remote(s3) mode. You can skip this, if you want to try [local mode example](#tornasole-local-mode-example). +This is necessary to setup if you want to try [s3 mode example](#tornasole-s3-mode-example). +For running in S3 mode, you need to make sure that instance you are using has proper credentials set to have S3 write access. 
+Try the below command - +``` + aws s3 ls +``` +If you see errors, then most probably your credentials are not properly set. +Please follow [FAQ on S3](#s3access) to make sure that your instance has proper S3 access. + +- We recommend using the `tensorflow_p36` conda environment on EC2 machines launched with the AWS Deep Learning AMI. +You can activate this by doing: `source activate tensorflow_p36`. + +- If you are not using the above environment, please ensure that you have the TensorFlow framework installed. + +#### Instructions +**Make sure that your aws account is whitelisted for Tornasole. [ContactUs](#contactus)**. + +Once your account is whitelisted, you should be able to install the `tornasole` package built for TensorFlow as follows: + +``` +aws s3 cp s3://tornasole-binaries-use1/tornasole_tensorflow/py3/tornasole-0.2.1-py2.py3-none-any.whl . +pip install tornasole-0.2.1-py2.py3-none-any.whl +``` + +**Please note** : If, while installing tornasole, you get a version conflict issue between botocore and boto3, +you might need to run the following +``` +pip uninstall -y botocore boto3 aioboto3 aiobotocore && pip install botocore==1.12.91 boto3==1.9.91 aiobotocore==0.10.2 aioboto3==6.4.1 +``` + +## Quickstart +If you want to quickly run some examples, you can jump to [examples](#examples) section. + +Integrating Tornasole into your job is as easy as adding the following lines of code: + +### Session based training +We need to add Tornasole Hook and use it to create a monitored session for the job. +First, we need to import `tornasole.tensorflow`. +``` +import tornasole.tensorflow as ts +``` +Then create the TornasoleHook by specifying what you want +to save, when you want to save them and +where you want to save them. +``` +hook = ts.TornasoleHook(out_dir = 's3://bucket/tornasole_outputs/trial', + include_collections = ['weights','gradients'], + save_config = ts.SaveConfig(save_interval=2)) +``` +Set the mode you are running the job in. 
This helps you group steps by mode, +for easier analysis. +If you do not specify this, it saves steps under a `GLOBAL` mode. +``` +hook.set_mode(ts.modes.TRAIN) +``` +Wrap your optimizer with TornasoleOptimizer so that +Tornasole can identify your gradients and automatically +provide these tensors as part of the `gradients` collection. +Use this new optimizer to minimize the loss. +``` +optimizer = ts.TornasoleOptimizer(optimizer) +``` +Create a monitored session with the above hook, and use this for executing your TensorFlow job. +``` +sess = tf.train.MonitoredSession(hooks=[hook]) +``` +Refer [this page](../../examples/tensorflow/training_scripts/simple/README.md) describing an example + of using Tornasole with a session based training script. +### Estimator based training +We need to create TornasoleHook and provide it to the estimator's train, predict or evaluate methods. +First, we need to import `tornasole.tensorflow`. +``` +import tornasole.tensorflow as ts +``` +Then create the TornasoleHook by specifying what you want +to save, when you want to save them and +where you want to save them. +``` +hook = ts.TornasoleHook(out_dir = 's3://bucket/tornasole_outputs/trial', + include_collections = ['weights','gradients'], + save_config = ts.SaveConfig(save_interval=2)) +``` +Set the mode you are running the job in. This helps you group steps by mode, for easier +analysis. +If you do not specify this, it saves steps under a `GLOBAL` mode. +``` +hook.set_mode(ts.modes.TRAIN) +``` +Wrap your optimizer with TornasoleOptimizer so that +Tornasole can identify your gradients and automatically +provide these tensors as part of the `gradients` collection. +Use this new optimizer to minimize the loss. +``` +opt = ts.TornasoleOptimizer(opt) +``` +Now pass this hook to the estimator object's train, predict or evaluate methods, whichever ones you want to monitor. +``` +classifier = tf.estimator.Estimator(...) 
+ +classifier.train(input_fn, hooks=[hook]) +classifier.predict(input_fn, hooks=[hook]) +classifier.evaluate(input_fn, hooks=[hook]) +``` +Refer [TF Estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator) for information on the train, predict, evaluate functions. +Refer [this page](../../examples/tensorflow/training_scripts/resnet50/README.md) describing an example of using Tornasole with the Estimator interface. + +#### Note +**Keras** support is Work in Progress. Please stay tuned! +We will also support **Eager** mode in the future. + +## Examples + +### Simple CPU training +`examples/tensorflow/training_scripts/simple/simple.py`: a simple example that +shows Tornasole in a very small example using session based training +You can run this for 100 steps while saving data locally as follows. +This run below produces tensors which have nans in them. + +##### Tornasole local mode example +``` +python examples/tensorflow/training_scripts/simple/simple.py --lr 100 \ + --scale 100000000000 --tornasole_frequency 9 --steps 100 \ + --tornasole_path ~/ts_outputs/not_good +``` + +You can monitor the exploding tensors by doing the following +``` +python -m tornasole.rules.rule_invoker \ + --trial-dir ~/ts_outputs/not_good --rule-name ExplodingTensor +``` + +Refer [this page](../../examples/tensorflow/training_scripts/simple/README.md) for full description on running this example. + +You can also try some further analysis on tensors saved by following +[programming model](../analysis/README.md#the-programming-model) section of analysis README. 
+ +##### Tornasole S3 mode example +``` +python examples/tensorflow/training_scripts/simple/simple.py --lr 100 \ + --scale 100000000000 --tornasole_frequency 9 --steps 100 \ + --tornasole_path s3://my-ts-test/ts_outputs/not_good +``` + +You can monitor the exploding tensors by doing the following +``` +python -m tornasole.rules.rule_invoker \ + --trial-dir s3://my-ts-test/ts_outputs/not_good --rule-name ExplodingTensor +``` +Refer [this page](../../examples/tensorflow/training_scripts/simple/README.md) for full description on running this example. + +Note: You can also try some further analysis on tensors saved by following +[programming model](../analysis/README.md#the-programming-model) section of analysis README. + +### ResNet50 GPU training +`examples/tensorflow/training_scripts/resnet50/train_imagenet_resnet_hvd.py` +is a Tornasole-enabled GPU training script for ResNet50/ImageNet. +This uses Estimator interface of TensorFlow. + +**Recommended EC2 instance to run this example is p3.2xl**. +If you are using the DLAMI, activate the tensorflow environment +`source activate tensorflow_p36` + +##### Tornasole local mode example +You can simulate the vanishing gradient scenario when using this script by running the following command. +``` +python examples/tensorflow/training_scripts/resnet50/train_imagenet_resnet_hvd.py --clear_log \ + --enable_tornasole --tornasole_save_weights --tornasole_save_gradients \ + --tornasole_step_interval 10 --constant_initializer 0.01 \ + --tornasole_path ~/ts_outputs/vanishing +``` + +You can monitor the vanishing tensors by doing the following +``` +python -m tornasole.rules.rule_invoker \ + --trial-dir ~/ts_outputs/vanishing --rule-name VanishingGradient +``` + +Note: You can also try some further analysis on tensors saved by following +[programming model](../analysis/README.md#the-programming-model) section of analysis README. 
+ +Refer [this page](../../examples/tensorflow/training_scripts/resnet50/README.md) for detailed +description of how Tornasole was integrated to save different tensors and instructions on running the examples. + +##### Tornasole S3 mode example +You can simulate the vanishing gradient scenario when using this script by running the following command. +``` +python examples/tensorflow/training_scripts/resnet50/train_imagenet_resnet_hvd.py --clear_log \ + --enable_tornasole --tornasole_save_weights --tornasole_save_gradients \ + --tornasole_step_interval 10 --constant_initializer 0.01 \ + --tornasole_path s3://my-ts-test/ts_outputs/vanishing +``` + +You can monitor the vanishing tensors by doing the following +``` +python -m tornasole.rules.rule_invoker \ + --trial-dir s3://my-ts-test/ts_outputs/vanishing --rule-name VanishingGradient +``` +**Note: You can run analysis in parallel (potentially on a different machine) when training is going on and +analysis will happen in real time as training job produces required data for analysis.** +To try this, you should try first invoking VanishingGradient rule above and then start training job. +You will notice that as training job will write new steps, VanishingGradient analysis will happen in real time. + +Note: You can also try some further analysis on tensors saved by following +[programming model](../analysis/README.md#the-programming-model) section of analysis README. + +Refer [this page](../../examples/tensorflow/training_scripts/resnet50/README.md) for detailed +description of how Tornasole was integrated to save different tensors and instructions on running the examples. + + +## Tornasole TensorFlow Concepts +In this section we briefly introduce the main constructs of the Tornasole TF API and some parameters important for their construction. +Please refer to [this document](api.md) for description of all the functions and parameters that our APIs support. 
+ +#### Hook +TornasoleHook is the entry point for Tornasole into your program. +It's a subclass of `tf.train.SessionRunHook` and can be used where that is suitable, +such as MonitoredSession and Estimator's train/predict/evaluate methods. +Some key parameters to consider when creating the TornasoleHook are the following: +- `out_dir`: This represents the path to which the outputs of tornasole will be written. +This can be a local path or an S3 prefix of the form `s3://bucket_name/prefix`. +- `save_config`: The hook takes a SaveConfig object which controls when tensors are saved. +It defaults to a SaveConfig which saves every 100 steps. +- `include_regex`: This represents the regex patterns of names of tensors to save +- `include_collections`: This represents the collections to be saved + + +It also has an important method which can be used to set the appropriate mode. +Modes can refer to 'training', 'evaluation' or 'prediction'. They can be set as follows: +```hook.set_mode(ts.modes.TRAIN)```, ```hook.set_mode(ts.modes.EVAL)``` or ```hook.set_mode(ts.modes.PREDICT)```. +This allows you to group steps by mode which allows for clearer analysis. Tornasole +also allows you to see a global ordering of steps which makes it clear after how many training +steps did a particular evaluation step happen. If you do not set this mode, all steps are saved under +the `GLOBAL` mode. + +**Examples** +- Save weights and gradients every 100 steps to an S3 location +``` +import tornasole.tensorflow as ts +ts.TornasoleHook(out_dir='s3://tornasole-testing/trial_job_dir', + save_config=ts.SaveConfig(save_interval=100), + include_collections=['weights', 'gradients']) +``` + +- Save custom tensors by regex pattern to a local path +``` +import tornasole.tensorflow as ts +ts.TornasoleHook(out_dir='/home/ubuntu/tornasole-testing/trial_job_dir', + include_regex=['loss*']) +``` +Refer [API](api.md) for all parameters available and their detailed descriptions. 
+ +#### Mode +A machine learning job can be executing steps in multiple modes, such as training, evaluating, or predicting. +Tornasole provides you the construct of a `mode` to keep data from these modes separate +and make it easy for analysis. To leverage this functionality you have to +call the `set_mode` function of hook such as the following call `hook.set_mode(modes.TRAIN)`. +The different modes available are `modes.TRAIN`, `modes.EVAL` and `modes.PREDICT`. + +If the mode was not set, all steps will be available together. + +#### Collection +Collection object helps group tensors for easier handling of tensors being saved. +A collection has its own list of tensors, include/exclude regex patterns, reduction config and save config. +This allows setting of different save and reduction configs for different tensors. +These collections are then also available during analysis with `tornasole_rules`. +- Creating or accessing a collection: The following method allows you to access a collection. +It also creates the collection if it does not exist. Here `biases` is the name of the collection. +``` +import tornasole.tensorflow as ts +ts.get_collection('biases') +``` +- Adding to a collection +``` +import tornasole.tensorflow as ts +ts.add_to_collection('inputs', features) +``` + +- Passing regex pattern to collection +``` +import tornasole.tensorflow as ts +ts.get_collection(collection_name).include(['loss*']) +``` +Refer [API](api.md) for all methods available when using collections such as setting SaveConfig, +ReductionConfig for a specific collection, retrieving all collections, or resetting all collections. + +#### SaveConfig +SaveConfig class allows you to customize the frequency of saving tensors. +The hook takes a SaveConfig object which is applied as +default to all tensors included. +A collection can also have its own SaveConfig object which is applied +to the tensors belonging to that collection. 
+ +SaveConfig also allows you to save tensors when certain tensors become nan. +This list of tensors to watch for is taken as a list of strings representing names of tensors. + +The parameters taken by SaveConfig are: + +- `save_interval`: This allows you to save tensors every `n` steps, i.e. when step_num % save_interval == 0 +- `skip_num_steps`: Allows you to avoid saving for the first n steps of the job. It defaults to 0, i.e. do not skip any steps in the beginning +- `save_steps`: Allows you to pass a list of step numbers at which tensors should be saved. If this is passed, then `save_interval` is ignored. +- `when_nan`: Allows you to save tensors whenever any of the list of tensors passed here is not finite (i.e. becomes nan or infinite). +If this is passed along with either `save_steps` or `save_interval`, then tensors will be saved whenever this list of tensors is not finite +as well as when a particular step should be saved based on the above two parameters. + +**Examples** +- ```SaveConfig(save_interval=10)``` Saving every 10 steps + +- ```SaveConfig(skip_num_steps=1000, save_interval=10)``` Save every 10 steps after skipping the first 1000 steps + +- ```SaveConfig(save_steps=[10, 500, 10000, 20000])``` Saves only at the supplied steps + +- ```SaveConfig(when_nan=['loss:0'], save_interval=100)``` Saves every 100 steps and also saves whenever the tensor matching 'loss:0' is nan. + +These save config instances can be passed to the hook as follows +``` +import tornasole.tensorflow as ts +hook = ts.TornasoleHook(..., save_config=ts.SaveConfig(save_interval=10), ...) +``` +Refer [API](api.md) for all parameters available and detailed descriptions for them. + +#### ReductionConfig +ReductionConfig allows the saving of certain reductions of tensors instead +of saving the full tensor. By reduction here we mean an operation that converts the tensor to a scalar. 
+The motivation here is to reduce the amount of data +saved, and increase the speed in cases where you don't need the full tensor. +The reduction operations are computed in the training process and then saved. +During analysis, these are available as reductions of the original tensor. +**Please note that using reduction config means that you will not have +the full tensor available during analysis, so this can restrict what you can do with the tensor saved.** +The hook takes a ReductionConfig object which is applied as default to all tensors included. +A collection can also have its own ReductionConfig object which is applied +to the tensors belonging to that collection. + +**Examples** +- ```ReductionConfig(abs_reductions=['min','max','mean'])``` Save min, max, mean on absolute values of the tensors included + +- ```ReductionConfig(reductions=['min','max','mean'])``` Save min, max, mean of the tensors included + +- ```ReductionConfig(norms=['l1'])``` Saves l1 norm of the tensors included + +These reduction config instances can be passed to the hook as follows +``` +import tornasole.tensorflow as ts +hook = ts.TornasoleHook(..., reduction_config=ts.ReductionConfig(norms=['l1']), ...) +``` +Refer [API](api.md) for a full list of the reductions available. + +## How to save tensors +There are different ways to save tensors when using Tornasole. +Tornasole provides easy ways to save certain standard tensors by way of default collections (a Collection represents a group of tensors). +Examples of such collections are `weights`, `gradients`, `optimizer variables`. +Besides these tensors, you can save tensors by name or regex patterns on those names. +You can also save them by letting Tornasole know which variables in your code are to be saved. +This section will take you through these ways in more detail. + +### Default collections +Collection object helps group tensors for easier handling of tensors being saved. 
+These collections are then also available during analysis. + +Tornasole creates a few default collections and populates +them with the relevant tensors. + +#### Weights +Weights is a default collection managed by Tornasole. +Saving weights is as easy as passing `weights` in the `include_collections` parameter of the hook. +``` +import tornasole.tensorflow as ts +hook = ts.TornasoleHook(..., include_collections = ['weights'], ...) +``` + +#### Gradients +We provide an easy way to populate the collection named `gradients` with the gradients wrt to the weights. +This can be done by wrapping around your optimizer with `TornasoleOptimizer` as follows. +This will also enable us to access the gradients during analysis without having to identify which tensors out of the saved ones are the gradients. + +``` +import tornasole.tensorflow as ts +... +opt = ts.TornasoleOptimizer(opt) +``` +An example for this can be seen in [this script](../../examples/tensorflow/training_scripts/resnet50/train_imagenet_resnet_hvd.py#L738) +Alternatively, you can refer to [customize collections](#customizing-collections) for +information on how you can create the gradients collection manually. + +Then, you need to pass `gradients` in the `include_collections` parameter of the hook. +``` +import tornasole.tensorflow as ts +hook = ts.TornasoleHook(..., include_collections = ['gradients'], ...) +``` + +#### Optimizer Variables +Optimizer variables such as momentum can also be saved easily with the +above approach of wrapping your optimizer with `TornasoleOptimizer` +followed by passing `optimizer_variables` in the `include_collections` parameter of the hook. +``` +import tornasole.tensorflow as ts +hook = ts.TornasoleHook(..., include_collections = ['optimizer_variables'], ...) +``` + +Please refer [API](api.md) for more details on using collections + +### Customizing collections +You can also create any other customized collection yourself. 
+You can create new collections as well as modify existing collections +(such as including gradients if you do not want to use the above `TornasoleOptimizer`) +#### Creating or accessing a collection +Each collection should have a unique name (which is a string). +You can get the collection named as `collection_name` by +calling the following function. +It creates the collection if it does not already exist. +``` +ts.get_collection('collection_name') +``` +#### Adding tensors +Tensors can be added to a collection by either passing an include regex parameter to the collection. +If you don't know the name of the tensors you want to add, you can also add the tensors to the collection +by the variables representing the tensors in code. The following sections describe these two scenarios. + +##### Adding tensors by regex +If you know the name of the tensors you want to save and can write regex +patterns to match those tensornames, you can pass the regex patterns to the collection. +The tensors which match these patterns are included and added to the collection. +``` +ts.get_collection('default').include(['foobar/weight*']) +``` + +**Quick note about names**: TensorFlow layers or operations take a name parameter which along with the name scope +of the layer or variable defines the full name of the operation. +For example, refer [`examples/tensorflow/training_scripts/simple.py`](../../examples/tensorflow/training_scripts/simple/simple.py#L20), +the weight there is named `foobar/weight1:0`. Here `foobar/weight1` refers to the +node representing operation in the graph, and the suffix `:0` indicates that this is the 0th output of the node. +To make clear the meaning of a given tensor, it helps to organize your code by name scopes and +set the names of different operations you might be interested in. + +##### Adding tensors from variables in the code +If you do not know the names of the tensors you are interested in, you can also just pass the variables to Tornasole. 
+Collection has an add method which takes either a TensorFlow Operation, Variable, or Tensor. + +For example, say you want to log the activations of relu layers in your model. You can save them as follows to a +collection named 'relu_activations'. All the tensors represented by this variable (there could be multiple if this line is a loop for instance) +are saved to this collection. +``` +x = tf.nn.relu(x) + +ts.add_to_collection('relu_activations', x) +``` + +### Regex pattern +A quick way to save tensors when you know the name of the tensors you want to save and +can write a regex pattern to match those tensornames, is to just pass the regex patterns to the hook. +You can use this approach if you just want to save a small number of tensors and do not care about collections. +The tensors which match these patterns are included and added to the collection named `default`. + +``` +hook = ts.TornasoleHook(..., + include_regex=['foobar/weight*'], + ...) +``` + +**Note** Above does the same as in the Regex section above in Customizing collections. + +### Saving all tensors +Tornasole makes it easy to save all the tensors in the model. You just need to set the flag `save_all=True` when creating the hook. +**Please note that this can severely reduce performance of the job and will generate lot of data** + +## Analyzing the Results +For full details on how to analyze the tensors saved, go to [Analysis](../analysis/README.md) + +## FAQ +#### Logging +You can control the logging from Tornasole by setting the appropriate +level for the python logger `tornasole` using either of the following approaches. + +**In Python code** +``` +import logging +logging.getLogger('tornasole').setLevel = logging.INFO +``` + +**Using environment variable** +You can also set the environment variable `TORNASOLE_LOG_LEVEL` as below + +``` +export TORNASOLE_LOG_LEVEL=INFO +``` +Log levels available are 'INFO', 'DEBUG', 'WARNING', 'ERROR', 'CRITICAL', 'OFF'. 
+ +#### S3Access +The instance running tornasole in s3 mode needs to have s3 access. There are different ways to give an instance access to your S3 account. +- If you are using an EC2 instance, you should launch your instance with a proper IAM role to access S3. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +- If you are using a mac or other machine, you can create an IAM user for your account to have s3 access by following this guide (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html) and then configure your instance to use your AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY by using the doc here https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html +- Once you are done configuring, please verify that the command below works and the buckets returned are from the account and region you want to use. +``` +aws s3 ls +``` + +## ContactUs +We would like to hear from you. If you have any question or feedback, please reach out to us at tornasole-users@amazon.com + +## License +This library is licensed under the Apache 2.0 License. diff --git a/docs/tensorflow/api.md b/docs/tensorflow/api.md new file mode 100644 index 0000000000..a1ef9c0464 --- /dev/null +++ b/docs/tensorflow/api.md @@ -0,0 +1,186 @@ +## API + +Tornasole TF provides the following constructs: +### Hook +TornasoleHook is the entry point for Tornasole into your program. +It's a subclass of `tf.train.SessionRunHook` and can be used where that is suitable, +such as MonitoredSession and Estimator's train/predict/evaluate methods. + +``` +class TornasoleHook: + """ + A class used to represent the hook which gets attached to the + training process. + + ... + + Attributes + ---------- + out_dir : str + represents a path to which the outputs of tornasole will be written to. + This can be a local path or + an S3 prefix of the form s3://bucket_name/prefix + + dry_run : bool + when dry_run is set to True, behavior is only described in the log file. 
+ The tensors are not actually saved. + + worker: str + name of worker in a multi process training job + outputs and tensors are organized by this name during retrieval. + + save_config: SaveConfig object + SaveConfig allows you to customize when tensors are saved. + Hook takes SaveConfig object which is applied as + default for all included tensors. + A collection can optionally have its own SaveConfig object + which overrides this for its tensors. + Refer to documentation for SaveConfig. + + reduction_config: ReductionConfig object + ReductionConfig allows you to save tensors as their reductions + instead of saving full tensors. + If ReductionConfig is passed then the chosen reductions are applied + as default for all tensors included. + A collection can optionally have its own ReductionConfig object + which overrides this for its tensors. + + include_regex: list of str + takes as input the list of string representing regular expressions. Tensors whose names match + these regular expressions will be saved. These tensors will be available as part of the `default` + collection. + + include_collections: list of str + takes as input the names of collections which should be saved. + by default, ['weights','gradients'] are passed to include_collections. + + save_all: bool + a shortcut for saving all tensors in the model. + tensors are all grouped into the `default` collection + """ + def __init__(self, + out_dir, + dry_run=False, + worker='worker0', + reduction_config=None, + save_config=SaveConfig(save_interval=100), + include_regex=None, + include_collections=['weights', 'gradients'], + save_all=False, + ): +``` + + +It also has an important method which can be used to set the appropriate mode. +Modes can refer to 'training', 'evaluation' or 'prediction'. They can be set as follows: +```hook.set_mode(ts.modes.TRAIN)```, ```hook.set_mode(ts.modes.EVAL)``` or ```hook.set_mode(ts.modes.PREDICT)```. 
+This allows you to group steps by mode which allows for clearer analysis. Tornasole +also allows you to see a global ordering of steps which makes it clear after how many training +steps did a particular evaluation step happen. If you do not set this mode, all steps are saved under +the `GLOBAL` mode. + + +### Collection + +Collection object helps group tensors for easier handling of tensors being saved. +A collection has its own list of tensors, include/exclude regex patterns, reduction config and save config. +This allows setting of different save and reduction configs for different tensors. +These collections are then also available during analysis with `tornasole_rules`. + +#### Creating or accessing a collection +``` +import tornasole.tensorflow as ts +``` + +| Function | Behavior | +|---|---| +| ```ts.get_collection(collection_name)``` | Returns the collection with the given name. Creates the collection if it doesn't already exist | +| ```ts.get_collections()``` | Returns all collections as a dictionary with the keys being names of the collections | +| ```ts.add_to_collection(collection_name, args)``` | Equivalent to calling `coll.add(args)` on the collection with name `collection_name` | +| ```ts.add_to_default_collection(args)``` | Equivalent to calling `coll.add(args)` on the collection with the name `default`| +| ```ts.reset_collections()``` | Clears all collections | + +#### Methods +The following methods can be called on a collection object. 
+ +| Method | Behavior | +|---|---| +| ```coll.include(t)``` | Takes a regex or a list of regex to match tensors to be included to the collection | +| ```coll.add(t)``` | Takes an instance or list or set of tf.Operation/tf.Variable/tf.Tensor to add to the collection | +| ```coll.get_include_regex()``` | Returns include_regex for the collection | +| ```coll.get_save_config()``` | Returns save config for the collection | +| ```coll.set_save_config(s)``` | Sets save config for the collection | +| ```coll.get_reduction_config()``` | Returns reduction config for the collection | +| ```coll.set_reduction_config()``` | Sets reduction config for the collection | + +### SaveConfig +SaveConfig class allows you to customize the frequency of saving tensors. +The hook takes a SaveConfig object which is applied as +default to all tensors included. +A collection can also have its own SaveConfig object which is applied +to the tensors belonging to that collection. + +SaveConfig also allows you to save tensors when certain tensors become nan. +This list of tensors to watch for is taken as a list of strings representing names of tensors. +``` +class SaveConfig: + """ + + Attributes + ---------- + + save_interval: int + allows you to save every n steps by passing n to save_interval + + skip_num_steps: int + allows you to avoid saving for the first n steps of the job. + it defaults to 0, i.e. don't skip any steps in the beginning. + + save_steps: list of int + save at all the steps given in this list. + if this is given, it ignores the save_interval. + + when_nan: list of str representing name of tensor + saves the tensors to which this saveConfig is attached + whenever any of the tensors in this list become nan or infinite. + This means that if your save_interval is set to 10, and 'loss' is in when_nan + your tensors will be saved whenever save_interval is multiple of 10 as well as + whenever loss becomes nan or infinite. 
+ """ +``` + +### ReductionConfig +ReductionConfig allows the saving of certain reductions of tensors instead +of saving the full tensor. The motivation here is to reduce the amount of data +saved, and increase the speed in cases where you don't need the full +tensor. The reduction operations which are computed in the training process +and then saved. +During analysis, these are available as reductions of the original tensor. +Please note that using reduction config means that you will not have +the full tensor available during analysis, so this can restrict what you can do with the tensor saved. +The hook takes a ReductionConfig object which is applied as default to all tensors included. +A collection can also have its own ReductionConfig object which is applied +to the tensors belonging to that collection. + +``` + Attributes + ---------- + + reductions: list of str + takes list of names of reductions to be computed. + should be one of 'min', 'max', 'median', 'mean', 'std', 'variance', 'sum', 'prod' + + abs_reductions: list of str + takes list of names of reductions to be computed after converting the tensor + to abs(tensor) i.e. reductions are applied on the absolute values of tensor. + should be one of 'min', 'max', 'median', 'mean', 'std', 'variance', 'sum', 'prod' + + norms: list of str + takes names of norms to be computed of the tensor. 
+ should be one of 'l1', 'l2' + + abs_norms: list of str + takes names of norms to be computed of the tensor after taking absolute value + should be one of 'l1', 'l2' + """ +``` diff --git a/examples/analysis/notebooks/NNRecipe/NNRecipes.ipynb b/examples/analysis/notebooks/NNRecipe/NNRecipes.ipynb new file mode 100644 index 0000000000..56b944bcf9 --- /dev/null +++ b/examples/analysis/notebooks/NNRecipe/NNRecipes.ipynb @@ -0,0 +1,377 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Recipes for Neural Networks" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "pycharm": { + "is_executing": false + } + }, + "outputs": [ + { + "name": "stdout", + "text": [ + "The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n" + ], + "output_type": "stream" + } + ], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "pycharm": { + "is_executing": false + } + }, + "outputs": [ + { + "name": "stdout", + "text": [ + "Requirement already satisfied: aioboto3 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (6.4.1)\r\n", + "Collecting boto\r\n", + " Using cached https://files.pythonhosted.org/packages/23/10/c0b78c27298029e4454a472a1919bde20cb182dab1662cec7f2ca1dcc523/boto-2.49.0-py2.py3-none-any.whl\r\n", + "Requirement already satisfied: aiobotocore[boto3]~=0.10.2 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from aioboto3) (0.10.2)\r\nRequirement already satisfied: wrapt>=1.10.10 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from aiobotocore[boto3]~=0.10.2->aioboto3) (1.11.2)\r\nRequirement already satisfied: async-generator>=1.10 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from aiobotocore[boto3]~=0.10.2->aioboto3) (1.10)\r\nRequirement already satisfied: aiohttp>=3.3.1 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages 
(from aiobotocore[boto3]~=0.10.2->aioboto3) (3.5.4)\r\nRequirement already satisfied: botocore<1.12.92,>=1.12.91 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from aiobotocore[boto3]~=0.10.2->aioboto3) (1.12.91)\r\n", + "Requirement already satisfied: boto3==1.9.91; extra == \"boto3\" in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from aiobotocore[boto3]~=0.10.2->aioboto3) (1.9.91)\r\nRequirement already satisfied: async-timeout<4.0,>=3.0 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from aiohttp>=3.3.1->aiobotocore[boto3]~=0.10.2->aioboto3) (3.0.1)\r\nRequirement already satisfied: attrs>=17.3.0 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from aiohttp>=3.3.1->aiobotocore[boto3]~=0.10.2->aioboto3) (19.1.0)\r\n", + "Requirement already satisfied: yarl<2.0,>=1.0 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from aiohttp>=3.3.1->aiobotocore[boto3]~=0.10.2->aioboto3) (1.3.0)\r\nRequirement already satisfied: multidict<5.0,>=4.0 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from aiohttp>=3.3.1->aiobotocore[boto3]~=0.10.2->aioboto3) (4.5.2)\r\nRequirement already satisfied: typing-extensions>=3.6.5; python_version < \"3.7\" in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from aiohttp>=3.3.1->aiobotocore[boto3]~=0.10.2->aioboto3) (3.7.4)\r\nRequirement already satisfied: idna-ssl>=1.0; python_version < \"3.7\" in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from aiohttp>=3.3.1->aiobotocore[boto3]~=0.10.2->aioboto3) (1.1.0)\r\nRequirement already satisfied: chardet<4.0,>=2.0 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from aiohttp>=3.3.1->aiobotocore[boto3]~=0.10.2->aioboto3) (3.0.4)\r\n", + "Requirement already satisfied: jmespath<1.0.0,>=0.7.1 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from botocore<1.12.92,>=1.12.91->aiobotocore[boto3]~=0.10.2->aioboto3) (0.9.4)\r\nRequirement already satisfied: 
urllib3<1.25,>=1.20; python_version >= \"3.4\" in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from botocore<1.12.92,>=1.12.91->aiobotocore[boto3]~=0.10.2->aioboto3) (1.24.3)\r\n", + "Requirement already satisfied: docutils>=0.10 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from botocore<1.12.92,>=1.12.91->aiobotocore[boto3]~=0.10.2->aioboto3) (0.14)\r\nRequirement already satisfied: python-dateutil<3.0.0,>=2.1; python_version >= \"2.7\" in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from botocore<1.12.92,>=1.12.91->aiobotocore[boto3]~=0.10.2->aioboto3) (2.8.0)\r\nRequirement already satisfied: s3transfer<0.3.0,>=0.2.0 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from boto3==1.9.91; extra == \"boto3\"->aiobotocore[boto3]~=0.10.2->aioboto3) (0.2.1)\r\nRequirement already satisfied: idna>=2.0 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from yarl<2.0,>=1.0->aiohttp>=3.3.1->aiobotocore[boto3]~=0.10.2->aioboto3) (2.8)\r\nRequirement already satisfied: six>=1.5 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from python-dateutil<3.0.0,>=2.1; python_version >= \"2.7\"->botocore<1.12.92,>=1.12.91->aiobotocore[boto3]~=0.10.2->aioboto3) (1.12.0)\r\n", + "Installing collected packages: boto\r\n", + "Successfully installed boto-2.49.0\r\n" + ], + "output_type": "stream" + } + ], + "source": [ + "!rm -rf model/\n", + "!rm -rf ts_logs/\n", + "!rm -rf ts_outputs/" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": { + "pycharm": { + "is_executing": false + } + }, + "outputs": [ + { + "name": "stdout", + "text": [ + "Collecting matplotlib\r\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/cf/a4/d5387a74204542a60ad1baa84cd2d3353c330e59be8cf2d47c0b11d3cde8/matplotlib-3.1.1-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl (14.4MB)\r\n\r\u001b[K 0% | | 10kB 906kB/s eta 
0:00:16\r\u001b[K 0% | | 20kB 1.2MB/s eta 0:00:13\r\u001b[K 0% | | 30kB 1.1MB/s eta 0:00:13", + "\r\u001b[K 0% | | 40kB 1.5MB/s eta 0:00:10\r\u001b[K 0% |▏ | 51kB 1.5MB/s eta 0:00:10\r\u001b[K 0% |▏ | 61kB 1.6MB/s eta 0:00:10\r\u001b[K 0% |▏ | 71kB 1.8MB/s eta 0:00:08\r\u001b[K 0% |▏ | 81kB 1.8MB/s eta 0:00:08\r\u001b[K 0% |▏ | 92kB 1.8MB/s eta 0:00:08\r\u001b[K 0% |▎ | 102kB 2.0MB/s eta 0:00:08\r\u001b[K 0% |▎ | 112kB 2.4MB/s eta 0:00:06\r\u001b[K 0% |▎ | 122kB 2.8MB/s eta 0:00:06\r\u001b[K 0% |▎ | 133kB 3.3MB/s eta 0:00:05\r\u001b[K 0% |▎ | 143kB 2.8MB/s eta 0:00:06\r\u001b[K 1% |▍ | 153kB 3.1MB/s eta 0:00:05\r\u001b[K 1% |▍ | 163kB 3.3MB/s eta 0:00:05\r\u001b[K 1% |▍ | 174kB 2.7MB/s eta 0:00:06\r\u001b[K 1% |▍ | 184kB 3.2MB/s eta 0:00:05", + "\r\u001b[K 1% |▍ | 194kB 2.9MB/s eta 0:00:05\r\u001b[K 1% |▌ | 204kB 2.9MB/s eta 0:00:05\r\u001b[K 1% |▌ | 215kB 2.1MB/s eta 0:00:07\r\u001b[K 1% |▌ | 225kB 2.0MB/s eta 0:00:08\r\u001b[K 1% |▌ | 235kB 2.2MB/s eta 0:00:07\r\u001b[K 1% |▌ | 245kB 2.3MB/s eta 0:00:07\r\u001b[K 1% |▋ | 256kB 2.2MB/s eta 0:00:07\r\u001b[K 1% |▋ | 266kB 2.3MB/s eta 0:00:07\r\u001b[K 1% |▋ | 276kB 2.4MB/s eta 0:00:06\r\u001b[K 1% |▋ | 286kB 2.4MB/s eta 0:00:06\r\u001b[K 2% |▋ | 296kB 2.8MB/s eta 0:00:06\r\u001b[K 2% |▊ | 307kB 2.6MB/s eta 0:00:06\r\u001b[K 2% |▊ | 317kB 4.1MB/s eta 0:00:04\r\u001b[K 2% |▊ | 327kB 4.2MB/s eta 0:00:04\r\u001b[K 2% |▊ | 337kB 3.9MB/s eta 0:00:04\r\u001b[K 2% |▊ | 348kB 4.4MB/s eta 0:00:04\r\u001b[K 2% |▉ | 358kB 4.9MB/s eta 0:00:03\r\u001b[K 2% |▉ | 368kB 5.4MB/s eta 0:00:03\r\u001b[K 2% |▉ | 378kB 7.0MB/s eta 0:00:02", + "\r\u001b[K 2% |▉ | 389kB 6.2MB/s eta 0:00:03\r\u001b[K 2% |▉ | 399kB 7.1MB/s eta 0:00:02\r\u001b[K 2% |█ | 409kB 7.0MB/s eta 0:00:03\r\u001b[K 2% |█ | 419kB 4.1MB/s eta 0:00:04\r\u001b[K 2% |█ | 430kB 4.3MB/s eta 0:00:04\r\u001b[K 3% |█ | 440kB 4.5MB/s eta 0:00:04\r\u001b[K 3% |█ | 450kB 4.5MB/s eta 0:00:04\r\u001b[K 3% |█ | 460kB 4.0MB/s eta 0:00:04\r\u001b[K 3% |█ | 471kB 3.0MB/s eta 
0:00:05\r\u001b[K 3% |█ | 481kB 3.0MB/s eta 0:00:05\r\u001b[K 3% |█ | 491kB 2.7MB/s eta 0:00:06\r\u001b[K 3% |█▏ | 501kB 2.5MB/s eta 0:00:06\r\u001b[K 3% |█▏ | 512kB 2.6MB/s eta 0:00:06\r\u001b[K 3% |█▏ | 522kB 3.2MB/s eta 0:00:05\r\u001b[K 3% |█▏ | 532kB 3.2MB/s eta 0:00:05\r\u001b[K 3% |█▏ | 542kB 3.2MB/s eta 0:00:05", + "\r\u001b[K 3% |█▎ | 552kB 2.8MB/s eta 0:00:05\r\u001b[K 3% |█▎ | 563kB 3.1MB/s eta 0:00:05\r\u001b[K 3% |█▎ | 573kB 3.8MB/s eta 0:00:04\r\u001b[K 4% |█▎ | 583kB 3.6MB/s eta 0:00:04\r\u001b[K 4% |█▎ | 593kB 4.3MB/s eta 0:00:04\r\u001b[K 4% |█▍ | 604kB 4.9MB/s eta 0:00:03\r\u001b[K 4% |█▍ | 614kB 4.8MB/s eta 0:00:03\r\u001b[K 4% |█▍ | 624kB 4.8MB/s eta 0:00:03\r\u001b[K 4% |█▍ | 634kB 4.6MB/s eta 0:00:03\r\u001b[K 4% |█▍ | 645kB 4.9MB/s eta 0:00:03\r\u001b[K 4% |█▌ | 655kB 5.9MB/s eta 0:00:03\r\u001b[K 4% |█▌ | 665kB 4.8MB/s eta 0:00:03\r\u001b[K 4% |█▌ | 675kB 5.1MB/s eta 0:00:03\r\u001b[K 4% |█▌ | 686kB 5.7MB/s eta 0:00:03\r\u001b[K 4% |█▌ | 696kB 5.8MB/s eta 0:00:03\r\u001b[K 4% |█▋ | 706kB 5.7MB/s eta 0:00:03\r\u001b[K 4% |█▋ | 716kB 6.1MB/s eta 0:00:03\r\u001b[K 5% |█▋ | 727kB 7.3MB/s eta 0:00:02\r\u001b[K 5% |█▋ | 737kB 7.7MB/s eta 0:00:02\r\u001b[K 5% |█▋ | 747kB 7.3MB/s eta 0:00:02\r\u001b[K 5% |█▊ | 757kB 7.8MB/s eta 0:00:02\r\u001b[K 5% |█▊ | 768kB 10.5MB/s eta 0:00:02\r\u001b[K 5% |█▊ | 778kB 10.9MB/s eta 0:00:02\r\u001b[K 5% |█▊ | 788kB 9.6MB/s eta 0:00:02\r\u001b[K 5% |█▊ | 798kB 9.0MB/s eta 0:00:02\r\u001b[K 5% |█▉ | 808kB 10.2MB/s eta 0:00:02\r\u001b[K 5% |█▉ | 819kB 9.8MB/s eta 0:00:02\r\u001b[K 5% |█▉ | 829kB 9.1MB/s eta 0:00:02\r\u001b[K 5% |█▉ | 839kB 9.5MB/s eta 0:00:02\r\u001b[K 5% |██ | 849kB 8.6MB/s eta 0:00:02\r\u001b[K 5% |██ | 860kB 8.1MB/s eta 0:00:02\r\u001b[K 6% |██ | 870kB 6.9MB/s eta 0:00:02", + "\r\u001b[K 6% |██ | 880kB 6.2MB/s eta 0:00:03\r\u001b[K 6% |██ | 890kB 6.9MB/s eta 0:00:02\r\u001b[K 6% |██ | 901kB 6.4MB/s eta 0:00:03\r\u001b[K 6% |██ | 911kB 4.6MB/s eta 0:00:03\r\u001b[K 6% |██ | 921kB 4.7MB/s eta 
0:00:03\r\u001b[K 6% |██ | 931kB 4.4MB/s eta 0:00:04\r\u001b[K 6% |██ | 942kB 4.3MB/s eta 0:00:04\r\u001b[K 6% |██▏ | 952kB 4.2MB/s eta 0:00:04\r\u001b[K 6% |██▏ | 962kB 4.1MB/s eta 0:00:04\r\u001b[K 6% |██▏ | 972kB 4.5MB/s eta 0:00:03\r\u001b[K 6% |██▏ | 983kB 4.6MB/s eta 0:00:03\r\u001b[K 6% |██▏ | 993kB 4.6MB/s eta 0:00:03\r\u001b[K 6% |██▎ | 1.0MB 5.0MB/s eta 0:00:03\r\u001b[K 7% |██▎ | 1.0MB 6.8MB/s eta 0:00:02\r\u001b[K 7% |██▎ | 1.0MB 6.8MB/s eta 0:00:02\r\u001b[K 7% |██▎ | 1.0MB 6.0MB/s eta 0:00:03\r\u001b[K 7% |██▎ | 1.0MB 5.6MB/s eta 0:00:03\r\u001b[K 7% |██▍ | 1.1MB 6.6MB/s eta 0:00:03\r\u001b[K 7% |██▍ | 1.1MB 6.0MB/s eta 0:00:03\r\u001b[K 7% |██▍ | 1.1MB 5.2MB/s eta 0:00:03\r\u001b[K 7% |██▍ | 1.1MB 5.6MB/s eta 0:00:03\r\u001b[K 7% |██▍ | 1.1MB 5.1MB/s eta 0:00:03\r\u001b[K 7% |██▌ | 1.1MB 4.9MB/s eta 0:00:03\r\u001b[K 7% |██▌ | 1.1MB 4.4MB/s eta 0:00:04", + "\r\u001b[K 7% |██▌ | 1.1MB 4.2MB/s eta 0:00:04\r\u001b[K 7% |██▌ | 1.1MB 4.3MB/s eta 0:00:04\r\u001b[K 7% |██▌ | 1.1MB 3.5MB/s eta 0:00:04\r\u001b[K 8% |██▋ | 1.2MB 3.2MB/s eta 0:00:05\r\u001b[K 8% |██▋ | 1.2MB 3.5MB/s eta 0:00:04\r\u001b[K 8% |██▋ | 1.2MB 3.6MB/s eta 0:00:04\r\u001b[K 8% |██▋ | 1.2MB 3.6MB/s eta 0:00:04\r\u001b[K 8% |██▋ | 1.2MB 3.8MB/s eta 0:00:04\r\u001b[K 8% |██▊ | 1.2MB 3.7MB/s eta 0:00:04\r\u001b[K 8% |██▊ | 1.2MB 4.1MB/s eta 0:00:04\r\u001b[K 8% |██▊ | 1.2MB 4.4MB/s eta 0:00:04\r\u001b[K 8% |██▊ | 1.2MB 4.8MB/s eta 0:00:03\r\u001b[K 8% |██▉ | 1.2MB 7.1MB/s eta 0:00:02\r\u001b[K 8% |██▉ | 1.3MB 7.8MB/s eta 0:00:02\r\u001b[K 8% |██▉ | 1.3MB 7.8MB/s eta 0:00:02\r\u001b[K 8% |██▉ | 1.3MB 8.3MB/s eta 0:00:02\r\u001b[K 8% |██▉ | 1.3MB 7.0MB/s eta 0:00:02\r\u001b[K 9% |███ | 1.3MB 7.2MB/s eta 0:00:02\r\u001b[K 9% |███ | 1.3MB 7.5MB/s eta 0:00:02\r\u001b[K 9% |███ | 1.3MB 6.2MB/s eta 0:00:03\r\u001b[K 9% |███ | 1.3MB 6.3MB/s eta 0:00:03\r\u001b[K 9% |███ | 1.3MB 6.3MB/s eta 0:00:03\r\u001b[K 9% |███ | 1.4MB 5.9MB/s eta 0:00:03\r\u001b[K 9% |███ | 1.4MB 5.4MB/s eta 0:00:03\r\u001b[K 
9% |███ | 1.4MB 4.6MB/s eta 0:00:03\r\u001b[K 9% |███ | 1.4MB 4.8MB/s eta 0:00:03", + "\r\u001b[K 9% |███ | 1.4MB 5.3MB/s eta 0:00:03\r\u001b[K 9% |███▏ | 1.4MB 4.1MB/s eta 0:00:04\r\u001b[K 9% |███▏ | 1.4MB 4.1MB/s eta 0:00:04\r\u001b[K 9% |███▏ | 1.4MB 4.4MB/s eta 0:00:03\r\u001b[K 9% |███▏ | 1.4MB 4.5MB/s eta 0:00:03\r\u001b[K 10% |███▏ | 1.4MB 4.6MB/s eta 0:00:03\r\u001b[K 10% |███▎ | 1.5MB 4.7MB/s eta 0:00:03\r\u001b[K 10% |███▎ | 1.5MB 5.4MB/s eta 0:00:03\r\u001b[K 10% |███▎ | 1.5MB 6.2MB/s eta 0:00:03\r\u001b[K 10% |███▎ | 1.5MB 6.1MB/s eta 0:00:03\r\u001b[K 10% |███▎ | 1.5MB 6.4MB/s eta 0:00:03\r\u001b[K 10% |███▍ | 1.5MB 9.8MB/s eta 0:00:02\r\u001b[K 10% |███▍ | 1.5MB 10.9MB/s eta 0:00:02\r\u001b[K 10% |███▍ | 1.5MB 9.9MB/s eta 0:00:02\r\u001b[K 10% |███▍ | 1.5MB 8.8MB/s eta 0:00:02\r\u001b[K 10% |███▍ | 1.5MB 9.5MB/s eta 0:00:02\r\u001b[K 10% |███▌ | 1.6MB 9.7MB/s eta 0:00:02\r\u001b[K 10% |███▌ | 1.6MB 8.9MB/s eta 0:00:02\r\u001b[K 10% |███▌ | 1.6MB 9.2MB/s eta 0:00:02\r\u001b[K 11% |███▌ | 1.6MB 9.0MB/s eta 0:00:02\r\u001b[K 11% |███▌ | 1.6MB 8.8MB/s eta 0:00:02\r\u001b[K 11% |███▋ | 1.6MB 7.5MB/s eta 0:00:02\r\u001b[K 11% |███▋ | 1.6MB 6.0MB/s eta 0:00:03\r\u001b[K 11% |███▋ | 1.6MB 6.9MB/s eta 0:00:02\r\u001b[K 11% |███▋ | 1.6MB 6.0MB/s eta 0:00:03\r\u001b[K 11% |███▊ | 1.6MB 5.1MB/s eta 0:00:03\r\u001b[K 11% |███▊ | 1.7MB 5.2MB/s eta 0:00:03\r\u001b[K 11% |███▊ | 1.7MB 4.8MB/s eta 0:00:03\r\u001b[K 11% |███▊ | 1.7MB 4.9MB/s eta 0:00:03\r\u001b[K 11% |███▊ | 1.7MB 4.6MB/s eta 0:00:03", + "\r\u001b[K 11% |███▉ | 1.7MB 4.0MB/s eta 0:00:04\r\u001b[K 11% |███▉ | 1.7MB 4.5MB/s eta 0:00:03\r\u001b[K 11% |███▉ | 1.7MB 4.9MB/s eta 0:00:03\r\u001b[K 12% |███▉ | 1.7MB 4.4MB/s eta 0:00:03\r\u001b[K 12% |███▉ | 1.7MB 5.2MB/s eta 0:00:03\r\u001b[K 12% |████ | 1.8MB 5.6MB/s eta 0:00:03\r\u001b[K 12% |████ | 1.8MB 5.5MB/s eta 0:00:03\r\u001b[K 12% |████ | 1.8MB 5.6MB/s eta 0:00:03\r\u001b[K 12% |████ | 1.8MB 5.0MB/s eta 0:00:03\r\u001b[K 12% |████ | 1.8MB 5.7MB/s 
eta 0:00:03\r\u001b[K 12% |████ | 1.8MB 6.2MB/s eta 0:00:03\r\u001b[K 12% |████ | 1.8MB 5.3MB/s eta 0:00:03\r\u001b[K 12% |████ | 1.8MB 5.4MB/s eta 0:00:03\r\u001b[K 12% |████ | 1.8MB 5.5MB/s eta 0:00:03\r\u001b[K 12% |████ | 1.8MB 5.5MB/s eta 0:00:03\r\u001b[K 12% |████▏ | 1.9MB 6.0MB/s eta 0:00:03\r\u001b[K 12% |████▏ | 1.9MB 5.6MB/s eta 0:00:03\r\u001b[K 13% |████▏ | 1.9MB 6.5MB/s eta 0:00:02\r\u001b[K 13% |████▏ | 1.9MB 6.1MB/s eta 0:00:03\r\u001b[K 13% |████▏ | 1.9MB 5.3MB/s eta 0:00:03\r\u001b[K 13% |████▎ | 1.9MB 5.9MB/s eta 0:00:03\r\u001b[K 13% |████▎ | 1.9MB 5.9MB/s eta 0:00:03\r\u001b[K 13% |████▎ | 1.9MB 6.6MB/s eta 0:00:02\r\u001b[K 13% |████▎ | 1.9MB 6.6MB/s eta 0:00:02\r\u001b[K 13% |████▎ | 1.9MB 5.6MB/s eta 0:00:03\r\u001b[K 13% |████▍ | 2.0MB 5.7MB/s eta 0:00:03\r\u001b[K 13% |████▍ | 2.0MB 5.6MB/s eta 0:00:03\r\u001b[K 13% |████▍ | 2.0MB 4.8MB/s eta 0:00:03\r\u001b[K 13% |████▍ | 2.0MB 5.6MB/s eta 0:00:03", + "\r\u001b[K 13% |████▍ | 2.0MB 5.7MB/s eta 0:00:03\r\u001b[K 13% |████▌ | 2.0MB 5.8MB/s eta 0:00:03\r\u001b[K 14% |████▌ | 2.0MB 5.3MB/s eta 0:00:03\r\u001b[K 14% |████▌ | 2.0MB 4.9MB/s eta 0:00:03\r\u001b[K 14% |████▌ | 2.0MB 5.5MB/s eta 0:00:03\r\u001b[K 14% |████▋ | 2.0MB 5.6MB/s eta 0:00:03\r\u001b[K 14% |████▋ | 2.1MB 4.9MB/s eta 0:00:03\r\u001b[K 14% |████▋ | 2.1MB 5.3MB/s eta 0:00:03\r\u001b[K 14% |████▋ | 2.1MB 5.3MB/s eta 0:00:03\r\u001b[K 14% |████▋ | 2.1MB 5.3MB/s eta 0:00:03\r\u001b[K 14% |████▊ | 2.1MB 4.6MB/s eta 0:00:03\r\u001b[K 14% |████▊ | 2.1MB 4.0MB/s eta 0:00:04\r\u001b[K 14% |████▊ | 2.1MB 4.7MB/s eta 0:00:03\r\u001b[K 14% |████▊ | 2.1MB 4.9MB/s eta 0:00:03\r\u001b[K 14% |████▊ | 2.1MB 4.6MB/s eta 0:00:03\r\u001b[K 14% |████▉ | 2.2MB 5.0MB/s eta 0:00:03\r\u001b[K 15% |████▉ | 2.2MB 5.3MB/s eta 0:00:03\r\u001b[K 15% |████▉ | 2.2MB 5.3MB/s eta 0:00:03\r\u001b[K 15% |████▉ | 2.2MB 5.6MB/s eta 0:00:03\r\u001b[K 15% |████▉ | 2.2MB 4.0MB/s eta 0:00:04\r\u001b[K 15% |█████ | 2.2MB 4.8MB/s eta 0:00:03\r\u001b[K 15% |█████ | 
2.2MB 5.3MB/s eta 0:00:03\r\u001b[K 15% |█████ | 2.2MB 5.0MB/s eta 0:00:03\r\u001b[K 15% |█████ | 2.2MB 5.2MB/s eta 0:00:03", + "\r\u001b[K 15% |█████ | 2.2MB 4.8MB/s eta 0:00:03\r\u001b[K 15% |█████ | 2.3MB 4.9MB/s eta 0:00:03\r\u001b[K 15% |█████ | 2.3MB 4.3MB/s eta 0:00:03\r\u001b[K 15% |█████ | 2.3MB 4.1MB/s eta 0:00:03\r\u001b[K 15% |█████ | 2.3MB 4.4MB/s eta 0:00:03\r\u001b[K 15% |█████ | 2.3MB 5.7MB/s eta 0:00:03\r\u001b[K 16% |█████▏ | 2.3MB 5.4MB/s eta 0:00:03\r\u001b[K 16% |█████▏ | 2.3MB 5.7MB/s eta 0:00:03\r\u001b[K 16% |█████▏ | 2.3MB 5.3MB/s eta 0:00:03\r\u001b[K 16% |█████▏ | 2.3MB 5.4MB/s eta 0:00:03\r\u001b[K 16% |█████▏ | 2.3MB 6.0MB/s eta 0:00:03\r\u001b[K 16% |█████▎ | 2.4MB 5.8MB/s eta 0:00:03\r\u001b[K 16% |█████▎ | 2.4MB 7.9MB/s eta 0:00:02\r\u001b[K 16% |█████▎ | 2.4MB 8.0MB/s eta 0:00:02\r\u001b[K 16% |█████▎ | 2.4MB 6.9MB/s eta 0:00:02\r\u001b[K 16% |█████▎ | 2.4MB 8.3MB/s eta 0:00:02\r\u001b[K 16% |█████▍ | 2.4MB 7.7MB/s eta 0:00:02\r\u001b[K 16% |█████▍ | 2.4MB 6.3MB/s eta 0:00:02\r\u001b[K 16% |█████▍ | 2.4MB 6.0MB/s eta 0:00:02\r\u001b[K 16% |█████▍ | 2.4MB 5.7MB/s eta 0:00:03\r\u001b[K 17% |█████▌ | 2.4MB 6.0MB/s eta 0:00:03\r\u001b[K 17% |█████▌ | 2.5MB 6.1MB/s eta 0:00:02\r\u001b[K 17% |█████▌ | 2.5MB 5.7MB/s eta 0:00:03\r\u001b[K 17% |█████▌ | 2.5MB 6.1MB/s eta 0:00:02\r\u001b[K 17% |█████▌ | 2.5MB 6.0MB/s eta 0:00:02\r\u001b[K 17% |█████▋ | 2.5MB 6.0MB/s eta 0:00:02\r\u001b[K 17% |█████▋ | 2.5MB 6.3MB/s eta 0:00:02\r\u001b[K 17% |█████▋ | 2.5MB 6.7MB/s eta 0:00:02\r\u001b[K 17% |█████▋ | 2.5MB 8.9MB/s eta 0:00:02\r\u001b[K 17% |█████▋ | 2.5MB 7.8MB/s eta 0:00:02\r\u001b[K 17% |█████▊ | 2.5MB 7.0MB/s eta 0:00:02\r\u001b[K 17% |█████▊ | 2.6MB 7.1MB/s eta 0:00:02", + "\r\u001b[K 17% |█████▊ | 2.6MB 6.5MB/s eta 0:00:02\r\u001b[K 17% |█████▊ | 2.6MB 6.6MB/s eta 0:00:02\r\u001b[K 18% |█████▊ | 2.6MB 6.3MB/s eta 0:00:02\r\u001b[K 18% |█████▉ | 2.6MB 5.4MB/s eta 0:00:03\r\u001b[K 18% |█████▉ | 2.6MB 5.7MB/s eta 0:00:03\r\u001b[K 18% 
|█████▉ | 2.6MB 6.0MB/s eta 0:00:02\r\u001b[K 18% |█████▉ | 2.6MB 5.1MB/s eta 0:00:03\r\u001b[K 18% |█████▉ | 2.6MB 5.9MB/s eta 0:00:02\r\u001b[K 18% |██████ | 2.7MB 5.5MB/s eta 0:00:03\r\u001b[K 18% |██████ | 2.7MB 5.5MB/s eta 0:00:03\r\u001b[K 18% |██████ | 2.7MB 5.5MB/s eta 0:00:03\r\u001b[K 18% |██████ | 2.7MB 4.6MB/s eta 0:00:03\r\u001b[K 18% |██████ | 2.7MB 5.1MB/s eta 0:00:03\r\u001b[K 18% |██████ | 2.7MB 5.2MB/s eta 0:00:03\r\u001b[K 18% |██████ | 2.7MB 4.6MB/s eta 0:00:03\r\u001b[K 18% |██████ | 2.7MB 4.9MB/s eta 0:00:03\r\u001b[K 19% |██████ | 2.7MB 5.1MB/s eta 0:00:03\r\u001b[K 19% |██████ | 2.7MB 5.2MB/s eta 0:00:03\r\u001b[K 19% |██████▏ | 2.8MB 5.1MB/s eta 0:00:03\r\u001b[K 19% |██████▏ | 2.8MB 4.6MB/s eta 0:00:03\r\u001b[K 19% |██████▏ | 2.8MB 5.0MB/s eta 0:00:03\r\u001b[K 19% |██████▏ | 2.8MB 5.5MB/s eta 0:00:03\r\u001b[K 19% |██████▏ | 2.8MB 4.8MB/s eta 0:00:03\r\u001b[K 19% |██████▎ | 2.8MB 5.5MB/s eta 0:00:03\r\u001b[K 19% |██████▎ | 2.8MB 5.5MB/s eta 0:00:03\r\u001b[K 19% |██████▎ | 2.8MB 5.5MB/s eta 0:00:03", + "\r\u001b[K 19% |██████▎ | 2.8MB 5.5MB/s eta 0:00:03\r\u001b[K 19% |██████▍ | 2.8MB 4.8MB/s eta 0:00:03\r\u001b[K 19% |██████▍ | 2.9MB 5.6MB/s eta 0:00:03\r\u001b[K 19% |██████▍ | 2.9MB 5.6MB/s eta 0:00:03\r\u001b[K 20% |██████▍ | 2.9MB 4.9MB/s eta 0:00:03\r\u001b[K 20% |██████▍ | 2.9MB 5.4MB/s eta 0:00:03\r\u001b[K 20% |██████▌ | 2.9MB 5.6MB/s eta 0:00:03\r\u001b[K 20% |██████▌ | 2.9MB 5.6MB/s eta 0:00:03\r\u001b[K 20% |██████▌ | 2.9MB 6.0MB/s eta 0:00:02\r\u001b[K 20% |██████▌ | 2.9MB 5.0MB/s eta 0:00:03\r\u001b[K 20% |██████▌ | 2.9MB 5.5MB/s eta 0:00:03\r\u001b[K 20% |██████▋ | 2.9MB 5.9MB/s eta 0:00:02\r\u001b[K 20% |██████▋ | 3.0MB 5.0MB/s eta 0:00:03\r\u001b[K 20% |██████▋ | 3.0MB 5.4MB/s eta 0:00:03\r\u001b[K 20% |██████▋ | 3.0MB 5.6MB/s eta 0:00:03\r\u001b[K 20% |██████▋ | 3.0MB 5.5MB/s eta 0:00:03\r\u001b[K 20% |██████▊ | 3.0MB 6.1MB/s eta 0:00:02\r\u001b[K 20% |██████▊ | 3.0MB 5.4MB/s eta 0:00:03\r\u001b[K 21% |██████▊ | 3.0MB 
5.8MB/s eta 0:00:02\r\u001b[K 21% |██████▊ | 3.0MB 5.6MB/s eta 0:00:03\r\u001b[K 21% |██████▊ | 3.0MB 5.2MB/s eta 0:00:03\r\u001b[K 21% |██████▉ | 3.1MB 5.6MB/s eta 0:00:03\r\u001b[K 21% |██████▉ | 3.1MB 6.1MB/s eta 0:00:02\r\u001b[K 21% |██████▉ | 3.1MB 6.2MB/s eta 0:00:02\r\u001b[K 21% |██████▉ | 3.1MB 5.9MB/s eta 0:00:02\r\u001b[K 21% |██████▉ | 3.1MB 5.4MB/s eta 0:00:03\r\u001b[K 21% |███████ | 3.1MB 5.6MB/s eta 0:00:03\r\u001b[K 21% |███████ | 3.1MB 5.3MB/s eta 0:00:03", + "\r\u001b[K 21% |███████ | 3.1MB 4.2MB/s eta 0:00:03\r\u001b[K 21% |███████ | 3.1MB 5.0MB/s eta 0:00:03\r\u001b[K 21% |███████ | 3.1MB 4.9MB/s eta 0:00:03\r\u001b[K 21% |███████ | 3.2MB 4.9MB/s eta 0:00:03\r\u001b[K 22% |███████ | 3.2MB 4.6MB/s eta 0:00:03\r\u001b[K 22% |███████ | 3.2MB 3.7MB/s eta 0:00:04\r\u001b[K 22% |███████ | 3.2MB 4.1MB/s eta 0:00:03\r\u001b[K 22% |███████ | 3.2MB 3.8MB/s eta 0:00:03\r\u001b[K 22% |███████▏ | 3.2MB 3.1MB/s eta 0:00:04\r\u001b[K 22% |███████▏ | 3.2MB 3.5MB/s eta 0:00:04\r\u001b[K 22% |███████▏ | 3.2MB 4.1MB/s eta 0:00:03\r\u001b[K 22% |███████▏ | 3.2MB 4.1MB/s eta 0:00:03\r\u001b[K 22% |███████▎ | 3.2MB 4.0MB/s eta 0:00:03\r\u001b[K 22% |███████▎ | 3.3MB 3.4MB/s eta 0:00:04\r\u001b[K 22% |███████▎ | 3.3MB 3.9MB/s eta 0:00:03\r\u001b[K 22% |███████▎ | 3.3MB 4.3MB/s eta 0:00:03\r\u001b[K 22% |███████▎ | 3.3MB 3.7MB/s eta 0:00:03\r\u001b[K 22% |███████▍ | 3.3MB 4.2MB/s eta 0:00:03", + "\r\u001b[K 23% |███████▍ | 3.3MB 3.3MB/s eta 0:00:04\r\u001b[K 23% |███████▍ | 3.3MB 3.2MB/s eta 0:00:04\r\u001b[K 23% |███████▍ | 3.3MB 3.2MB/s eta 0:00:04\r\u001b[K 23% |███████▍ | 3.3MB 2.5MB/s eta 0:00:05\r\u001b[K 23% |███████▌ | 3.3MB 2.7MB/s eta 0:00:05\r\u001b[K 23% |███████▌ | 3.4MB 2.5MB/s eta 0:00:05\r\u001b[K 23% |███████▌ | 3.4MB 2.3MB/s eta 0:00:05\r\u001b[K 23% |███████▌ | 3.4MB 2.4MB/s eta 0:00:05\r\u001b[K 23% |███████▌ | 3.4MB 2.5MB/s eta 0:00:05\r\u001b[K 23% |███████▋ | 3.4MB 2.5MB/s eta 0:00:05\r\u001b[K 23% |███████▋ | 3.4MB 3.2MB/s eta 
0:00:04\r\u001b[K 23% |███████▋ | 3.4MB 3.1MB/s eta 0:00:04\r\u001b[K 23% |███████▋ | 3.4MB 3.2MB/s eta 0:00:04\r\u001b[K 23% |███████▋ | 3.4MB 4.2MB/s eta 0:00:03\r\u001b[K 24% |███████▊ | 3.5MB 4.0MB/s eta 0:00:03\r\u001b[K 24% |███████▊ | 3.5MB 5.5MB/s eta 0:00:02\r\u001b[K 24% |███████▊ | 3.5MB 6.7MB/s eta 0:00:02\r\u001b[K 24% |███████▊ | 3.5MB 7.2MB/s eta 0:00:02\r\u001b[K 24% |███████▊ | 3.5MB 8.7MB/s eta 0:00:02\r\u001b[K 24% |███████▉ | 3.5MB 8.3MB/s eta 0:00:02\r\u001b[K 24% |███████▉ | 3.5MB 10.7MB/s eta 0:00:02\r\u001b[K 24% |███████▉ | 3.5MB 12.6MB/s eta 0:00:01\r\u001b[K 24% |███████▉ | 3.5MB 11.9MB/s eta 0:00:01\r\u001b[K 24% |███████▉ | 3.5MB 13.3MB/s eta 0:00:01\r\u001b[K 24% |████████ | 3.6MB 14.6MB/s eta 0:00:01\r\u001b[K 24% |████████ | 3.6MB 14.8MB/s eta 0:00:01", + "\r\u001b[K 24% |████████ | 3.6MB 14.8MB/s eta 0:00:01\r\u001b[K 24% |████████ | 3.6MB 12.6MB/s eta 0:00:01\r\u001b[K 24% |████████ | 3.6MB 13.7MB/s eta 0:00:01\r\u001b[K 25% |████████ | 3.6MB 14.0MB/s eta 0:00:01\r\u001b[K 25% |████████ | 3.6MB 11.3MB/s eta 0:00:01\r\u001b[K 25% |████████ | 3.6MB 12.0MB/s eta 0:00:01\r\u001b[K 25% |████████ | 3.6MB 11.3MB/s eta 0:00:01\r\u001b[K 25% |████████▏ | 3.6MB 11.2MB/s eta 0:00:01\r\u001b[K 25% |████████▏ | 3.7MB 11.2MB/s eta 0:00:01\r\u001b[K 25% |████████▏ | 3.7MB 9.9MB/s eta 0:00:02\r\u001b[K 25% |████████▏ | 3.7MB 10.6MB/s eta 0:00:02\r\u001b[K 25% |████████▏ | 3.7MB 11.6MB/s eta 0:00:01\r\u001b[K 25% |████████▎ | 3.7MB 10.8MB/s eta 0:00:01\r\u001b[K 25% |████████▎ | 3.7MB 11.5MB/s eta 0:00:01\r\u001b[K 25% |████████▎ | 3.7MB 13.1MB/s eta 0:00:01\r\u001b[K 25% |████████▎ | 3.7MB 13.3MB/s eta 0:00:01\r\u001b[K 25% |████████▎ | 3.7MB 14.8MB/s eta 0:00:01\r\u001b[K 26% |████████▍ | 3.7MB 10.8MB/s eta 0:00:01\r\u001b[K 26% |████████▍ | 3.8MB 11.3MB/s eta 0:00:01\r\u001b[K 26% |████████▍ | 3.8MB 10.6MB/s eta 0:00:02\r\u001b[K 26% |████████▍ | 3.8MB 8.1MB/s eta 0:00:02\r\u001b[K 26% |████████▍ | 3.8MB 8.0MB/s eta 0:00:02\r\u001b[K 26% 
|████████▌ | 3.8MB 7.3MB/s eta 0:00:02\r\u001b[K 26% |████████▌ | 3.8MB 7.3MB/s eta 0:00:02\r\u001b[K 26% |████████▌ | 3.8MB 6.7MB/s eta 0:00:02\r\u001b[K 26% |████████▌ | 3.8MB 5.9MB/s eta 0:00:02\r\u001b[K 26% |████████▌ | 3.8MB 6.0MB/s eta 0:00:02\r\u001b[K 26% |████████▋ | 3.9MB 5.7MB/s eta 0:00:02\r\u001b[K 26% |████████▋ | 3.9MB 4.9MB/s eta 0:00:03\r\u001b[K 26% |████████▋ | 3.9MB 5.3MB/s eta 0:00:02\r\u001b[K 26% |████████▋ | 3.9MB 5.7MB/s eta 0:00:02\r\u001b[K 27% |████████▋ | 3.9MB 5.8MB/s eta 0:00:02\r\u001b[K 27% |████████▊ | 3.9MB 5.9MB/s eta 0:00:02\r\u001b[K 27% |████████▊ | 3.9MB 5.1MB/s eta 0:00:03\r\u001b[K 27% |████████▊ | 3.9MB 5.6MB/s eta 0:00:02\r\u001b[K 27% |████████▊ | 3.9MB 5.8MB/s eta 0:00:02", + "\r\u001b[K 27% |████████▊ | 3.9MB 5.0MB/s eta 0:00:03\r\u001b[K 27% |████████▉ | 4.0MB 6.1MB/s eta 0:00:02\r\u001b[K 27% |████████▉ | 4.0MB 6.2MB/s eta 0:00:02\r\u001b[K 27% |████████▉ | 4.0MB 6.2MB/s eta 0:00:02\r\u001b[K 27% |████████▉ | 4.0MB 6.2MB/s eta 0:00:02\r\u001b[K 27% |████████▉ | 4.0MB 5.6MB/s eta 0:00:02\r\u001b[K 27% |█████████ | 4.0MB 6.2MB/s eta 0:00:02\r\u001b[K 27% |█████████ | 4.0MB 6.3MB/s eta 0:00:02\r\u001b[K 27% |█████████ | 4.0MB 5.5MB/s eta 0:00:02\r\u001b[K 28% |█████████ | 4.0MB 6.1MB/s eta 0:00:02\r\u001b[K 28% |█████████ | 4.0MB 6.2MB/s eta 0:00:02\r\u001b[K 28% |█████████ | 4.1MB 6.1MB/s eta 0:00:02\r\u001b[K 28% |█████████ | 4.1MB 6.2MB/s eta 0:00:02\r\u001b[K 28% |█████████ | 4.1MB 5.4MB/s eta 0:00:02\r\u001b[K 28% |█████████ | 4.1MB 5.7MB/s eta 0:00:02\r\u001b[K 28% |█████████▏ | 4.1MB 5.8MB/s eta 0:00:02\r\u001b[K 28% |█████████▏ | 4.1MB 5.3MB/s eta 0:00:02\r\u001b[K 28% |█████████▏ | 4.1MB 6.1MB/s eta 0:00:02\r\u001b[K 28% |█████████▏ | 4.1MB 5.5MB/s eta 0:00:02\r\u001b[K 28% |█████████▏ | 4.1MB 5.5MB/s eta 0:00:02\r\u001b[K 28% |█████████▎ | 4.1MB 5.6MB/s eta 0:00:02\r\u001b[K 28% |█████████▎ | 4.2MB 4.9MB/s eta 0:00:03\r\u001b[K 28% |█████████▎ | 4.2MB 5.4MB/s eta 0:00:02\r\u001b[K 29% |█████████▎ | 4.2MB 
5.7MB/s eta 0:00:02\r\u001b[K 29% |█████████▎ | 4.2MB 5.0MB/s eta 0:00:03\r\u001b[K 29% |█████████▍ | 4.2MB 5.4MB/s eta 0:00:02\r\u001b[K 29% |█████████▍ | 4.2MB 5.2MB/s eta 0:00:02\r\u001b[K 29% |█████████▍ | 4.2MB 5.2MB/s eta 0:00:02", + "\r\u001b[K 29% |█████████▍ | 4.2MB 5.4MB/s eta 0:00:02\r\u001b[K 29% |█████████▍ | 4.2MB 4.7MB/s eta 0:00:03\r\u001b[K 29% |█████████▌ | 4.2MB 5.2MB/s eta 0:00:02\r\u001b[K 29% |█████████▌ | 4.3MB 5.4MB/s eta 0:00:02\r\u001b[K 29% |█████████▌ | 4.3MB 3.9MB/s eta 0:00:03\r\u001b[K 29% |█████████▌ | 4.3MB 4.2MB/s eta 0:00:03\r\u001b[K 29% |█████████▌ | 4.3MB 4.3MB/s eta 0:00:03\r\u001b[K 29% |█████████▋ | 4.3MB 4.3MB/s eta 0:00:03\r\u001b[K 29% |█████████▋ | 4.3MB 4.0MB/s eta 0:00:03\r\u001b[K 30% |█████████▋ | 4.3MB 3.5MB/s eta 0:00:03\r\u001b[K 30% |█████████▋ | 4.3MB 4.0MB/s eta 0:00:03\r\u001b[K 30% |█████████▋ | 4.3MB 4.2MB/s eta 0:00:03\r\u001b[K 30% |█████████▊ | 4.4MB 3.7MB/s eta 0:00:03\r\u001b[K 30% |█████████▊ | 4.4MB 4.0MB/s eta 0:00:03\r\u001b[K 30% |█████████▊ | 4.4MB 4.7MB/s eta 0:00:03\r\u001b[K 30% |█████████▊ | 4.4MB 4.6MB/s eta 0:00:03\r\u001b[K 30% |█████████▊ | 4.4MB 4.3MB/s eta 0:00:03\r\u001b[K 30% |█████████▉ | 4.4MB 3.9MB/s eta 0:00:03\r\u001b[K 30% |█████████▉ | 4.4MB 4.4MB/s eta 0:00:03\r\u001b[K 30% |█████████▉ | 4.4MB 4.9MB/s eta 0:00:03\r\u001b[K 30% |█████████▉ | 4.4MB 4.8MB/s eta 0:00:03\r\u001b[K 30% |██████████ | 4.4MB 5.2MB/s eta 0:00:02\r\u001b[K 30% |██████████ | 4.5MB 6.0MB/s eta 0:00:02\r\u001b[K 31% |██████████ | 4.5MB 6.0MB/s eta 0:00:02", + "\r\u001b[K 31% |██████████ | 4.5MB 6.0MB/s eta 0:00:02\r\u001b[K 31% |██████████ | 4.5MB 5.6MB/s eta 0:00:02\r\u001b[K 31% |██████████ | 4.5MB 7.1MB/s eta 0:00:02\r\u001b[K 31% |██████████ | 4.5MB 7.5MB/s eta 0:00:02\r\u001b[K 31% |██████████ | 4.5MB 6.3MB/s eta 0:00:02\r\u001b[K 31% |██████████ | 4.5MB 6.8MB/s eta 0:00:02\r\u001b[K 31% |██████████ | 4.5MB 6.3MB/s eta 0:00:02\r\u001b[K 31% |██████████▏ | 4.5MB 6.3MB/s eta 0:00:02\r\u001b[K 31% 
|██████████▏ | 4.6MB 5.7MB/s eta 0:00:02\r\u001b[K 31% |██████████▏ | 4.6MB 4.8MB/s eta 0:00:03\r\u001b[K 31% |██████████▏ | 4.6MB 5.8MB/s eta 0:00:02\r\u001b[K 31% |██████████▏ | 4.6MB 5.6MB/s eta 0:00:02\r\u001b[K 31% |██████████▎ | 4.6MB 4.7MB/s eta 0:00:03\r\u001b[K 32% |██████████▎ | 4.6MB 5.0MB/s eta 0:00:02\r\u001b[K 32% |██████████▎ | 4.6MB 5.8MB/s eta 0:00:02\r\u001b[K 32% |██████████▎ | 4.6MB 5.8MB/s eta 0:00:02\r\u001b[K 32% |██████████▎ | 4.6MB 5.5MB/s eta 0:00:02\r\u001b[K 32% |██████████▍ | 4.6MB 4.5MB/s eta 0:00:03\r\u001b[K 32% |██████████▍ | 4.7MB 5.0MB/s eta 0:00:02\r\u001b[K 32% |██████████▍ | 4.7MB 5.4MB/s eta 0:00:02\r\u001b[K 32% |██████████▍ | 4.7MB 4.6MB/s eta 0:00:03\r\u001b[K 32% |██████████▍ | 4.7MB 5.3MB/s eta 0:00:02\r\u001b[K 32% |██████████▌ | 4.7MB 6.2MB/s eta 0:00:02\r\u001b[K 32% |██████████▌ | 4.7MB 6.3MB/s eta 0:00:02\r\u001b[K 32% |██████████▌ | 4.7MB 5.5MB/s eta 0:00:02", + "\r\u001b[K 32% |██████████▌ | 4.7MB 3.9MB/s eta 0:00:03\r\u001b[K 32% |██████████▌ | 4.7MB 4.3MB/s eta 0:00:03\r\u001b[K 33% |██████████▋ | 4.8MB 3.8MB/s eta 0:00:03\r\u001b[K 33% |██████████▋ | 4.8MB 2.8MB/s eta 0:00:04\r\u001b[K 33% |██████████▋ | 4.8MB 2.9MB/s eta 0:00:04\r\u001b[K 33% |██████████▋ | 4.8MB 3.0MB/s eta 0:00:04\r\u001b[K 33% |██████████▋ | 4.8MB 3.0MB/s eta 0:00:04\r\u001b[K 33% |██████████▊ | 4.8MB 2.7MB/s eta 0:00:04\r\u001b[K 33% |██████████▊ | 4.8MB 2.5MB/s eta 0:00:04\r\u001b[K 33% |██████████▊ | 4.8MB 2.7MB/s eta 0:00:04\r\u001b[K 33% |██████████▊ | 4.8MB 3.3MB/s eta 0:00:03\r\u001b[K 33% |██████████▉ | 4.8MB 3.1MB/s eta 0:00:04\r\u001b[K 33% |██████████▉ | 4.9MB 3.8MB/s eta 0:00:03\r\u001b[K 33% |██████████▉ | 4.9MB 5.0MB/s eta 0:00:02\r\u001b[K 33% |██████████▉ | 4.9MB 5.1MB/s eta 0:00:02\r\u001b[K 33% |██████████▉ | 4.9MB 5.2MB/s eta 0:00:02\r\u001b[K 34% |███████████ | 4.9MB 4.9MB/s eta 0:00:02\r\u001b[K 34% |███████████ | 4.9MB 6.3MB/s eta 0:00:02\r\u001b[K 34% |███████████ | 4.9MB 6.9MB/s eta 0:00:02\r\u001b[K 34% |███████████ 
| 4.9MB 6.3MB/s eta 0:00:02\r\u001b[K 34% |███████████ | 4.9MB 6.6MB/s eta 0:00:02\r\u001b[K 34% |███████████ | 4.9MB 7.4MB/s eta 0:00:02\r\u001b[K 34% |███████████ | 5.0MB 7.7MB/s eta 0:00:02\r\u001b[K 34% |███████████ | 5.0MB 9.2MB/s eta 0:00:02", + "\r\u001b[K 34% |███████████ | 5.0MB 8.2MB/s eta 0:00:02\r\u001b[K 34% |███████████ | 5.0MB 8.7MB/s eta 0:00:02\r\u001b[K 34% |███████████▏ | 5.0MB 6.5MB/s eta 0:00:02\r\u001b[K 34% |███████████▏ | 5.0MB 5.1MB/s eta 0:00:02\r\u001b[K 34% |███████████▏ | 5.0MB 5.2MB/s eta 0:00:02\r\u001b[K 34% |███████████▏ | 5.0MB 5.1MB/s eta 0:00:02\r\u001b[K 35% |███████████▏ | 5.0MB 5.1MB/s eta 0:00:02\r\u001b[K 35% |███████████▎ | 5.0MB 3.6MB/s eta 0:00:03\r\u001b[K 35% |███████████▎ | 5.1MB 2.6MB/s eta 0:00:04\r\u001b[K 35% |███████████▎ | 5.1MB 2.6MB/s eta 0:00:04\r\u001b[K 35% |███████████▎ | 5.1MB 2.5MB/s eta 0:00:04\r\u001b[K 35% |███████████▎ | 5.1MB 2.3MB/s eta 0:00:05\r\u001b[K 35% |███████████▍ | 5.1MB 2.5MB/s eta 0:00:04", + "\r\u001b[K 35% |███████████▍ | 5.1MB 2.5MB/s eta 0:00:04\r\u001b[K 35% |███████████▍ | 5.1MB 2.5MB/s eta 0:00:04\r\u001b[K 35% |███████████▍ | 5.1MB 2.1MB/s eta 0:00:05\r\u001b[K 35% |███████████▍ | 5.1MB 2.0MB/s eta 0:00:05\r\u001b[K 35% |███████████▌ | 5.2MB 2.4MB/s eta 0:00:04\r\u001b[K 35% |███████████▌ | 5.2MB 3.3MB/s eta 0:00:03\r\u001b[K 35% |███████████▌ | 5.2MB 3.0MB/s eta 0:00:04\r\u001b[K 36% |███████████▌ | 5.2MB 3.3MB/s eta 0:00:03\r\u001b[K 36% |███████████▌ | 5.2MB 3.6MB/s eta 0:00:03\r\u001b[K 36% |███████████▋ | 5.2MB 3.6MB/s eta 0:00:03\r\u001b[K 36% |███████████▋ | 5.2MB 4.0MB/s eta 0:00:03\r\u001b[K 36% |███████████▋ | 5.2MB 3.6MB/s eta 0:00:03\r\u001b[K 36% |███████████▋ | 5.2MB 4.9MB/s eta 0:00:02\r\u001b[K 36% |███████████▊ | 5.2MB 5.5MB/s eta 0:00:02\r\u001b[K 36% |███████████▊ | 5.3MB 4.9MB/s eta 0:00:02\r\u001b[K 36% |███████████▊ | 5.3MB 4.9MB/s eta 0:00:02\r\u001b[K 36% |███████████▊ | 5.3MB 5.4MB/s eta 0:00:02\r\u001b[K 36% |███████████▊ | 5.3MB 5.4MB/s eta 
0:00:02\r\u001b[K 36% |███████████▉ | 5.3MB 5.3MB/s eta 0:00:02\r\u001b[K 36% |███████████▉ | 5.3MB 4.8MB/s eta 0:00:02\r\u001b[K 36% |███████████▉ | 5.3MB 3.7MB/s eta 0:00:03", + "\r\u001b[K 37% |███████████▉ | 5.3MB 3.8MB/s eta 0:00:03\r\u001b[K 37% |███████████▉ | 5.3MB 3.7MB/s eta 0:00:03\r\u001b[K 37% |████████████ | 5.3MB 3.8MB/s eta 0:00:03\r\u001b[K 37% |████████████ | 5.4MB 3.9MB/s eta 0:00:03\r\u001b[K 37% |████████████ | 5.4MB 4.0MB/s eta 0:00:03\r\u001b[K 37% |████████████ | 5.4MB 4.1MB/s eta 0:00:03\r\u001b[K 37% |████████████ | 5.4MB 3.9MB/s eta 0:00:03\r\u001b[K 37% |████████████ | 5.4MB 4.1MB/s eta 0:00:03\r\u001b[K 37% |████████████ | 5.4MB 4.5MB/s eta 0:00:02\r\u001b[K 37% |████████████ | 5.4MB 7.1MB/s eta 0:00:02\r\u001b[K 37% |████████████ | 5.4MB 8.7MB/s eta 0:00:02\r\u001b[K 37% |████████████ | 5.4MB 9.4MB/s eta 0:00:01\r\u001b[K 37% |████████████▏ | 5.4MB 9.9MB/s eta 0:00:01\r\u001b[K 37% |████████████▏ | 5.5MB 13.0MB/s eta 0:00:01\r\u001b[K 38% |████████████▏ | 5.5MB 11.6MB/s eta 0:00:01\r\u001b[K 38% |████████████▏ | 5.5MB 12.6MB/s eta 0:00:01\r\u001b[K 38% |████████████▏ | 5.5MB 11.8MB/s eta 0:00:01\r\u001b[K 38% |████████████▎ | 5.5MB 10.2MB/s eta 0:00:01\r\u001b[K 38% |████████████▎ | 5.5MB 10.5MB/s eta 0:00:01\r\u001b[K 38% |████████████▎ | 5.5MB 11.1MB/s eta 0:00:01\r\u001b[K 38% |████████████▎ | 5.5MB 11.0MB/s eta 0:00:01\r\u001b[K 38% |████████████▎ | 5.5MB 11.0MB/s eta 0:00:01\r\u001b[K 38% |████████████▍ | 5.6MB 8.2MB/s eta 0:00:02\r\u001b[K 38% |████████████▍ | 5.6MB 7.9MB/s eta 0:00:02\r\u001b[K 38% |████████████▍ | 5.6MB 8.4MB/s eta 0:00:02\r\u001b[K 38% |████████████▍ | 5.6MB 7.8MB/s eta 0:00:02\r\u001b[K 38% |████████████▍ | 5.6MB 9.0MB/s eta 0:00:01\r\u001b[K 38% |████████████▌ | 5.6MB 9.7MB/s eta 0:00:01\r\u001b[K 39% |████████████▌ | 5.6MB 10.1MB/s eta 0:00:01\r\u001b[K 39% |████████████▌ | 5.6MB 9.7MB/s eta 0:00:01\r\u001b[K 39% |████████████▌ | 5.6MB 9.0MB/s eta 0:00:01\r\u001b[K 39% |████████████▋ | 5.6MB 9.8MB/s eta 
0:00:01\r\u001b[K 39% |████████████▋ | 5.7MB 12.5MB/s eta 0:00:01\r\u001b[K 39% |████████████▋ | 5.7MB 11.7MB/s eta 0:00:01\r\u001b[K 39% |████████████▋ | 5.7MB 11.4MB/s eta 0:00:01\r\u001b[K 39% |████████████▋ | 5.7MB 11.2MB/s eta 0:00:01\r\u001b[K 39% |████████████▊ | 5.7MB 10.8MB/s eta 0:00:01\r\u001b[K 39% |████████████▊ | 5.7MB 9.8MB/s eta 0:00:01\r\u001b[K 39% |████████████▊ | 5.7MB 9.1MB/s eta 0:00:01\r\u001b[K 39% |████████████▊ | 5.7MB 9.4MB/s eta 0:00:01\r\u001b[K 39% |████████████▊ | 5.7MB 9.3MB/s eta 0:00:01\r\u001b[K 39% |████████████▉ | 5.7MB 7.6MB/s eta 0:00:02\r\u001b[K 40% |████████████▉ | 5.8MB 7.3MB/s eta 0:00:02", + "\r\u001b[K 40% |████████████▉ | 5.8MB 5.0MB/s eta 0:00:02\r\u001b[K 40% |████████████▉ | 5.8MB 4.9MB/s eta 0:00:02\r\u001b[K 40% |████████████▉ | 5.8MB 4.2MB/s eta 0:00:03\r\u001b[K 40% |█████████████ | 5.8MB 4.0MB/s eta 0:00:03\r\u001b[K 40% |█████████████ | 5.8MB 4.3MB/s eta 0:00:03\r\u001b[K 40% |█████████████ | 5.8MB 4.0MB/s eta 0:00:03\r\u001b[K 40% |█████████████ | 5.8MB 3.5MB/s eta 0:00:03\r\u001b[K 40% |█████████████ | 5.8MB 3.6MB/s eta 0:00:03\r\u001b[K 40% |█████████████ | 5.8MB 3.7MB/s eta 0:00:03\r\u001b[K 40% |█████████████ | 5.9MB 3.7MB/s eta 0:00:03\r\u001b[K 40% |█████████████ | 5.9MB 4.7MB/s eta 0:00:02\r\u001b[K 40% |█████████████ | 5.9MB 4.7MB/s eta 0:00:02\r\u001b[K 40% |█████████████ | 5.9MB 5.6MB/s eta 0:00:02\r\u001b[K 41% |█████████████▏ | 5.9MB 4.7MB/s eta 0:00:02\r\u001b[K 41% |█████████████▏ | 5.9MB 3.8MB/s eta 0:00:03\r\u001b[K 41% |█████████████▏ | 5.9MB 4.0MB/s eta 0:00:03\r\u001b[K 41% |█████████████▏ | 5.9MB 4.4MB/s eta 0:00:02\r\u001b[K 41% |█████████████▏ | 5.9MB 4.4MB/s eta 0:00:02\r\u001b[K 41% |█████████████▎ | 5.9MB 4.6MB/s eta 0:00:02\r\u001b[K 41% |█████████████▎ | 6.0MB 4.5MB/s eta 0:00:02\r\u001b[K 41% |█████████████▎ | 6.0MB 4.8MB/s eta 0:00:02\r\u001b[K 41% |█████████████▎ | 6.0MB 4.7MB/s eta 0:00:02", + "\r\u001b[K 41% |█████████████▎ | 6.0MB 4.2MB/s eta 0:00:02\r\u001b[K 41% 
|█████████████▍ | 6.0MB 4.8MB/s eta 0:00:02\r\u001b[K 41% |█████████████▍ | 6.0MB 4.4MB/s eta 0:00:02\r\u001b[K 41% |█████████████▍ | 6.0MB 4.5MB/s eta 0:00:02\r\u001b[K 41% |█████████████▍ | 6.0MB 4.5MB/s eta 0:00:02\r\u001b[K 42% |█████████████▌ | 6.0MB 4.2MB/s eta 0:00:02\r\u001b[K 42% |█████████████▌ | 6.1MB 4.3MB/s eta 0:00:02\r\u001b[K 42% |█████████████▌ | 6.1MB 4.1MB/s eta 0:00:03\r\u001b[K 42% |█████████████▌ | 6.1MB 3.1MB/s eta 0:00:03\r\u001b[K 42% |█████████████▌ | 6.1MB 3.1MB/s eta 0:00:03\r\u001b[K 42% |█████████████▋ | 6.1MB 2.9MB/s eta 0:00:03\r\u001b[K 42% |█████████████▋ | 6.1MB 3.1MB/s eta 0:00:03\r\u001b[K 42% |█████████████▋ | 6.1MB 2.8MB/s eta 0:00:03", + "\r\u001b[K 42% |█████████████▋ | 6.1MB 1.8MB/s eta 0:00:05\r\u001b[K 42% |█████████████▋ | 6.1MB 1.8MB/s eta 0:00:05\r\u001b[K 42% |█████████████▊ | 6.1MB 1.7MB/s eta 0:00:05\r\u001b[K 42% |█████████████▊ | 6.2MB 1.6MB/s eta 0:00:06\r\u001b[K 42% |█████████████▊ | 6.2MB 1.6MB/s eta 0:00:06\r\u001b[K 42% |█████████████▊ | 6.2MB 1.6MB/s eta 0:00:06\r\u001b[K 43% |█████████████▊ | 6.2MB 1.6MB/s eta 0:00:06\r\u001b[K 43% |█████████████▉ | 6.2MB 1.7MB/s eta 0:00:05\r\u001b[K 43% |█████████████▉ | 6.2MB 1.7MB/s eta 0:00:05\r\u001b[K 43% |█████████████▉ | 6.2MB 2.1MB/s eta 0:00:04\r\u001b[K 43% |█████████████▉ | 6.2MB 3.6MB/s eta 0:00:03\r\u001b[K 43% |█████████████▉ | 6.2MB 3.0MB/s eta 0:00:03\r\u001b[K 43% |██████████████ | 6.2MB 3.5MB/s eta 0:00:03", + "\r\u001b[K 43% |██████████████ | 6.3MB 2.4MB/s eta 0:00:04\r\u001b[K 43% |██████████████ | 6.3MB 2.4MB/s eta 0:00:04\r\u001b[K 43% |██████████████ | 6.3MB 3.0MB/s eta 0:00:03\r\u001b[K 43% |██████████████ | 6.3MB 2.8MB/s eta 0:00:03\r\u001b[K 43% |██████████████ | 6.3MB 2.9MB/s eta 0:00:03\r\u001b[K 43% |██████████████ | 6.3MB 2.8MB/s eta 0:00:03\r\u001b[K 43% |██████████████ | 6.3MB 2.5MB/s eta 0:00:04\r\u001b[K 44% |██████████████ | 6.3MB 2.6MB/s eta 0:00:04\r\u001b[K 44% |██████████████ | 6.3MB 1.9MB/s eta 0:00:05\r\u001b[K 44% 
|██████████████▏ | 6.3MB 1.9MB/s eta 0:00:05", + "\r\u001b[K 44% |██████████████▏ | 6.4MB 2.2MB/s eta 0:00:04\r\u001b[K 44% |██████████████▏ | 6.4MB 2.1MB/s eta 0:00:04\r\u001b[K 44% |██████████████▏ | 6.4MB 2.2MB/s eta 0:00:04\r\u001b[K 44% |██████████████▏ | 6.4MB 2.0MB/s eta 0:00:04\r\u001b[K 44% |██████████████▎ | 6.4MB 1.9MB/s eta 0:00:05\r\u001b[K 44% |██████████████▎ | 6.4MB 1.9MB/s eta 0:00:05\r\u001b[K 44% |██████████████▎ | 6.4MB 2.0MB/s eta 0:00:05\r\u001b[K 44% |██████████████▎ | 6.4MB 1.9MB/s eta 0:00:05\r\u001b[K 44% |██████████████▍ | 6.4MB 2.7MB/s eta 0:00:03\r\u001b[K 44% |██████████████▍ | 6.5MB 2.2MB/s eta 0:00:04\r\u001b[K 44% |██████████████▍ | 6.5MB 3.0MB/s eta 0:00:03\r\u001b[K 45% |██████████████▍ | 6.5MB 2.9MB/s eta 0:00:03\r\u001b[K 45% |██████████████▍ | 6.5MB 2.6MB/s eta 0:00:03\r\u001b[K 45% |██████████████▌ | 6.5MB 3.1MB/s eta 0:00:03\r\u001b[K 45% |██████████████▌ | 6.5MB 3.3MB/s eta 0:00:03\r\u001b[K 45% |██████████████▌ | 6.5MB 3.3MB/s eta 0:00:03\r\u001b[K 45% |██████████████▌ | 6.5MB 3.6MB/s eta 0:00:03", + "\r\u001b[K 45% |██████████████▌ | 6.5MB 3.6MB/s eta 0:00:03\r\u001b[K 45% |██████████████▋ | 6.5MB 4.2MB/s eta 0:00:02\r\u001b[K 45% |██████████████▋ | 6.6MB 4.5MB/s eta 0:00:02\r\u001b[K 45% |██████████████▋ | 6.6MB 4.4MB/s eta 0:00:02\r\u001b[K 45% |██████████████▋ | 6.6MB 5.1MB/s eta 0:00:02\r\u001b[K 45% |██████████████▋ | 6.6MB 5.6MB/s eta 0:00:02\r\u001b[K 45% |██████████████▊ | 6.6MB 5.6MB/s eta 0:00:02\r\u001b[K 45% |██████████████▊ | 6.6MB 6.1MB/s eta 0:00:02\r\u001b[K 46% |██████████████▊ | 6.6MB 5.4MB/s eta 0:00:02\r\u001b[K 46% |██████████████▊ | 6.6MB 5.3MB/s eta 0:00:02\r\u001b[K 46% |██████████████▊ | 6.6MB 5.6MB/s eta 0:00:02\r\u001b[K 46% |██████████████▉ | 6.6MB 5.7MB/s eta 0:00:02\r\u001b[K 46% |██████████████▉ | 6.7MB 9.7MB/s eta 0:00:01\r\u001b[K 46% |██████████████▉ | 6.7MB 9.9MB/s eta 0:00:01\r\u001b[K 46% |██████████████▉ | 6.7MB 10.0MB/s eta 0:00:01\r\u001b[K 46% |██████████████▉ | 6.7MB 10.7MB/s eta 
0:00:01\r\u001b[K 46% |███████████████ | 6.7MB 9.1MB/s eta 0:00:01\r\u001b[K 46% |███████████████ | 6.7MB 9.5MB/s eta 0:00:01\r\u001b[K 46% |███████████████ | 6.7MB 10.1MB/s eta 0:00:01\r\u001b[K 46% |███████████████ | 6.7MB 10.6MB/s eta 0:00:01\r\u001b[K 46% |███████████████ | 6.7MB 10.9MB/s eta 0:00:01\r\u001b[K 46% |███████████████ | 6.7MB 11.0MB/s eta 0:00:01\r\u001b[K 47% |███████████████ | 6.8MB 11.1MB/s eta 0:00:01\r\u001b[K 47% |███████████████ | 6.8MB 10.8MB/s eta 0:00:01\r\u001b[K 47% |███████████████ | 6.8MB 10.1MB/s eta 0:00:01\r\u001b[K 47% |███████████████ | 6.8MB 11.2MB/s eta 0:00:01\r\u001b[K 47% |███████████████▏ | 6.8MB 11.9MB/s eta 0:00:01\r\u001b[K 47% |███████████████▏ | 6.8MB 10.7MB/s eta 0:00:01\r\u001b[K 47% |███████████████▏ | 6.8MB 12.7MB/s eta 0:00:01\r\u001b[K 47% |███████████████▏ | 6.8MB 13.2MB/s eta 0:00:01\r\u001b[K 47% |███████████████▎ | 6.8MB 13.0MB/s eta 0:00:01\r\u001b[K 47% |███████████████▎ | 6.9MB 9.4MB/s eta 0:00:01\r\u001b[K 47% |███████████████▎ | 6.9MB 8.6MB/s eta 0:00:01\r\u001b[K 47% |███████████████▎ | 6.9MB 9.1MB/s eta 0:00:01\r\u001b[K 47% |███████████████▎ | 6.9MB 9.3MB/s eta 0:00:01\r\u001b[K 47% |███████████████▍ | 6.9MB 8.4MB/s eta 0:00:01\r\u001b[K 48% |███████████████▍ | 6.9MB 9.4MB/s eta 0:00:01\r\u001b[K 48% |███████████████▍ | 6.9MB 9.9MB/s eta 0:00:01\r\u001b[K 48% |███████████████▍ | 6.9MB 9.9MB/s eta 0:00:01\r\u001b[K 48% |███████████████▍ | 6.9MB 9.9MB/s eta 0:00:01\r\u001b[K 48% |███████████████▌ | 6.9MB 8.9MB/s eta 0:00:01\r\u001b[K 48% |███████████████▌ | 7.0MB 12.6MB/s eta 0:00:01\r\u001b[K 48% |███████████████▌ | 7.0MB 13.8MB/s eta 0:00:01\r\u001b[K 48% |███████████████▌ | 7.0MB 12.1MB/s eta 0:00:01\r\u001b[K 48% |███████████████▌ | 7.0MB 12.6MB/s eta 0:00:01\r\u001b[K 48% |███████████████▋ | 7.0MB 13.7MB/s eta 0:00:01\r\u001b[K 48% |███████████████▋ | 7.0MB 13.7MB/s eta 0:00:01", + "\r\u001b[K 48% |███████████████▋ | 7.0MB 13.3MB/s eta 0:00:01\r\u001b[K 48% |███████████████▋ | 7.0MB 11.6MB/s eta 
0:00:01\r\u001b[K 48% |███████████████▋ | 7.0MB 12.2MB/s eta 0:00:01\r\u001b[K 49% |███████████████▊ | 7.0MB 12.5MB/s eta 0:00:01\r\u001b[K 49% |███████████████▊ | 7.1MB 11.5MB/s eta 0:00:01\r\u001b[K 49% |███████████████▊ | 7.1MB 11.8MB/s eta 0:00:01\r\u001b[K 49% |███████████████▊ | 7.1MB 12.2MB/s eta 0:00:01\r\u001b[K 49% |███████████████▊ | 7.1MB 12.1MB/s eta 0:00:01\r\u001b[K 49% |███████████████▉ | 7.1MB 11.2MB/s eta 0:00:01\r\u001b[K 49% |███████████████▉ | 7.1MB 10.3MB/s eta 0:00:01\r\u001b[K 49% |███████████████▉ | 7.1MB 11.1MB/s eta 0:00:01\r\u001b[K 49% |███████████████▉ | 7.1MB 11.8MB/s eta 0:00:01\r\u001b[K 49% |███████████████▉ | 7.1MB 10.8MB/s eta 0:00:01\r\u001b[K 49% |████████████████ | 7.1MB 12.0MB/s eta 0:00:01\r\u001b[K 49% |████████████████ | 7.2MB 12.8MB/s eta 0:00:01\r\u001b[K 49% |████████████████ | 7.2MB 13.0MB/s eta 0:00:01\r\u001b[K 49% |████████████████ | 7.2MB 13.1MB/s eta 0:00:01\r\u001b[K 49% |████████████████ | 7.2MB 12.1MB/s eta 0:00:01\r\u001b[K 50% |████████████████ | 7.2MB 14.2MB/s eta 0:00:01\r\u001b[K 50% |████████████████ | 7.2MB 14.7MB/s eta 0:00:01\r\u001b[K 50% |████████████████ | 7.2MB 13.0MB/s eta 0:00:01\r\u001b[K 50% |████████████████ | 7.2MB 13.8MB/s eta 0:00:01\r\u001b[K 50% |████████████████▏ | 7.2MB 14.6MB/s eta 0:00:01\r\u001b[K 50% |████████████████▏ | 7.2MB 14.4MB/s eta 0:00:01\r\u001b[K 50% |████████████████▏ | 7.3MB 12.6MB/s eta 0:00:01\r\u001b[K 50% |████████████████▏ | 7.3MB 11.4MB/s eta 0:00:01\r\u001b[K 50% |████████████████▏ | 7.3MB 12.3MB/s eta 0:00:01\r\u001b[K 50% |████████████████▎ | 7.3MB 11.2MB/s eta 0:00:01\r\u001b[K 50% |████████████████▎ | 7.3MB 9.0MB/s eta 0:00:01\r\u001b[K 50% |████████████████▎ | 7.3MB 9.5MB/s eta 0:00:01\r\u001b[K 50% |████████████████▎ | 7.3MB 8.5MB/s eta 0:00:01\r\u001b[K 50% |████████████████▎ | 7.3MB 8.5MB/s eta 0:00:01\r\u001b[K 51% |████████████████▍ | 7.3MB 6.9MB/s eta 0:00:02\r\u001b[K 51% |████████████████▍ | 7.4MB 5.8MB/s eta 0:00:02\r\u001b[K 51% |████████████████▍ 
| 7.4MB 6.3MB/s eta 0:00:02\r\u001b[K 51% |████████████████▍ | 7.4MB 5.7MB/s eta 0:00:02\r\u001b[K 51% |████████████████▍ | 7.4MB 5.2MB/s eta 0:00:02\r\u001b[K 51% |████████████████▌ | 7.4MB 5.7MB/s eta 0:00:02\r\u001b[K 51% |████████████████▌ | 7.4MB 5.5MB/s eta 0:00:02\r\u001b[K 51% |████████████████▌ | 7.4MB 5.4MB/s eta 0:00:02\r\u001b[K 51% |████████████████▌ | 7.4MB 5.8MB/s eta 0:00:02\r\u001b[K 51% |████████████████▌ | 7.4MB 5.0MB/s eta 0:00:02\r\u001b[K 51% |████████████████▋ | 7.4MB 6.0MB/s eta 0:00:02", + "\r\u001b[K 51% |████████████████▋ | 7.5MB 6.0MB/s eta 0:00:02\r\u001b[K 51% |████████████████▋ | 7.5MB 5.1MB/s eta 0:00:02\r\u001b[K 51% |████████████████▋ | 7.5MB 5.8MB/s eta 0:00:02\r\u001b[K 52% |████████████████▋ | 7.5MB 5.8MB/s eta 0:00:02\r\u001b[K 52% |████████████████▊ | 7.5MB 5.7MB/s eta 0:00:02\r\u001b[K 52% |████████████████▊ | 7.5MB 6.1MB/s eta 0:00:02\r\u001b[K 52% |████████████████▊ | 7.5MB 5.1MB/s eta 0:00:02\r\u001b[K 52% |████████████████▊ | 7.5MB 5.1MB/s eta 0:00:02\r\u001b[K 52% |████████████████▊ | 7.5MB 5.3MB/s eta 0:00:02\r\u001b[K 52% |████████████████▉ | 7.5MB 4.8MB/s eta 0:00:02\r\u001b[K 52% |████████████████▉ | 7.6MB 5.6MB/s eta 0:00:02\r\u001b[K 52% |████████████████▉ | 7.6MB 5.6MB/s eta 0:00:02\r\u001b[K 52% |████████████████▉ | 7.6MB 5.5MB/s eta 0:00:02\r\u001b[K 52% |████████████████▉ | 7.6MB 4.7MB/s eta 0:00:02\r\u001b[K 52% |█████████████████ | 7.6MB 3.9MB/s eta 0:00:02\r\u001b[K 52% |█████████████████ | 7.6MB 4.2MB/s eta 0:00:02\r\u001b[K 52% |█████████████████ | 7.6MB 4.2MB/s eta 0:00:02\r\u001b[K 53% |█████████████████ | 7.6MB 4.2MB/s eta 0:00:02\r\u001b[K 53% |█████████████████ | 7.6MB 4.5MB/s eta 0:00:02\r\u001b[K 53% |█████████████████ | 7.6MB 4.6MB/s eta 0:00:02\r\u001b[K 53% |█████████████████ | 7.7MB 4.5MB/s eta 0:00:02\r\u001b[K 53% |█████████████████ | 7.7MB 4.0MB/s eta 0:00:02", + "\r\u001b[K 53% |█████████████████ | 7.7MB 3.1MB/s eta 0:00:03\r\u001b[K 53% |█████████████████▏ | 7.7MB 3.3MB/s eta 
0:00:03\r\u001b[K 53% |█████████████████▏ | 7.7MB 3.1MB/s eta 0:00:03\r\u001b[K 53% |█████████████████▏ | 7.7MB 2.8MB/s eta 0:00:03\r\u001b[K 53% |█████████████████▏ | 7.7MB 3.0MB/s eta 0:00:03\r\u001b[K 53% |█████████████████▏ | 7.7MB 3.0MB/s eta 0:00:03\r\u001b[K 53% |█████████████████▎ | 7.7MB 2.9MB/s eta 0:00:03\r\u001b[K 53% |█████████████████▎ | 7.8MB 2.9MB/s eta 0:00:03\r\u001b[K 53% |█████████████████▎ | 7.8MB 2.8MB/s eta 0:00:03\r\u001b[K 54% |█████████████████▎ | 7.8MB 3.3MB/s eta 0:00:02\r\u001b[K 54% |█████████████████▎ | 7.8MB 4.3MB/s eta 0:00:02\r\u001b[K 54% |█████████████████▍ | 7.8MB 4.4MB/s eta 0:00:02\r\u001b[K 54% |█████████████████▍ | 7.8MB 6.1MB/s eta 0:00:02\r\u001b[K 54% |█████████████████▍ | 7.8MB 6.5MB/s eta 0:00:02\r\u001b[K 54% |█████████████████▍ | 7.8MB 6.5MB/s eta 0:00:02\r\u001b[K 54% |█████████████████▍ | 7.8MB 6.3MB/s eta 0:00:02\r\u001b[K 54% |█████████████████▌ | 7.8MB 4.8MB/s eta 0:00:02\r\u001b[K 54% |█████████████████▌ | 7.9MB 5.1MB/s eta 0:00:02\r\u001b[K 54% |█████████████████▌ | 7.9MB 4.6MB/s eta 0:00:02", + "\r\u001b[K 54% |█████████████████▌ | 7.9MB 2.9MB/s eta 0:00:03\r\u001b[K 54% |█████████████████▌ | 7.9MB 3.0MB/s eta 0:00:03\r\u001b[K 54% |█████████████████▋ | 7.9MB 2.8MB/s eta 0:00:03\r\u001b[K 54% |█████████████████▋ | 7.9MB 2.8MB/s eta 0:00:03\r\u001b[K 55% |█████████████████▋ | 7.9MB 2.9MB/s eta 0:00:03\r\u001b[K 55% |█████████████████▋ | 7.9MB 2.7MB/s eta 0:00:03\r\u001b[K 55% |█████████████████▋ | 7.9MB 3.0MB/s eta 0:00:03\r\u001b[K 55% |█████████████████▊ | 7.9MB 2.6MB/s eta 0:00:03\r\u001b[K 55% |█████████████████▊ | 8.0MB 2.4MB/s eta 0:00:03\r\u001b[K 55% |█████████████████▊ | 8.0MB 2.7MB/s eta 0:00:03\r\u001b[K 55% |█████████████████▊ | 8.0MB 4.0MB/s eta 0:00:02\r\u001b[K 55% |█████████████████▊ | 8.0MB 4.0MB/s eta 0:00:02\r\u001b[K 55% |█████████████████▉ | 8.0MB 3.7MB/s eta 0:00:02\r\u001b[K 55% |█████████████████▉ | 8.0MB 3.4MB/s eta 0:00:02\r\u001b[K 55% |█████████████████▉ | 8.0MB 3.5MB/s eta 
0:00:02\r\u001b[K 55% |█████████████████▉ | 8.0MB 3.5MB/s eta 0:00:02\r\u001b[K 55% |██████████████████ | 8.0MB 3.4MB/s eta 0:00:02\r\u001b[K 55% |██████████████████ | 8.0MB 4.8MB/s eta 0:00:02", + "\r\u001b[K 56% |██████████████████ | 8.1MB 4.7MB/s eta 0:00:02\r\u001b[K 56% |██████████████████ | 8.1MB 4.7MB/s eta 0:00:02\r\u001b[K 56% |██████████████████ | 8.1MB 3.6MB/s eta 0:00:02\r\u001b[K 56% |██████████████████ | 8.1MB 3.1MB/s eta 0:00:03\r\u001b[K 56% |██████████████████ | 8.1MB 3.6MB/s eta 0:00:02\r\u001b[K 56% |██████████████████ | 8.1MB 3.1MB/s eta 0:00:02\r\u001b[K 56% |██████████████████ | 8.1MB 2.8MB/s eta 0:00:03\r\u001b[K 56% |██████████████████ | 8.1MB 2.9MB/s eta 0:00:03\r\u001b[K 56% |██████████████████▏ | 8.1MB 2.9MB/s eta 0:00:03\r\u001b[K 56% |██████████████████▏ | 8.2MB 3.0MB/s eta 0:00:03\r\u001b[K 56% |██████████████████▏ | 8.2MB 3.2MB/s eta 0:00:02\r\u001b[K 56% |██████████████████▏ | 8.2MB 2.9MB/s eta 0:00:03\r\u001b[K 56% |██████████████████▏ | 8.2MB 3.7MB/s eta 0:00:02\r\u001b[K 56% |██████████████████▎ | 8.2MB 4.2MB/s eta 0:00:02\r\u001b[K 57% |██████████████████▎ | 8.2MB 4.1MB/s eta 0:00:02\r\u001b[K 57% |██████████████████▎ | 8.2MB 5.7MB/s eta 0:00:02\r\u001b[K 57% |██████████████████▎ | 8.2MB 7.4MB/s eta 0:00:01\r\u001b[K 57% |██████████████████▎ | 8.2MB 7.6MB/s eta 0:00:01\r\u001b[K 57% |██████████████████▍ | 8.2MB 7.9MB/s eta 0:00:01\r\u001b[K 57% |██████████████████▍ | 8.3MB 7.4MB/s eta 0:00:01\r\u001b[K 57% |██████████████████▍ | 8.3MB 8.4MB/s eta 0:00:01\r\u001b[K 57% |██████████████████▍ | 8.3MB 10.0MB/s eta 0:00:01", + "\r\u001b[K 57% |██████████████████▍ | 8.3MB 6.5MB/s eta 0:00:01\r\u001b[K 57% |██████████████████▌ | 8.3MB 6.1MB/s eta 0:00:02\r\u001b[K 57% |██████████████████▌ | 8.3MB 5.0MB/s eta 0:00:02\r\u001b[K 57% |██████████████████▌ | 8.3MB 4.9MB/s eta 0:00:02\r\u001b[K 57% |██████████████████▌ | 8.3MB 4.6MB/s eta 0:00:02\r\u001b[K 57% |██████████████████▌ | 8.3MB 4.3MB/s eta 0:00:02\r\u001b[K 58% |██████████████████▋ | 
8.3MB 4.5MB/s eta 0:00:02\r\u001b[K 58% |██████████████████▋ | 8.4MB 4.1MB/s eta 0:00:02\r\u001b[K 58% |██████████████████▋ | 8.4MB 3.9MB/s eta 0:00:02\r\u001b[K 58% |██████████████████▋ | 8.4MB 4.0MB/s eta 0:00:02\r\u001b[K 58% |██████████████████▋ | 8.4MB 5.0MB/s eta 0:00:02\r\u001b[K 58% |██████████████████▊ | 8.4MB 2.8MB/s eta 0:00:03\r\u001b[K 58% |██████████████████▊ | 8.4MB 3.3MB/s eta 0:00:02\r\u001b[K 58% |██████████████████▊ | 8.4MB 3.3MB/s eta 0:00:02\r\u001b[K 58% |██████████████████▊ | 8.4MB 3.5MB/s eta 0:00:02\r\u001b[K 58% |██████████████████▉ | 8.4MB 3.7MB/s eta 0:00:02\r\u001b[K 58% |██████████████████▉ | 8.4MB 3.7MB/s eta 0:00:02\r\u001b[K 58% |██████████████████▉ | 8.5MB 4.2MB/s eta 0:00:02\r\u001b[K 58% |██████████████████▉ | 8.5MB 4.4MB/s eta 0:00:02\r\u001b[K 58% |██████████████████▉ | 8.5MB 3.9MB/s eta 0:00:02\r\u001b[K 59% |███████████████████ | 8.5MB 3.9MB/s eta 0:00:02\r\u001b[K 59% |███████████████████ | 8.5MB 12.1MB/s eta 0:00:01\r\u001b[K 59% |███████████████████ | 8.5MB 11.2MB/s eta 0:00:01\r\u001b[K 59% |███████████████████ | 8.5MB 9.9MB/s eta 0:00:01\r\u001b[K 59% |███████████████████ | 8.5MB 9.7MB/s eta 0:00:01\r\u001b[K 59% |███████████████████ | 8.5MB 9.8MB/s eta 0:00:01\r\u001b[K 59% |███████████████████ | 8.6MB 9.9MB/s eta 0:00:01\r\u001b[K 59% |███████████████████ | 8.6MB 10.0MB/s eta 0:00:01\r\u001b[K 59% |███████████████████ | 8.6MB 10.2MB/s eta 0:00:01\r\u001b[K 59% |███████████████████ | 8.6MB 15.2MB/s eta 0:00:01\r\u001b[K 59% |███████████████████▏ | 8.6MB 16.7MB/s eta 0:00:01", + "\r\u001b[K 59% |███████████████████▏ | 8.6MB 18.9MB/s eta 0:00:01\r\u001b[K 59% |███████████████████▏ | 8.6MB 16.3MB/s eta 0:00:01\r\u001b[K 59% |███████████████████▏ | 8.6MB 19.1MB/s eta 0:00:01\r\u001b[K 60% |███████████████████▏ | 8.6MB 13.0MB/s eta 0:00:01\r\u001b[K 60% |███████████████████▎ | 8.6MB 12.4MB/s eta 0:00:01\r\u001b[K 60% |███████████████████▎ | 8.7MB 9.9MB/s eta 0:00:01\r\u001b[K 60% |███████████████████▎ | 8.7MB 7.2MB/s eta 
0:00:01\r\u001b[K 60% |███████████████████▎ | 8.7MB 7.1MB/s eta 0:00:01\r\u001b[K 60% |███████████████████▎ | 8.7MB 6.3MB/s eta 0:00:01\r\u001b[K 60% |███████████████████▍ | 8.7MB 5.6MB/s eta 0:00:02\r\u001b[K 60% |███████████████████▍ | 8.7MB 5.5MB/s eta 0:00:02\r\u001b[K 60% |███████████████████▍ | 8.7MB 5.2MB/s eta 0:00:02\r\u001b[K 60% |███████████████████▍ | 8.7MB 5.3MB/s eta 0:00:02\r\u001b[K 60% |███████████████████▍ | 8.7MB 5.8MB/s eta 0:00:01\r\u001b[K 60% |███████████████████▌ | 8.7MB 5.4MB/s eta 0:00:02\r\u001b[K 60% |███████████████████▌ | 8.8MB 5.9MB/s eta 0:00:01\r\u001b[K 60% |███████████████████▌ | 8.8MB 6.9MB/s eta 0:00:01\r\u001b[K 61% |███████████████████▌ | 8.8MB 5.8MB/s eta 0:00:01\r\u001b[K 61% |███████████████████▌ | 8.8MB 6.4MB/s eta 0:00:01\r\u001b[K 61% |███████████████████▋ | 8.8MB 6.6MB/s eta 0:00:01\r\u001b[K 61% |███████████████████▋ | 8.8MB 6.7MB/s eta 0:00:01\r\u001b[K 61% |███████████████████▋ | 8.8MB 7.6MB/s eta 0:00:01\r\u001b[K 61% |███████████████████▋ | 8.8MB 6.4MB/s eta 0:00:01\r\u001b[K 61% |███████████████████▊ | 8.8MB 6.7MB/s eta 0:00:01\r\u001b[K 61% |███████████████████▊ | 8.8MB 6.3MB/s eta 0:00:01\r\u001b[K 61% |███████████████████▊ | 8.9MB 4.5MB/s eta 0:00:02\r\u001b[K 61% |███████████████████▊ | 8.9MB 4.5MB/s eta 0:00:02", + "\r\u001b[K 61% |███████████████████▊ | 8.9MB 4.3MB/s eta 0:00:02\r\u001b[K 61% |███████████████████▉ | 8.9MB 4.2MB/s eta 0:00:02\r\u001b[K 61% |███████████████████▉ | 8.9MB 4.2MB/s eta 0:00:02\r\u001b[K 61% |███████████████████▉ | 8.9MB 4.1MB/s eta 0:00:02\r\u001b[K 62% |███████████████████▉ | 8.9MB 4.3MB/s eta 0:00:02\r\u001b[K 62% |███████████████████▉ | 8.9MB 4.6MB/s eta 0:00:02\r\u001b[K 62% |████████████████████ | 8.9MB 4.1MB/s eta 0:00:02\r\u001b[K 62% |████████████████████ | 8.9MB 4.5MB/s eta 0:00:02\r\u001b[K 62% |████████████████████ | 9.0MB 5.9MB/s eta 0:00:01\r\u001b[K 62% |████████████████████ | 9.0MB 6.2MB/s eta 0:00:01\r\u001b[K 62% |████████████████████ | 9.0MB 8.1MB/s eta 
0:00:01\r\u001b[K 62% |████████████████████ | 9.0MB 7.9MB/s eta 0:00:01\r\u001b[K 62% |████████████████████ | 9.0MB 8.6MB/s eta 0:00:01\r\u001b[K 62% |████████████████████ | 9.0MB 8.7MB/s eta 0:00:01\r\u001b[K 62% |████████████████████ | 9.0MB 8.1MB/s eta 0:00:01\r\u001b[K 62% |████████████████████ | 9.0MB 8.9MB/s eta 0:00:01\r\u001b[K 62% |████████████████████▏ | 9.0MB 10.7MB/s eta 0:00:01\r\u001b[K 62% |████████████████████▏ | 9.1MB 11.2MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▏ | 9.1MB 12.8MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▏ | 9.1MB 11.3MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▏ | 9.1MB 12.0MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▎ | 9.1MB 11.3MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▎ | 9.1MB 9.8MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▎ | 9.1MB 10.6MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▎ | 9.1MB 11.0MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▎ | 9.1MB 11.1MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▍ | 9.1MB 11.0MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▍ | 9.2MB 9.8MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▍ | 9.2MB 10.4MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▍ | 9.2MB 11.0MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▍ | 9.2MB 10.3MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▌ | 9.2MB 12.3MB/s eta 0:00:01\r\u001b[K 64% |████████████████████▌ | 9.2MB 13.8MB/s eta 0:00:01\r\u001b[K 64% |████████████████████▌ | 9.2MB 14.1MB/s eta 0:00:01\r\u001b[K 64% |████████████████████▌ | 9.2MB 14.4MB/s eta 0:00:01\r\u001b[K 64% |████████████████████▋ | 9.2MB 10.2MB/s eta 0:00:01\r\u001b[K 64% |████████████████████▋ | 9.2MB 10.3MB/s eta 0:00:01\r\u001b[K 64% |████████████████████▋ | 9.3MB 10.7MB/s eta 0:00:01\r\u001b[K 64% |████████████████████▋ | 9.3MB 9.3MB/s eta 0:00:01\r\u001b[K 64% |████████████████████▋ | 9.3MB 9.7MB/s eta 0:00:01\r\u001b[K 64% |████████████████████▊ | 9.3MB 9.5MB/s eta 0:00:01\r\u001b[K 64% 
|████████████████████▊ | 9.3MB 9.5MB/s eta 0:00:01\r\u001b[K 64% |████████████████████▊ | 9.3MB 9.6MB/s eta 0:00:01\r\u001b[K 64% |████████████████████▊ | 9.3MB 9.1MB/s eta 0:00:01\r\u001b[K 64% |████████████████████▊ | 9.3MB 9.5MB/s eta 0:00:01\r\u001b[K 64% |████████████████████▉ | 9.3MB 12.2MB/s eta 0:00:01\r\u001b[K 65% |████████████████████▉ | 9.3MB 11.5MB/s eta 0:00:01\r\u001b[K 65% |████████████████████▉ | 9.4MB 12.4MB/s eta 0:00:01\r\u001b[K 65% |████████████████████▉ | 9.4MB 14.0MB/s eta 0:00:01\r\u001b[K 65% |████████████████████▉ | 9.4MB 14.4MB/s eta 0:00:01\r\u001b[K 65% |█████████████████████ | 9.4MB 14.3MB/s eta 0:00:01\r\u001b[K 65% |█████████████████████ | 9.4MB 12.4MB/s eta 0:00:01", + "\r\u001b[K 65% |█████████████████████ | 9.4MB 12.5MB/s eta 0:00:01\r\u001b[K 65% |█████████████████████ | 9.4MB 5.1MB/s eta 0:00:01\r\u001b[K 65% |█████████████████████ | 9.4MB 4.5MB/s eta 0:00:02\r\u001b[K 65% |█████████████████████ | 9.4MB 4.5MB/s eta 0:00:02\r\u001b[K 65% |█████████████████████ | 9.5MB 4.2MB/s eta 0:00:02\r\u001b[K 65% |█████████████████████ | 9.5MB 4.2MB/s eta 0:00:02\r\u001b[K 65% |█████████████████████ | 9.5MB 3.6MB/s eta 0:00:02\r\u001b[K 65% |█████████████████████ | 9.5MB 3.1MB/s eta 0:00:02\r\u001b[K 66% |█████████████████████▏ | 9.5MB 3.1MB/s eta 0:00:02\r\u001b[K 66% |█████████████████████▏ | 9.5MB 2.6MB/s eta 0:00:02\r\u001b[K 66% |█████████████████████▏ | 9.5MB 2.4MB/s eta 0:00:02\r\u001b[K 66% |█████████████████████▏ | 9.5MB 3.5MB/s eta 0:00:02\r\u001b[K 66% |█████████████████████▏ | 9.5MB 3.8MB/s eta 0:00:02\r\u001b[K 66% |█████████████████████▎ | 9.5MB 3.8MB/s eta 0:00:02\r\u001b[K 66% |█████████████████████▎ | 9.6MB 4.0MB/s eta 0:00:02\r\u001b[K 66% |█████████████████████▎ | 9.6MB 3.7MB/s eta 0:00:02\r\u001b[K 66% |█████████████████████▎ | 9.6MB 4.4MB/s eta 0:00:02", + "\r\u001b[K 66% |█████████████████████▎ | 9.6MB 4.1MB/s eta 0:00:02\r\u001b[K 66% |█████████████████████▍ | 9.6MB 3.3MB/s eta 0:00:02\r\u001b[K 66% 
|█████████████████████▍ | 9.6MB 4.3MB/s eta 0:00:02\r\u001b[K 66% |█████████████████████▍ | 9.6MB 4.6MB/s eta 0:00:02\r\u001b[K 66% |█████████████████████▍ | 9.6MB 4.6MB/s eta 0:00:02\r\u001b[K 67% |█████████████████████▌ | 9.6MB 3.9MB/s eta 0:00:02\r\u001b[K 67% |█████████████████████▌ | 9.6MB 3.6MB/s eta 0:00:02\r\u001b[K 67% |█████████████████████▌ | 9.7MB 3.7MB/s eta 0:00:02\r\u001b[K 67% |█████████████████████▌ | 9.7MB 3.7MB/s eta 0:00:02\r\u001b[K 67% |█████████████████████▌ | 9.7MB 3.4MB/s eta 0:00:02\r\u001b[K 67% |█████████████████████▋ | 9.7MB 4.3MB/s eta 0:00:02\r\u001b[K 67% |█████████████████████▋ | 9.7MB 4.8MB/s eta 0:00:01\r\u001b[K 67% |█████████████████████▋ | 9.7MB 4.8MB/s eta 0:00:01\r\u001b[K 67% |█████████████████████▋ | 9.7MB 4.5MB/s eta 0:00:02\r\u001b[K 67% |█████████████████████▋ | 9.7MB 4.1MB/s eta 0:00:02\r\u001b[K 67% |█████████████████████▊ | 9.7MB 4.9MB/s eta 0:00:01\r\u001b[K 67% |█████████████████████▊ | 9.7MB 4.8MB/s eta 0:00:01\r\u001b[K 67% |█████████████████████▊ | 9.8MB 4.5MB/s eta 0:00:02\r\u001b[K 67% |█████████████████████▊ | 9.8MB 4.8MB/s eta 0:00:01\r\u001b[K 68% |█████████████████████▊ | 9.8MB 4.4MB/s eta 0:00:02\r\u001b[K 68% |█████████████████████▉ | 9.8MB 4.5MB/s eta 0:00:02\r\u001b[K 68% |█████████████████████▉ | 9.8MB 5.2MB/s eta 0:00:01\r\u001b[K 68% |█████████████████████▉ | 9.8MB 4.9MB/s eta 0:00:01\r\u001b[K 68% |█████████████████████▉ | 9.8MB 5.8MB/s eta 0:00:01\r\u001b[K 68% |█████████████████████▉ | 9.8MB 6.7MB/s eta 0:00:01", + "\r\u001b[K 68% |██████████████████████ | 9.8MB 5.7MB/s eta 0:00:01\r\u001b[K 68% |██████████████████████ | 9.9MB 6.0MB/s eta 0:00:01\r\u001b[K 68% |██████████████████████ | 9.9MB 5.1MB/s eta 0:00:01\r\u001b[K 68% |██████████████████████ | 9.9MB 5.3MB/s eta 0:00:01\r\u001b[K 68% |██████████████████████ | 9.9MB 5.0MB/s eta 0:00:01\r\u001b[K 68% |██████████████████████ | 9.9MB 3.5MB/s eta 0:00:02\r\u001b[K 68% |██████████████████████ | 9.9MB 3.6MB/s eta 0:00:02\r\u001b[K 68% 
|██████████████████████ | 9.9MB 3.0MB/s eta 0:00:02\r\u001b[K 69% |██████████████████████ | 9.9MB 2.4MB/s eta 0:00:02\r\u001b[K 69% |██████████████████████ | 9.9MB 2.4MB/s eta 0:00:02\r\u001b[K 69% |██████████████████████▏ | 9.9MB 2.2MB/s eta 0:00:03\r\u001b[K 69% |██████████████████████▏ | 10.0MB 2.3MB/s eta 0:00:02\r\u001b[K 69% |██████████████████████▏ | 10.0MB 2.3MB/s eta 0:00:02", + "\r\u001b[K 69% |██████████████████████▏ | 10.0MB 1.7MB/s eta 0:00:03\r\u001b[K 69% |██████████████████████▏ | 10.0MB 1.9MB/s eta 0:00:03\r\u001b[K 69% |██████████████████████▎ | 10.0MB 2.2MB/s eta 0:00:02\r\u001b[K 69% |██████████████████████▎ | 10.0MB 1.9MB/s eta 0:00:03\r\u001b[K 69% |██████████████████████▎ | 10.0MB 2.1MB/s eta 0:00:03\r\u001b[K 69% |██████████████████████▎ | 10.0MB 2.1MB/s eta 0:00:03\r\u001b[K 69% |██████████████████████▍ | 10.0MB 2.1MB/s eta 0:00:03\r\u001b[K 69% |██████████████████████▍ | 10.0MB 2.1MB/s eta 0:00:03\r\u001b[K 69% |██████████████████████▍ | 10.1MB 1.7MB/s eta 0:00:03\r\u001b[K 70% |██████████████████████▍ | 10.1MB 1.8MB/s eta 0:00:03", + "\r\u001b[K 70% |██████████████████████▍ | 10.1MB 2.2MB/s eta 0:00:02\r\u001b[K 70% |██████████████████████▌ | 10.1MB 1.9MB/s eta 0:00:03\r\u001b[K 70% |██████████████████████▌ | 10.1MB 1.8MB/s eta 0:00:03\r\u001b[K 70% |██████████████████████▌ | 10.1MB 2.0MB/s eta 0:00:03\r\u001b[K 70% |██████████████████████▌ | 10.1MB 2.0MB/s eta 0:00:03\r\u001b[K 70% |██████████████████████▌ | 10.1MB 2.2MB/s eta 0:00:02\r\u001b[K 70% |██████████████████████▋ | 10.1MB 1.9MB/s eta 0:00:03\r\u001b[K 70% |██████████████████████▋ | 10.1MB 2.2MB/s eta 0:00:02\r\u001b[K 70% |██████████████████████▋ | 10.2MB 2.4MB/s eta 0:00:02", + "\r\u001b[K 70% |██████████████████████▋ | 10.2MB 1.7MB/s eta 0:00:03\r\u001b[K 70% |██████████████████████▋ | 10.2MB 1.8MB/s eta 0:00:03\r\u001b[K 70% |██████████████████████▊ | 10.2MB 1.9MB/s eta 0:00:03\r\u001b[K 70% |██████████████████████▊ | 10.2MB 2.0MB/s eta 0:00:03\r\u001b[K 71% 
|██████████████████████▊ | 10.2MB 2.0MB/s eta 0:00:03\r\u001b[K 71% |██████████████████████▊ | 10.2MB 1.9MB/s eta 0:00:03\r\u001b[K 71% |██████████████████████▊ | 10.2MB 2.1MB/s eta 0:00:03\r\u001b[K 71% |██████████████████████▉ | 10.2MB 2.3MB/s eta 0:00:02\r\u001b[K 71% |██████████████████████▉ | 10.3MB 2.1MB/s eta 0:00:02\r\u001b[K 71% |██████████████████████▉ | 10.3MB 2.5MB/s eta 0:00:02\r\u001b[K 71% |██████████████████████▉ | 10.3MB 3.8MB/s eta 0:00:02\r\u001b[K 71% |██████████████████████▉ | 10.3MB 3.8MB/s eta 0:00:02\r\u001b[K 71% |███████████████████████ | 10.3MB 3.4MB/s eta 0:00:02\r\u001b[K 71% |███████████████████████ | 10.3MB 3.2MB/s eta 0:00:02\r\u001b[K 71% |███████████████████████ | 10.3MB 3.5MB/s eta 0:00:02\r\u001b[K 71% |███████████████████████ | 10.3MB 3.4MB/s eta 0:00:02", + "\r\u001b[K 71% |███████████████████████ | 10.3MB 2.6MB/s eta 0:00:02\r\u001b[K 71% |███████████████████████ | 10.3MB 2.9MB/s eta 0:00:02\r\u001b[K 72% |███████████████████████ | 10.4MB 3.1MB/s eta 0:00:02\r\u001b[K 72% |███████████████████████ | 10.4MB 3.1MB/s eta 0:00:02\r\u001b[K 72% |███████████████████████ | 10.4MB 3.1MB/s eta 0:00:02\r\u001b[K 72% |███████████████████████ | 10.4MB 3.0MB/s eta 0:00:02\r\u001b[K 72% |███████████████████████▏ | 10.4MB 4.2MB/s eta 0:00:01\r\u001b[K 72% |███████████████████████▏ | 10.4MB 4.5MB/s eta 0:00:01\r\u001b[K 72% |███████████████████████▏ | 10.4MB 4.3MB/s eta 0:00:01\r\u001b[K 72% |███████████████████████▏ | 10.4MB 5.5MB/s eta 0:00:01\r\u001b[K 72% |███████████████████████▎ | 10.4MB 7.5MB/s eta 0:00:01\r\u001b[K 72% |███████████████████████▎ | 10.4MB 6.9MB/s eta 0:00:01\r\u001b[K 72% |███████████████████████▎ | 10.5MB 6.6MB/s eta 0:00:01\r\u001b[K 72% |███████████████████████▎ | 10.5MB 6.1MB/s eta 0:00:01\r\u001b[K 72% |███████████████████████▎ | 10.5MB 7.1MB/s eta 0:00:01\r\u001b[K 72% |███████████████████████▍ | 10.5MB 7.1MB/s eta 0:00:01\r\u001b[K 73% |███████████████████████▍ | 10.5MB 6.3MB/s eta 0:00:01\r\u001b[K 73% 
|███████████████████████▍ | 10.5MB 6.4MB/s eta 0:00:01\r\u001b[K 73% |███████████████████████▍ | 10.5MB 6.5MB/s eta 0:00:01\r\u001b[K 73% |███████████████████████▍ | 10.5MB 6.4MB/s eta 0:00:01\r\u001b[K 73% |███████████████████████▌ | 10.5MB 7.6MB/s eta 0:00:01\r\u001b[K 73% |███████████████████████▌ | 10.5MB 8.0MB/s eta 0:00:01\r\u001b[K 73% |███████████████████████▌ | 10.6MB 9.1MB/s eta 0:00:01\r\u001b[K 73% |███████████████████████▌ | 10.6MB 10.0MB/s eta 0:00:01\r\u001b[K 73% |███████████████████████▌ | 10.6MB 9.4MB/s eta 0:00:01\r\u001b[K 73% |███████████████████████▋ | 10.6MB 10.9MB/s eta 0:00:01\r\u001b[K 73% |███████████████████████▋ | 10.6MB 10.4MB/s eta 0:00:01\r\u001b[K 73% |███████████████████████▋ | 10.6MB 11.1MB/s eta 0:00:01\r\u001b[K 73% |███████████████████████▋ | 10.6MB 9.3MB/s eta 0:00:01\r\u001b[K 73% |███████████████████████▋ | 10.6MB 7.5MB/s eta 0:00:01\r\u001b[K 73% |███████████████████████▊ | 10.6MB 7.9MB/s eta 0:00:01\r\u001b[K 74% |███████████████████████▊ | 10.6MB 6.9MB/s eta 0:00:01", + "\r\u001b[K 74% |███████████████████████▊ | 10.7MB 5.6MB/s eta 0:00:01\r\u001b[K 74% |███████████████████████▊ | 10.7MB 5.7MB/s eta 0:00:01\r\u001b[K 74% |███████████████████████▊ | 10.7MB 5.2MB/s eta 0:00:01\r\u001b[K 74% |███████████████████████▉ | 10.7MB 5.1MB/s eta 0:00:01\r\u001b[K 74% |███████████████████████▉ | 10.7MB 5.0MB/s eta 0:00:01\r\u001b[K 74% |███████████████████████▉ | 10.7MB 4.5MB/s eta 0:00:01\r\u001b[K 74% |███████████████████████▉ | 10.7MB 4.9MB/s eta 0:00:01\r\u001b[K 74% |███████████████████████▉ | 10.7MB 4.9MB/s eta 0:00:01\r\u001b[K 74% |████████████████████████ | 10.7MB 4.6MB/s eta 0:00:01\r\u001b[K 74% |████████████████████████ | 10.8MB 5.2MB/s eta 0:00:01\r\u001b[K 74% |████████████████████████ | 10.8MB 5.5MB/s eta 0:00:01\r\u001b[K 74% |████████████████████████ | 10.8MB 5.5MB/s eta 0:00:01\r\u001b[K 74% |████████████████████████ | 10.8MB 5.1MB/s eta 0:00:01\r\u001b[K 75% |████████████████████████ | 10.8MB 4.7MB/s eta 
0:00:01\r\u001b[K 75% |████████████████████████ | 10.8MB 5.4MB/s eta 0:00:01\r\u001b[K 75% |████████████████████████ | 10.8MB 5.4MB/s eta 0:00:01\r\u001b[K 75% |████████████████████████ | 10.8MB 4.6MB/s eta 0:00:01\r\u001b[K 75% |████████████████████████▏ | 10.8MB 5.4MB/s eta 0:00:01\r\u001b[K 75% |████████████████████████▏ | 10.8MB 4.8MB/s eta 0:00:01\r\u001b[K 75% |████████████████████████▏ | 10.9MB 4.6MB/s eta 0:00:01\r\u001b[K 75% |████████████████████████▏ | 10.9MB 5.1MB/s eta 0:00:01\r\u001b[K 75% |████████████████████████▏ | 10.9MB 4.7MB/s eta 0:00:01\r\u001b[K 75% |████████████████████████▎ | 10.9MB 5.8MB/s eta 0:00:01\r\u001b[K 75% |████████████████████████▎ | 10.9MB 5.8MB/s eta 0:00:01\r\u001b[K 75% |████████████████████████▎ | 10.9MB 5.0MB/s eta 0:00:01\r\u001b[K 75% |████████████████████████▎ | 10.9MB 5.6MB/s eta 0:00:01", + "\r\u001b[K 75% |████████████████████████▎ | 10.9MB 5.9MB/s eta 0:00:01\r\u001b[K 76% |████████████████████████▍ | 10.9MB 5.9MB/s eta 0:00:01\r\u001b[K 76% |████████████████████████▍ | 10.9MB 6.4MB/s eta 0:00:01\r\u001b[K 76% |████████████████████████▍ | 11.0MB 5.6MB/s eta 0:00:01\r\u001b[K 76% |████████████████████████▍ | 11.0MB 5.7MB/s eta 0:00:01\r\u001b[K 76% |████████████████████████▍ | 11.0MB 5.8MB/s eta 0:00:01\r\u001b[K 76% |████████████████████████▌ | 11.0MB 5.1MB/s eta 0:00:01\r\u001b[K 76% |████████████████████████▌ | 11.0MB 5.7MB/s eta 0:00:01\r\u001b[K 76% |████████████████████████▌ | 11.0MB 5.9MB/s eta 0:00:01\r\u001b[K 76% |████████████████████████▌ | 11.0MB 6.0MB/s eta 0:00:01\r\u001b[K 76% |████████████████████████▌ | 11.0MB 6.1MB/s eta 0:00:01\r\u001b[K 76% |████████████████████████▋ | 11.0MB 5.3MB/s eta 0:00:01\r\u001b[K 76% |████████████████████████▋ | 11.0MB 6.2MB/s eta 0:00:01\r\u001b[K 76% |████████████████████████▋ | 11.1MB 6.4MB/s eta 0:00:01\r\u001b[K 76% |████████████████████████▋ | 11.1MB 5.6MB/s eta 0:00:01\r\u001b[K 77% |████████████████████████▋ | 11.1MB 6.0MB/s eta 0:00:01\r\u001b[K 77% 
|████████████████████████▊ | 11.1MB 6.1MB/s eta 0:00:01\r\u001b[K 77% |████████████████████████▊ | 11.1MB 6.1MB/s eta 0:00:01\r\u001b[K 77% |████████████████████████▊ | 11.1MB 6.1MB/s eta 0:00:01\r\u001b[K 77% |████████████████████████▊ | 11.1MB 5.1MB/s eta 0:00:01\r\u001b[K 77% |████████████████████████▊ | 11.1MB 5.7MB/s eta 0:00:01\r\u001b[K 77% |████████████████████████▉ | 11.1MB 5.8MB/s eta 0:00:01\r\u001b[K 77% |████████████████████████▉ | 11.2MB 5.1MB/s eta 0:00:01\r\u001b[K 77% |████████████████████████▉ | 11.2MB 5.6MB/s eta 0:00:01\r\u001b[K 77% |████████████████████████▉ | 11.2MB 6.1MB/s eta 0:00:01\r\u001b[K 77% |████████████████████████▉ | 11.2MB 6.2MB/s eta 0:00:01\r\u001b[K 77% |█████████████████████████ | 11.2MB 5.9MB/s eta 0:00:01\r\u001b[K 77% |█████████████████████████ | 11.2MB 5.0MB/s eta 0:00:01\r\u001b[K 77% |█████████████████████████ | 11.2MB 5.6MB/s eta 0:00:01", + "\r\u001b[K 78% |█████████████████████████ | 11.2MB 5.5MB/s eta 0:00:01\r\u001b[K 78% |█████████████████████████ | 11.2MB 4.8MB/s eta 0:00:01\r\u001b[K 78% |█████████████████████████ | 11.2MB 5.3MB/s eta 0:00:01\r\u001b[K 78% |█████████████████████████ | 11.3MB 5.3MB/s eta 0:00:01\r\u001b[K 78% |█████████████████████████ | 11.3MB 5.4MB/s eta 0:00:01\r\u001b[K 78% |█████████████████████████ | 11.3MB 5.4MB/s eta 0:00:01\r\u001b[K 78% |█████████████████████████▏ | 11.3MB 4.5MB/s eta 0:00:01\r\u001b[K 78% |█████████████████████████▏ | 11.3MB 5.2MB/s eta 0:00:01\r\u001b[K 78% |█████████████████████████▏ | 11.3MB 5.2MB/s eta 0:00:01\r\u001b[K 78% |█████████████████████████▏ | 11.3MB 4.7MB/s eta 0:00:01\r\u001b[K 78% |█████████████████████████▏ | 11.3MB 5.5MB/s eta 0:00:01\r\u001b[K 78% |█████████████████████████▎ | 11.3MB 4.9MB/s eta 0:00:01\r\u001b[K 78% |█████████████████████████▎ | 11.3MB 4.9MB/s eta 0:00:01\r\u001b[K 78% |█████████████████████████▎ | 11.4MB 5.0MB/s eta 0:00:01\r\u001b[K 79% |█████████████████████████▎ | 11.4MB 4.4MB/s eta 0:00:01\r\u001b[K 79% 
|█████████████████████████▎ | 11.4MB 4.7MB/s eta 0:00:01\r\u001b[K 79% |█████████████████████████▍ | 11.4MB 5.1MB/s eta 0:00:01\r\u001b[K 79% |█████████████████████████▍ | 11.4MB 4.2MB/s eta 0:00:01\r\u001b[K 79% |█████████████████████████▍ | 11.4MB 4.7MB/s eta 0:00:01\r\u001b[K 79% |█████████████████████████▍ | 11.4MB 5.0MB/s eta 0:00:01\r\u001b[K 79% |█████████████████████████▍ | 11.4MB 5.0MB/s eta 0:00:01\r\u001b[K 79% |█████████████████████████▌ | 11.4MB 5.9MB/s eta 0:00:01\r\u001b[K 79% |█████████████████████████▌ | 11.4MB 5.1MB/s eta 0:00:01\r\u001b[K 79% |█████████████████████████▌ | 11.5MB 5.7MB/s eta 0:00:01\r\u001b[K 79% |█████████████████████████▌ | 11.5MB 6.0MB/s eta 0:00:01", + "\r\u001b[K 79% |█████████████████████████▌ | 11.5MB 5.3MB/s eta 0:00:01\r\u001b[K 79% |█████████████████████████▋ | 11.5MB 5.9MB/s eta 0:00:01\r\u001b[K 79% |█████████████████████████▋ | 11.5MB 6.3MB/s eta 0:00:01\r\u001b[K 80% |█████████████████████████▋ | 11.5MB 6.6MB/s eta 0:00:01\r\u001b[K 80% |█████████████████████████▋ | 11.5MB 6.0MB/s eta 0:00:01\r\u001b[K 80% |█████████████████████████▋ | 11.5MB 4.9MB/s eta 0:00:01\r\u001b[K 80% |█████████████████████████▊ | 11.5MB 5.2MB/s eta 0:00:01\r\u001b[K 80% |█████████████████████████▊ | 11.6MB 5.9MB/s eta 0:00:01\r\u001b[K 80% |█████████████████████████▊ | 11.6MB 5.4MB/s eta 0:00:01\r\u001b[K 80% |█████████████████████████▊ | 11.6MB 5.9MB/s eta 0:00:01\r\u001b[K 80% |█████████████████████████▊ | 11.6MB 5.9MB/s eta 0:00:01\r\u001b[K 80% |█████████████████████████▉ | 11.6MB 6.0MB/s eta 0:00:01\r\u001b[K 80% |█████████████████████████▉ | 11.6MB 6.1MB/s eta 0:00:01\r\u001b[K 80% |█████████████████████████▉ | 11.6MB 5.1MB/s eta 0:00:01\r\u001b[K 80% |█████████████████████████▉ | 11.6MB 5.9MB/s eta 0:00:01\r\u001b[K 80% |██████████████████████████ | 11.6MB 6.5MB/s eta 0:00:01\r\u001b[K 80% |██████████████████████████ | 11.6MB 5.8MB/s eta 0:00:01\r\u001b[K 81% |██████████████████████████ | 11.7MB 6.0MB/s eta 0:00:01\r\u001b[K 81% 
|██████████████████████████ | 11.7MB 5.8MB/s eta 0:00:01\r\u001b[K 81% |██████████████████████████ | 11.7MB 5.8MB/s eta 0:00:01\r\u001b[K 81% |██████████████████████████ | 11.7MB 5.9MB/s eta 0:00:01\r\u001b[K 81% |██████████████████████████ | 11.7MB 4.9MB/s eta 0:00:01\r\u001b[K 81% |██████████████████████████ | 11.7MB 5.4MB/s eta 0:00:01\r\u001b[K 81% |██████████████████████████ | 11.7MB 6.0MB/s eta 0:00:01\r\u001b[K 81% |██████████████████████████ | 11.7MB 5.0MB/s eta 0:00:01\r\u001b[K 81% |██████████████████████████▏ | 11.7MB 5.4MB/s eta 0:00:01\r\u001b[K 81% |██████████████████████████▏ | 11.7MB 6.0MB/s eta 0:00:01\r\u001b[K 81% |██████████████████████████▏ | 11.8MB 6.0MB/s eta 0:00:01\r\u001b[K 81% |██████████████████████████▏ | 11.8MB 5.7MB/s eta 0:00:01", + "\r\u001b[K 81% |██████████████████████████▏ | 11.8MB 4.9MB/s eta 0:00:01\r\u001b[K 81% |██████████████████████████▎ | 11.8MB 5.3MB/s eta 0:00:01\r\u001b[K 82% |██████████████████████████▎ | 11.8MB 5.8MB/s eta 0:00:01\r\u001b[K 82% |██████████████████████████▎ | 11.8MB 5.1MB/s eta 0:00:01\r\u001b[K 82% |██████████████████████████▎ | 11.8MB 5.4MB/s eta 0:00:01\r\u001b[K 82% |██████████████████████████▎ | 11.8MB 5.9MB/s eta 0:00:01\r\u001b[K 82% |██████████████████████████▍ | 11.8MB 6.0MB/s eta 0:00:01\r\u001b[K 82% |██████████████████████████▍ | 11.8MB 5.3MB/s eta 0:00:01\r\u001b[K 82% |██████████████████████████▍ | 11.9MB 4.6MB/s eta 0:00:01\r\u001b[K 82% |██████████████████████████▍ | 11.9MB 5.3MB/s eta 0:00:01\r\u001b[K 82% |██████████████████████████▍ | 11.9MB 5.5MB/s eta 0:00:01\r\u001b[K 82% |██████████████████████████▌ | 11.9MB 4.8MB/s eta 0:00:01\r\u001b[K 82% |██████████████████████████▌ | 11.9MB 5.2MB/s eta 0:00:01\r\u001b[K 82% |██████████████████████████▌ | 11.9MB 5.2MB/s eta 0:00:01\r\u001b[K 82% |██████████████████████████▌ | 11.9MB 5.2MB/s eta 0:00:01\r\u001b[K 82% |██████████████████████████▌ | 11.9MB 5.3MB/s eta 0:00:01\r\u001b[K 83% |██████████████████████████▋ | 11.9MB 4.8MB/s eta 
0:00:01\r\u001b[K 83% |██████████████████████████▋ | 12.0MB 5.6MB/s eta 0:00:01\r\u001b[K 83% |██████████████████████████▋ | 12.0MB 5.4MB/s eta 0:00:01\r\u001b[K 83% |██████████████████████████▋ | 12.0MB 4.7MB/s eta 0:00:01\r\u001b[K 83% |██████████████████████████▋ | 12.0MB 5.1MB/s eta 0:00:01\r\u001b[K 83% |██████████████████████████▊ | 12.0MB 5.7MB/s eta 0:00:01\r\u001b[K 83% |██████████████████████████▊ | 12.0MB 5.6MB/s eta 0:00:01\r\u001b[K 83% |██████████████████████████▊ | 12.0MB 6.0MB/s eta 0:00:01\r\u001b[K 83% |██████████████████████████▊ | 12.0MB 5.4MB/s eta 0:00:01\r\u001b[K 83% |██████████████████████████▉ | 12.0MB 5.9MB/s eta 0:00:01\r\u001b[K 83% |██████████████████████████▉ | 12.0MB 6.1MB/s eta 0:00:01", + "\r\u001b[K 83% |██████████████████████████▉ | 12.1MB 2.9MB/s eta 0:00:01\r\u001b[K 83% |██████████████████████████▉ | 12.1MB 3.1MB/s eta 0:00:01\r\u001b[K 83% |██████████████████████████▉ | 12.1MB 2.9MB/s eta 0:00:01\r\u001b[K 84% |███████████████████████████ | 12.1MB 2.8MB/s eta 0:00:01\r\u001b[K 84% |███████████████████████████ | 12.1MB 2.7MB/s eta 0:00:01\r\u001b[K 84% |███████████████████████████ | 12.1MB 2.6MB/s eta 0:00:01\r\u001b[K 84% |███████████████████████████ | 12.1MB 2.5MB/s eta 0:00:01\r\u001b[K 84% |███████████████████████████ | 12.1MB 2.5MB/s eta 0:00:01\r\u001b[K 84% |███████████████████████████ | 12.1MB 2.3MB/s eta 0:00:01\r\u001b[K 84% |███████████████████████████ | 12.1MB 2.4MB/s eta 0:00:01\r\u001b[K 84% |███████████████████████████ | 12.2MB 3.8MB/s eta 0:00:01\r\u001b[K 84% |███████████████████████████ | 12.2MB 3.9MB/s eta 0:00:01\r\u001b[K 84% |███████████████████████████ | 12.2MB 4.4MB/s eta 0:00:01\r\u001b[K 84% |███████████████████████████▏ | 12.2MB 4.5MB/s eta 0:00:01\r\u001b[K 84% |███████████████████████████▏ | 12.2MB 4.7MB/s eta 0:00:01", + "\r\u001b[K 84% |███████████████████████████▏ | 12.2MB 3.1MB/s eta 0:00:01\r\u001b[K 84% |███████████████████████████▏ | 12.2MB 2.6MB/s eta 0:00:01\r\u001b[K 85% 
|███████████████████████████▏ | 12.2MB 2.7MB/s eta 0:00:01\r\u001b[K 85% |███████████████████████████▎ | 12.2MB 2.8MB/s eta 0:00:01\r\u001b[K 85% |███████████████████████████▎ | 12.2MB 2.8MB/s eta 0:00:01\r\u001b[K 85% |███████████████████████████▎ | 12.3MB 2.7MB/s eta 0:00:01\r\u001b[K 85% |███████████████████████████▎ | 12.3MB 2.6MB/s eta 0:00:01\r\u001b[K 85% |███████████████████████████▎ | 12.3MB 2.8MB/s eta 0:00:01\r\u001b[K 85% |███████████████████████████▍ | 12.3MB 2.4MB/s eta 0:00:01\r\u001b[K 85% |███████████████████████████▍ | 12.3MB 2.2MB/s eta 0:00:01\r\u001b[K 85% |███████████████████████████▍ | 12.3MB 3.0MB/s eta 0:00:01\r\u001b[K 85% |███████████████████████████▍ | 12.3MB 3.7MB/s eta 0:00:01\r\u001b[K 85% |███████████████████████████▍ | 12.3MB 3.7MB/s eta 0:00:01\r\u001b[K 85% |███████████████████████████▌ | 12.3MB 3.8MB/s eta 0:00:01\r\u001b[K 85% |███████████████████████████▌ | 12.3MB 3.8MB/s eta 0:00:01\r\u001b[K 85% |███████████████████████████▌ | 12.4MB 4.3MB/s eta 0:00:01\r\u001b[K 86% |███████████████████████████▌ | 12.4MB 4.6MB/s eta 0:00:01\r\u001b[K 86% |███████████████████████████▌ | 12.4MB 4.3MB/s eta 0:00:01\r\u001b[K 86% |███████████████████████████▋ | 12.4MB 6.2MB/s eta 0:00:01\r\u001b[K 86% |███████████████████████████▋ | 12.4MB 8.3MB/s eta 0:00:01\r\u001b[K 86% |███████████████████████████▋ | 12.4MB 9.0MB/s eta 0:00:01\r\u001b[K 86% |███████████████████████████▋ | 12.4MB 11.0MB/s eta 0:00:01", + "\r\u001b[K 86% |███████████████████████████▊ | 12.4MB 10.1MB/s eta 0:00:01\r\u001b[K 86% |███████████████████████████▊ | 12.4MB 11.5MB/s eta 0:00:01\r\u001b[K 86% |███████████████████████████▊ | 12.5MB 9.5MB/s eta 0:00:01\r\u001b[K 86% |███████████████████████████▊ | 12.5MB 8.5MB/s eta 0:00:01\r\u001b[K 86% |███████████████████████████▊ | 12.5MB 8.8MB/s eta 0:00:01\r\u001b[K 86% |███████████████████████████▉ | 12.5MB 8.0MB/s eta 0:00:01\r\u001b[K 86% |███████████████████████████▉ | 12.5MB 8.5MB/s eta 0:00:01\r\u001b[K 86% 
|███████████████████████████▉ | 12.5MB 8.2MB/s eta 0:00:01\r\u001b[K 87% |███████████████████████████▉ | 12.5MB 7.7MB/s eta 0:00:01\r\u001b[K 87% |███████████████████████████▉ | 12.5MB 8.2MB/s eta 0:00:01\r\u001b[K 87% |████████████████████████████ | 12.5MB 7.9MB/s eta 0:00:01\r\u001b[K 87% |████████████████████████████ | 12.5MB 6.6MB/s eta 0:00:01\r\u001b[K 87% |████████████████████████████ | 12.6MB 7.9MB/s eta 0:00:01\r\u001b[K 87% |████████████████████████████ | 12.6MB 8.6MB/s eta 0:00:01\r\u001b[K 87% |████████████████████████████ | 12.6MB 8.1MB/s eta 0:00:01\r\u001b[K 87% |████████████████████████████ | 12.6MB 8.4MB/s eta 0:00:01\r\u001b[K 87% |████████████████████████████ | 12.6MB 7.2MB/s eta 0:00:01\r\u001b[K 87% |████████████████████████████ | 12.6MB 7.6MB/s eta 0:00:01\r\u001b[K 87% |████████████████████████████ | 12.6MB 7.7MB/s eta 0:00:01\r\u001b[K 87% |████████████████████████████ | 12.6MB 7.2MB/s eta 0:00:01\r\u001b[K 87% |████████████████████████████▏ | 12.6MB 8.0MB/s eta 0:00:01\r\u001b[K 87% |████████████████████████████▏ | 12.6MB 9.5MB/s eta 0:00:01\r\u001b[K 88% |████████████████████████████▏ | 12.7MB 9.6MB/s eta 0:00:01\r\u001b[K 88% |████████████████████████████▏ | 12.7MB 8.9MB/s eta 0:00:01\r\u001b[K 88% |████████████████████████████▏ | 12.7MB 8.6MB/s eta 0:00:01\r\u001b[K 88% |████████████████████████████▎ | 12.7MB 10.1MB/s eta 0:00:01\r\u001b[K 88% |████████████████████████████▎ | 12.7MB 11.2MB/s eta 0:00:01\r\u001b[K 88% |████████████████████████████▎ | 12.7MB 10.2MB/s eta 0:00:01\r\u001b[K 88% |████████████████████████████▎ | 12.7MB 10.2MB/s eta 0:00:01\r\u001b[K 88% |████████████████████████████▎ | 12.7MB 10.2MB/s eta 0:00:01\r\u001b[K 88% |████████████████████████████▍ | 12.7MB 10.2MB/s eta 0:00:01\r\u001b[K 88% |████████████████████████████▍ | 12.7MB 10.2MB/s eta 0:00:01\r\u001b[K 88% |████████████████████████████▍ | 12.8MB 9.5MB/s eta 0:00:01\r\u001b[K 88% |████████████████████████████▍ | 12.8MB 10.7MB/s eta 0:00:01\r\u001b[K 88% 
|████████████████████████████▍ | 12.8MB 11.0MB/s eta 0:00:01\r\u001b[K 88% |████████████████████████████▌ | 12.8MB 10.6MB/s eta 0:00:01\r\u001b[K 89% |████████████████████████████▌ | 12.8MB 11.8MB/s eta 0:00:01\r\u001b[K 89% |████████████████████████████▌ | 12.8MB 12.6MB/s eta 0:00:01\r\u001b[K 89% |████████████████████████████▌ | 12.8MB 13.8MB/s eta 0:00:01\r\u001b[K 89% |████████████████████████████▋ | 12.8MB 15.1MB/s eta 0:00:01\r\u001b[K 89% |████████████████████████████▋ | 12.8MB 13.6MB/s eta 0:00:01\r\u001b[K 89% |████████████████████████████▋ | 12.9MB 14.6MB/s eta 0:00:01\r\u001b[K 89% |████████████████████████████▋ | 12.9MB 15.6MB/s eta 0:00:01\r\u001b[K 89% |████████████████████████████▋ | 12.9MB 14.0MB/s eta 0:00:01\r\u001b[K 89% |████████████████████████████▊ | 12.9MB 15.9MB/s eta 0:00:01\r\u001b[K 89% |████████████████████████████▊ | 12.9MB 16.2MB/s eta 0:00:01\r\u001b[K 89% |████████████████████████████▊ | 12.9MB 15.8MB/s eta 0:00:01\r\u001b[K 89% |████████████████████████████▊ | 12.9MB 14.5MB/s eta 0:00:01", + "\r\u001b[K 89% |████████████████████████████▊ | 12.9MB 9.5MB/s eta 0:00:01\r\u001b[K 89% |████████████████████████████▉ | 12.9MB 9.7MB/s eta 0:00:01\r\u001b[K 90% |████████████████████████████▉ | 12.9MB 10.0MB/s eta 0:00:01\r\u001b[K 90% |████████████████████████████▉ | 13.0MB 8.3MB/s eta 0:00:01\r\u001b[K 90% |████████████████████████████▉ | 13.0MB 8.4MB/s eta 0:00:01\r\u001b[K 90% |████████████████████████████▉ | 13.0MB 6.9MB/s eta 0:00:01\r\u001b[K 90% |█████████████████████████████ | 13.0MB 6.6MB/s eta 0:00:01\r\u001b[K 90% |█████████████████████████████ | 13.0MB 6.0MB/s eta 0:00:01\r\u001b[K 90% |█████████████████████████████ | 13.0MB 5.1MB/s eta 0:00:01\r\u001b[K 90% |█████████████████████████████ | 13.0MB 5.2MB/s eta 0:00:01\r\u001b[K 90% |█████████████████████████████ | 13.0MB 5.7MB/s eta 0:00:01\r\u001b[K 90% |█████████████████████████████ | 13.0MB 4.9MB/s eta 0:00:01\r\u001b[K 90% |█████████████████████████████ | 13.0MB 5.0MB/s eta 
0:00:01\r\u001b[K 90% |█████████████████████████████ | 13.1MB 5.2MB/s eta 0:00:01\r\u001b[K 90% |█████████████████████████████ | 13.1MB 5.2MB/s eta 0:00:01\r\u001b[K 90% |█████████████████████████████ | 13.1MB 5.6MB/s eta 0:00:01\r\u001b[K 91% |█████████████████████████████▏ | 13.1MB 5.0MB/s eta 0:00:01\r\u001b[K 91% |█████████████████████████████▏ | 13.1MB 5.5MB/s eta 0:00:01\r\u001b[K 91% |█████████████████████████████▏ | 13.1MB 6.0MB/s eta 0:00:01\r\u001b[K 91% |█████████████████████████████▏ | 13.1MB 5.3MB/s eta 0:00:01\r\u001b[K 91% |█████████████████████████████▏ | 13.1MB 6.0MB/s eta 0:00:01\r\u001b[K 91% |█████████████████████████████▎ | 13.1MB 6.3MB/s eta 0:00:01\r\u001b[K 91% |█████████████████████████████▎ | 13.1MB 6.3MB/s eta 0:00:01\r\u001b[K 91% |█████████████████████████████▎ | 13.2MB 5.7MB/s eta 0:00:01\r\u001b[K 91% |█████████████████████████████▎ | 13.2MB 5.0MB/s eta 0:00:01\r\u001b[K 91% |█████████████████████████████▎ | 13.2MB 5.4MB/s eta 0:00:01\r\u001b[K 91% |█████████████████████████████▍ | 13.2MB 5.6MB/s eta 0:00:01", + "\r\u001b[K 91% |█████████████████████████████▍ | 13.2MB 4.3MB/s eta 0:00:01\r\u001b[K 91% |█████████████████████████████▍ | 13.2MB 4.2MB/s eta 0:00:01\r\u001b[K 91% |█████████████████████████████▍ | 13.2MB 3.7MB/s eta 0:00:01\r\u001b[K 92% |█████████████████████████████▌ | 13.2MB 3.6MB/s eta 0:00:01\r\u001b[K 92% |█████████████████████████████▌ | 13.2MB 3.4MB/s eta 0:00:01\r\u001b[K 92% |█████████████████████████████▌ | 13.3MB 3.4MB/s eta 0:00:01\r\u001b[K 92% |█████████████████████████████▌ | 13.3MB 3.8MB/s eta 0:00:01\r\u001b[K 92% |█████████████████████████████▌ | 13.3MB 3.8MB/s eta 0:00:01\r\u001b[K 92% |█████████████████████████████▋ | 13.3MB 3.4MB/s eta 0:00:01\r\u001b[K 92% |█████████████████████████████▋ | 13.3MB 3.4MB/s eta 0:00:01\r\u001b[K 92% |█████████████████████████████▋ | 13.3MB 3.5MB/s eta 0:00:01\r\u001b[K 92% |█████████████████████████████▋ | 13.3MB 3.8MB/s eta 0:00:01\r\u001b[K 92% 
|█████████████████████████████▋ | 13.3MB 4.5MB/s eta 0:00:01\r\u001b[K 92% |█████████████████████████████▊ | 13.3MB 4.4MB/s eta 0:00:01\r\u001b[K 92% |█████████████████████████████▊ | 13.3MB 5.0MB/s eta 0:00:01\r\u001b[K 92% |█████████████████████████████▊ | 13.4MB 5.1MB/s eta 0:00:01\r\u001b[K 92% |█████████████████████████████▊ | 13.4MB 4.5MB/s eta 0:00:01\r\u001b[K 93% |█████████████████████████████▊ | 13.4MB 5.0MB/s eta 0:00:01\r\u001b[K 93% |█████████████████████████████▉ | 13.4MB 6.0MB/s eta 0:00:01\r\u001b[K 93% |█████████████████████████████▉ | 13.4MB 6.7MB/s eta 0:00:01\r\u001b[K 93% |█████████████████████████████▉ | 13.4MB 6.3MB/s eta 0:00:01\r\u001b[K 93% |█████████████████████████████▉ | 13.4MB 6.0MB/s eta 0:00:01\r\u001b[K 93% |█████████████████████████████▉ | 13.4MB 6.3MB/s eta 0:00:01\r\u001b[K 93% |██████████████████████████████ | 13.4MB 6.6MB/s eta 0:00:01\r\u001b[K 93% |██████████████████████████████ | 13.4MB 6.4MB/s eta 0:00:01\r\u001b[K 93% |██████████████████████████████ | 13.5MB 6.5MB/s eta 0:00:01", + "\r\u001b[K 93% |██████████████████████████████ | 13.5MB 7.6MB/s eta 0:00:01\r\u001b[K 93% |██████████████████████████████ | 13.5MB 4.8MB/s eta 0:00:01\r\u001b[K 93% |██████████████████████████████ | 13.5MB 4.5MB/s eta 0:00:01\r\u001b[K 93% |██████████████████████████████ | 13.5MB 4.3MB/s eta 0:00:01\r\u001b[K 93% |██████████████████████████████ | 13.5MB 3.9MB/s eta 0:00:01\r\u001b[K 94% |██████████████████████████████ | 13.5MB 3.9MB/s eta 0:00:01\r\u001b[K 94% |██████████████████████████████ | 13.5MB 3.8MB/s eta 0:00:01\r\u001b[K 94% |██████████████████████████████▏ | 13.5MB 3.8MB/s eta 0:00:01\r\u001b[K 94% |██████████████████████████████▏ | 13.5MB 3.9MB/s eta 0:00:01\r\u001b[K 94% |██████████████████████████████▏ | 13.6MB 3.8MB/s eta 0:00:01\r\u001b[K 94% |██████████████████████████████▏ | 13.6MB 3.6MB/s eta 0:00:01\r\u001b[K 94% |██████████████████████████████▏ | 13.6MB 4.5MB/s eta 0:00:01\r\u001b[K 94% |██████████████████████████████▎ | 
13.6MB 4.7MB/s eta 0:00:01\r\u001b[K 94% |██████████████████████████████▎ | 13.6MB 4.3MB/s eta 0:00:01\r\u001b[K 94% |██████████████████████████████▎ | 13.6MB 6.3MB/s eta 0:00:01\r\u001b[K 94% |██████████████████████████████▎ | 13.6MB 6.7MB/s eta 0:00:01\r\u001b[K 94% |██████████████████████████████▍ | 13.6MB 7.4MB/s eta 0:00:01\r\u001b[K 94% |██████████████████████████████▍ | 13.6MB 7.4MB/s eta 0:00:01\r\u001b[K 94% |██████████████████████████████▍ | 13.6MB 7.0MB/s eta 0:00:01\r\u001b[K 95% |██████████████████████████████▍ | 13.7MB 6.5MB/s eta 0:00:01", + "\r\u001b[K 95% |██████████████████████████████▍ | 13.7MB 4.7MB/s eta 0:00:01\r\u001b[K 95% |██████████████████████████████▌ | 13.7MB 2.6MB/s eta 0:00:01\r\u001b[K 95% |██████████████████████████████▌ | 13.7MB 2.6MB/s eta 0:00:01\r\u001b[K 95% |██████████████████████████████▌ | 13.7MB 2.6MB/s eta 0:00:01\r\u001b[K 95% |██████████████████████████████▌ | 13.7MB 2.1MB/s eta 0:00:01\r\u001b[K 95% |██████████████████████████████▌ | 13.7MB 2.0MB/s eta 0:00:01\r\u001b[K 95% |██████████████████████████████▋ | 13.7MB 2.0MB/s eta 0:00:01\r\u001b[K 95% |██████████████████████████████▋ | 13.7MB 1.9MB/s eta 0:00:01\r\u001b[K 95% |██████████████████████████████▋ | 13.8MB 1.9MB/s eta 0:00:01\r\u001b[K 95% |██████████████████████████████▋ | 13.8MB 1.8MB/s eta 0:00:01\r\u001b[K 95% |██████████████████████████████▋ | 13.8MB 2.0MB/s eta 0:00:01\r\u001b[K 95% |██████████████████████████████▊ | 13.8MB 3.3MB/s eta 0:00:01\r\u001b[K 95% |██████████████████████████████▊ | 13.8MB 3.2MB/s eta 0:00:01\r\u001b[K 96% |██████████████████████████████▊ | 13.8MB 3.4MB/s eta 0:00:01\r\u001b[K 96% |██████████████████████████████▊ | 13.8MB 5.3MB/s eta 0:00:01", + "\r\u001b[K 96% |██████████████████████████████▊ | 13.8MB 5.1MB/s eta 0:00:01\r\u001b[K 96% |██████████████████████████████▉ | 13.8MB 5.1MB/s eta 0:00:01\r\u001b[K 96% |██████████████████████████████▉ | 13.8MB 4.2MB/s eta 0:00:01\r\u001b[K 96% |██████████████████████████████▉ | 13.9MB 
4.0MB/s eta 0:00:01\r\u001b[K 96% |██████████████████████████████▉ | 13.9MB 4.7MB/s eta 0:00:01\r\u001b[K 96% |██████████████████████████████▉ | 13.9MB 3.9MB/s eta 0:00:01\r\u001b[K 96% |███████████████████████████████ | 13.9MB 3.8MB/s eta 0:00:01\r\u001b[K 96% |███████████████████████████████ | 13.9MB 4.0MB/s eta 0:00:01\r\u001b[K 96% |███████████████████████████████ | 13.9MB 4.0MB/s eta 0:00:01\r\u001b[K 96% |███████████████████████████████ | 13.9MB 4.2MB/s eta 0:00:01\r\u001b[K 96% |███████████████████████████████ | 13.9MB 4.4MB/s eta 0:00:01\r\u001b[K 96% |███████████████████████████████ | 13.9MB 4.7MB/s eta 0:00:01\r\u001b[K 97% |███████████████████████████████ | 13.9MB 6.8MB/s eta 0:00:01\r\u001b[K 97% |███████████████████████████████ | 14.0MB 5.3MB/s eta 0:00:01\r\u001b[K 97% |███████████████████████████████ | 14.0MB 5.2MB/s eta 0:00:01\r\u001b[K 97% |███████████████████████████████ | 14.0MB 4.5MB/s eta 0:00:01\r\u001b[K 97% |███████████████████████████████▏| 14.0MB 3.9MB/s eta 0:00:01\r\u001b[K 97% |███████████████████████████████▏| 14.0MB 4.0MB/s eta 0:00:01\r\u001b[K 97% |███████████████████████████████▏| 14.0MB 3.5MB/s eta 0:00:01", + "\r\u001b[K 97% |███████████████████████████████▏| 14.0MB 2.9MB/s eta 0:00:01\r\u001b[K 97% |███████████████████████████████▎| 14.0MB 2.9MB/s eta 0:00:01\r\u001b[K 97% |███████████████████████████████▎| 14.0MB 2.7MB/s eta 0:00:01\r\u001b[K 97% |███████████████████████████████▎| 14.0MB 2.8MB/s eta 0:00:01\r\u001b[K 97% |███████████████████████████████▎| 14.1MB 3.1MB/s eta 0:00:01\r\u001b[K 97% |███████████████████████████████▎| 14.1MB 3.0MB/s eta 0:00:01\r\u001b[K 97% |███████████████████████████████▍| 14.1MB 4.4MB/s eta 0:00:01\r\u001b[K 98% |███████████████████████████████▍| 14.1MB 5.2MB/s eta 0:00:01\r\u001b[K 98% |███████████████████████████████▍| 14.1MB 5.1MB/s eta 0:00:01\r\u001b[K 98% |███████████████████████████████▍| 14.1MB 6.3MB/s eta 0:00:01\r\u001b[K 98% |███████████████████████████████▍| 14.1MB 9.4MB/s eta 
0:00:01\r\u001b[K 98% |███████████████████████████████▌| 14.1MB 9.4MB/s eta 0:00:01\r\u001b[K 98% |███████████████████████████████▌| 14.1MB 11.2MB/s eta 0:00:01\r\u001b[K 98% |███████████████████████████████▌| 14.2MB 10.5MB/s eta 0:00:01\r\u001b[K 98% |███████████████████████████████▌| 14.2MB 13.5MB/s eta 0:00:01\r\u001b[K 98% |███████████████████████████████▌| 14.2MB 14.5MB/s eta 0:00:01\r\u001b[K 98% |███████████████████████████████▋| 14.2MB 12.6MB/s eta 0:00:01\r\u001b[K 98% |███████████████████████████████▋| 14.2MB 13.1MB/s eta 0:00:01\r\u001b[K 98% |███████████████████████████████▋| 14.2MB 13.4MB/s eta 0:00:01\r\u001b[K 98% |███████████████████████████████▋| 14.2MB 11.5MB/s eta 0:00:01\r\u001b[K 98% |███████████████████████████████▋| 14.2MB 9.7MB/s eta 0:00:01\r\u001b[K 98% |███████████████████████████████▊| 14.2MB 7.9MB/s eta 0:00:01\r\u001b[K 99% |███████████████████████████████▊| 14.2MB 7.8MB/s eta 0:00:01\r\u001b[K 99% |███████████████████████████████▊| 14.3MB 7.9MB/s eta 0:00:01\r\u001b[K 99% |███████████████████████████████▊| 14.3MB 7.4MB/s eta 0:00:01\r\u001b[K 99% |███████████████████████████████▊| 14.3MB 7.7MB/s eta 0:00:01\r\u001b[K 99% |███████████████████████████████▉| 14.3MB 7.9MB/s eta 0:00:01\r\u001b[K 99% |███████████████████████████████▉| 14.3MB 8.0MB/s eta 0:00:01\r\u001b[K 99% |███████████████████████████████▉| 14.3MB 8.1MB/s eta 0:00:01\r\u001b[K 99% |███████████████████████████████▉| 14.3MB 8.3MB/s eta 0:00:01\r\u001b[K 99% |███████████████████████████████▉| 14.3MB 10.4MB/s eta 0:00:01\r\u001b[K 99% |████████████████████████████████| 14.3MB 12.9MB/s eta 0:00:01\r\u001b[K 99% |████████████████████████████████| 14.3MB 12.8MB/s eta 0:00:01\r\u001b[K 99% |████████████████████████████████| 14.4MB 13.6MB/s eta 0:00:01\r\u001b[K 99% |████████████████████████████████| 14.4MB 14.4MB/s eta 0:00:01\r\u001b[K 99% |████████████████████████████████| 14.4MB 14.9MB/s eta 0:00:01", + "\r\u001b[K 100% |████████████████████████████████| 14.4MB 1.5MB/s \r\n", 
+ "\u001b[?25hCollecting cycler>=0.10 (from matplotlib)\r\n", + " Using cached https://files.pythonhosted.org/packages/f7/d2/e07d3ebb2bd7af696440ce7e754c59dd546ffe1bbe732c8ab68b9c834e61/cycler-0.10.0-py2.py3-none-any.whl\r\n", + "Requirement already satisfied: numpy>=1.11 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from matplotlib) (1.16.4)\r\n", + "Collecting kiwisolver>=1.0.1 (from matplotlib)\r\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/49/5d/d1726d2a2fd471a69ef5014ca42812e1ccb8a13085c42bfcb238a5611f39/kiwisolver-1.1.0-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl (113kB)\r\n\r\u001b[K 9% |███ | 10kB 14.9MB/s eta 0:00:01\r\u001b[K 18% |█████▉ | 20kB 18.4MB/s eta 0:00:01\r\u001b[K 27% |████████▊ | 30kB 7.0MB/s eta 0:00:01\r\u001b[K 36% |███████████▋ | 40kB 6.6MB/s eta 0:00:01\r\u001b[K 45% |██████████████▌ | 51kB 5.3MB/s eta 0:00:01\r\u001b[K 54% |█████████████████▍ | 61kB 6.1MB/s eta 0:00:01\r\u001b[K 63% |████████████████████▎ | 71kB 5.7MB/s eta 0:00:01\r\u001b[K 72% |███████████████████████▏ | 81kB 5.0MB/s eta 0:00:01\r\u001b[K 81% |██████████████████████████ | 92kB 5.2MB/s eta 0:00:01\r\u001b[K 90% |█████████████████████████████ | 102kB 5.4MB/s eta 0:00:01\r\u001b[K 99% |███████████████████████████████▉| 112kB 5.4MB/s eta 0:00:01\r\u001b[K 100% |████████████████████████████████| 122kB 4.7MB/s \r\n", + "\u001b[?25hRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from matplotlib) (2.4.0)\r\nRequirement already satisfied: python-dateutil>=2.1 in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from matplotlib) (2.8.0)\r\n", + "Requirement already satisfied: six in /Users/huilgolr/tornasole/venv/lib/python3.6/site-packages (from cycler>=0.10->matplotlib) (1.12.0)\r\nRequirement already satisfied: setuptools in 
/Users/huilgolr/tornasole/venv/lib/python3.6/site-packages/setuptools-40.8.0-py3.6.egg (from kiwisolver>=1.0.1->matplotlib) (40.8.0)\r\n", + "Installing collected packages: cycler, kiwisolver, matplotlib\r\n", + "Successfully installed cycler-0.10.0 kiwisolver-1.1.0 matplotlib-3.1.1\r\n" + ], + "output_type": "stream" + } + ], + "source": [ + "!pip install matplotlib" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "!python mnist.py" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is the location TS has deposited data in" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "pycharm": { + "is_executing": false + } + }, + "outputs": [ + { + "name": "stdout", + "text": [ + "collections.ts \u001b[34mevents\u001b[m\u001b[m \u001b[34mindex\u001b[m\u001b[m\r\n" + ], + "output_type": "stream" + } + ], + "source": [ + "!ls ts_outputs/train/" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "pycharm": { + "is_executing": false + } + }, + "outputs": [ + { + "name": "stderr", + "text": [ + "INFO:tornasole:Loading trial train at path ./ts_outputs/train\n", + "INFO:tornasole:Found 5 steps\n", + "[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers.\n", + "[Parallel(n_jobs=4)]: Done 2 out of 5 | elapsed: 47.3s remaining: 1.2min\n", + "[Parallel(n_jobs=4)]: Done 3 out of 5 | elapsed: 1.0min remaining: 40.7s\n", + "[Parallel(n_jobs=4)]: Done 5 out of 5 | elapsed: 1.6min remaining: 0.0s\n[Parallel(n_jobs=4)]: Done 5 out of 5 | elapsed: 1.6min finished\nINFO:tornasole:Loaded 3 collections\n" + ], + "output_type": "stream" + } + ], + "source": [ + "# import sys\n", + "# sys.path.append('../../../../tornasole_core')\n", + "from tornasole.trials import create_trial\n", + "\n", + "# Create trial object from this path\n", + "tdir = 
\"./ts_outputs/train\"\n", + "tr = create_trial(tdir)" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": { + "pycharm": { + "is_executing": false + } + }, + "outputs": [ + { + "data": { + "text/plain": "['input_olg/shape:0',\n 'conv2d/BiasAdd:0',\n 'gradients/conv2d_1/Conv2D_grad/ShapeN:0',\n 'gradients/dropout/dropout/truediv_grad/RealDiv_2:0',\n 'gradients/Reshape_grad/Shape:0',\n 'GradientDescent/update_dense/bias/ApplyGradientDescent:0',\n 'random_shuffle_queue_DequeueMany:1',\n 'gradients/sparse_softmax_cross_entropy_loss/value_grad/Reshape:0',\n 'gradients/sparse_softmax_cross_entropy_loss/xentropy/xentropy_grad/PreventGradient:0',\n 'sparse_softmax_cross_entropy_loss/num_present/ones_like/Const:0',\n 'sparse_softmax_cross_entropy_loss/num_present:0',\n 'sparse_softmax_cross_entropy_loss/Const_1:0',\n 'dropout/dropout/sub:0',\n 'dense_1/BiasAdd:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Sum_grad/Const:0',\n 'sparse_softmax_cross_entropy_loss/Sum:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Mul_grad/Shape:0',\n 'dense_1/MatMul:0',\n 'gradients/dropout/dropout/truediv_grad/mul:0',\n 'gradients/conv2d/Conv2D_grad/Conv2DBackpropInput:0',\n 'gradients/dense_1/MatMul_grad/tuple/control_dependency:0',\n 'GradientDescent/update_conv2d_1/bias/ApplyGradientDescent:0',\n 'dropout/dropout/add:0',\n 'gradients/sparse_softmax_cross_entropy_loss/value_grad/div_no_nan_1:0',\n 'gradients/sparse_softmax_cross_entropy_loss/xentropy/xentropy_grad/ExpandDims:0',\n 'gradients/conv2d_1/BiasAdd_grad/BiasAddGrad:0',\n 'gradients/max_pooling2d_1/MaxPool_grad/MaxPoolGrad:0',\n 'gradients/dropout/dropout/mul_grad/tuple/control_dependency:0',\n 'gradients/dense/MatMul_grad/MatMul:0',\n 'gradients/conv2d_1/BiasAdd_grad/tuple/control_dependency_1:0',\n 'gradients/conv2d/BiasAdd_grad/tuple/control_dependency:0',\n 'gradients/sparse_softmax_cross_entropy_loss/value_grad/Neg:0',\n 'conv2d_1/kernel/read:0',\n 'dropout/dropout/random_uniform/min:0',\n 
'gradients/sparse_softmax_cross_entropy_loss/value_grad/mul:0',\n 'GradientDescent/update_conv2d/bias/ApplyGradientDescent:0',\n 'random_shuffle_queue_DequeueMany:2',\n 'sparse_softmax_cross_entropy_loss/xentropy/xentropy:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Sum_grad/Tile:0',\n 'gradients/conv2d_1/Conv2D_grad/tuple/control_dependency_1:0',\n 'gradients/sparse_softmax_cross_entropy_loss/value_grad/Sum:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Mul_grad/Reshape_1:0',\n 'dropout/dropout/random_uniform:0',\n 'sparse_softmax_cross_entropy_loss/xentropy/xentropy:1',\n 'gradients/dense_1/MatMul_grad/MatMul_1:0',\n 'gradients/conv2d/Conv2D_grad/Conv2DBackpropFilter:0',\n 'conv2d/kernel/read:0',\n 'conv2d_1/bias/read:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Mul_grad/BroadcastGradientArgs:1',\n 'GradientDescent/update_dense_1/kernel/ApplyGradientDescent:0',\n 'conv2d/Conv2D:0',\n 'dropout/dropout/mul:0',\n 'gradients/Fill:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Mul_grad/tuple/control_dependency:0',\n 'gradients/dense/BiasAdd_grad/tuple/control_dependency:0',\n 'dropout/dropout/random_uniform/mul:0',\n 'gradients/conv2d/Relu_grad/ReluGrad:0',\n 'dense_1/kernel/read:0',\n 'gradients/conv2d/BiasAdd_grad/BiasAddGrad:0',\n 'conv2d/bias/read:0',\n 'dropout/dropout/rate:0',\n 'gradients/max_pooling2d/MaxPool_grad/MaxPoolGrad:0',\n 'dropout/dropout/random_uniform/max:0',\n 'sparse_softmax_cross_entropy_loss/Mul:0',\n 'sparse_softmax_cross_entropy_loss/num_present/Equal:0',\n 'gradients/dropout/dropout/truediv_grad/Neg:0',\n 'gradients/dense/BiasAdd_grad/BiasAddGrad:0',\n 'gradients/dense/MatMul_grad/tuple/control_dependency:0',\n 'max_pooling2d/MaxPool:0',\n 'gradients/dropout/dropout/truediv_grad/Reshape_1:0',\n 'GradientDescent/learning_rate:0',\n 'gradients/dropout/dropout/truediv_grad/Shape:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Mul_grad/Mul_1:0',\n 'gradients/conv2d/Conv2D_grad/ShapeN:1',\n 'conv2d_1/Conv2D:0',\n 
'sparse_softmax_cross_entropy_loss/Const:0',\n 'sparse_softmax_cross_entropy_loss/value:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Sum_grad/Reshape:0',\n 'gradients/dropout/dropout/mul_grad/Mul_1:0',\n 'gradients/dropout/dropout/truediv_grad/RealDiv_1:0',\n 'conv2d/bias:0',\n 'gradients/dense_1/BiasAdd_grad/tuple/control_dependency_1:0',\n 'sparse_softmax_cross_entropy_loss/num_present/broadcast_weights/ones_like:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Mul_grad/Sum_1:0',\n 'random_shuffle_queue_DequeueMany/n:0',\n 'Reshape/shape:0',\n 'dense/MatMul:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Sum_1_grad/Tile:0',\n 'conv2d/kernel:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Mul_grad/Reshape:0',\n 'GradientDescent:0',\n 'gradients/sparse_softmax_cross_entropy_loss/value_grad/Reshape_1:0',\n 'gradients/sparse_softmax_cross_entropy_loss/xentropy/xentropy_grad/mul:0',\n 'gradients/conv2d/Conv2D_grad/ShapeN:0',\n 'GradientDescent/update_dense/kernel/ApplyGradientDescent:0',\n 'gradients/dense/Relu_grad/ReluGrad:0',\n 'gradients/conv2d_1/Conv2D_grad/Conv2DBackpropInput:0',\n 'dense/kernel/read:0',\n 'dropout/dropout/Shape:0',\n 'dropout/dropout/sub/x:0',\n 'gradients/dense/MatMul_grad/MatMul_1:0',\n 'conv2d_1/Relu:0',\n 'gradients/dropout/dropout/truediv_grad/tuple/control_dependency:0',\n 'global_step:0',\n 'sparse_softmax_cross_entropy_loss/num_present/broadcast_weights/ones_like/Const:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Mul_grad/Sum:0',\n 'gradients/dropout/dropout/truediv_grad/BroadcastGradientArgs:1',\n 'gradients/conv2d_1/Relu_grad/ReluGrad:0',\n 'gradients/dropout/dropout/truediv_grad/Sum_1:0',\n 'gradients/conv2d_1/BiasAdd_grad/tuple/control_dependency:0',\n 'max_pooling2d_1/MaxPool:0',\n 'dropout/dropout/truediv:0',\n 'gradients/dense_1/MatMul_grad/tuple/control_dependency_1:0',\n 'GradientDescent/update_conv2d_1/kernel/ApplyGradientDescent:0',\n 'conv2d/Relu:0',\n 
'sparse_softmax_cross_entropy_loss/num_present/broadcast_weights/ones_like/Shape:0',\n 'gradients/sparse_softmax_cross_entropy_loss/value_grad/div_no_nan_2:0',\n 'GradientDescent/update_conv2d/kernel/ApplyGradientDescent:0',\n 'dense/BiasAdd:0',\n 'dropout/dropout/random_uniform/sub:0',\n 'sparse_softmax_cross_entropy_loss/num_present/Select:0',\n 'sparse_softmax_cross_entropy_loss/Sum_1:0',\n 'gradients/conv2d_1/Conv2D_grad/tuple/control_dependency:0',\n 'gradients/sparse_softmax_cross_entropy_loss/value_grad/div_no_nan:0',\n 'dense_1/bias:0',\n 'dense_1/kernel:0',\n 'dense/Relu:0',\n 'sparse_softmax_cross_entropy_loss/num_present/broadcast_weights:0',\n 'gradients/dense_1/MatMul_grad/MatMul:0',\n 'gradients/conv2d_1/Conv2D_grad/Conv2DBackpropFilter:0',\n 'gradients/conv2d/BiasAdd_grad/tuple/control_dependency_1:0',\n 'gradients/sparse_softmax_cross_entropy_loss/value_grad/Sum_1:0',\n 'gradients/sparse_softmax_cross_entropy_loss/xentropy/xentropy_grad/ExpandDims/dim:0',\n 'gradients/conv2d/Conv2D_grad/tuple/control_dependency_1:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Sum_grad/Reshape/shape:0',\n 'gradients/grad_ys_0:0',\n 'gradients/sparse_softmax_cross_entropy_loss/value_grad/tuple/control_dependency:0',\n 'GradientDescent/value:0',\n 'dropout/dropout/Floor:0',\n 'sparse_softmax_cross_entropy_loss/num_present/zeros_like:0',\n 'gradients/Reshape_grad/Reshape:0',\n 'sparse_softmax_cross_entropy_loss/num_present/Equal/y:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Sum_1_grad/Reshape:0',\n 'dense/bias:0',\n 'gradients/dropout/dropout/truediv_grad/RealDiv:0',\n 'GradientDescent/update_dense_1/bias/ApplyGradientDescent:0',\n 'input_olg:0',\n 'conv2d_1/bias:0',\n 'dense/kernel:0',\n 'conv2d_1/kernel:0',\n 'gradients/sparse_softmax_cross_entropy_loss/Mul_grad/Mul:0',\n 'gradients/dense/BiasAdd_grad/tuple/control_dependency_1:0',\n 'dense_1/bias/read:0',\n 'sparse_softmax_cross_entropy_loss/num_present/ones_like:0',\n 
'gradients/dropout/dropout/mul_grad/Mul:0',\n 'gradients/dropout/dropout/truediv_grad/Reshape:0',\n 'gradients/conv2d_1/Conv2D_grad/ShapeN:1',\n 'random_shuffle_queue_DequeueMany:0',\n 'Reshape:0',\n 'sparse_softmax_cross_entropy_loss/num_present/Const:0',\n 'gradients/dense_1/BiasAdd_grad/BiasAddGrad:0',\n 'gradients/dropout/dropout/truediv_grad/Sum:0',\n 'dropout/dropout/random_uniform/RandomUniform:0',\n 'gradients/dense_1/BiasAdd_grad/tuple/control_dependency:0',\n 'gradients/dense/MatMul_grad/tuple/control_dependency_1:0',\n 'conv2d_1/BiasAdd:0',\n 'dense/bias/read:0']" + }, + "metadata": {}, + "output_type": "execute_result", + "execution_count": 24 + } + ], + "source": [ + "ts = tr.tensors()" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": { + "pycharm": { + "is_executing": false + } + }, + "outputs": [ + { + "name": "stdout", + "text": [ + "input_olg/shape:0 (4,)\nconv2d/BiasAdd:0 (100, 28, 28, 64)\nrandom_shuffle_queue_DequeueMany:1 (100, 28, 28)\nsparse_softmax_cross_entropy_loss/num_present/ones_like/Const:0 (1,)\nsparse_softmax_cross_entropy_loss/num_present:0 (1,)\nsparse_softmax_cross_entropy_loss/Const_1:0 (1,)\ndropout/dropout/sub:0 (1,)\ndense_1/BiasAdd:0 (100, 10)\nsparse_softmax_cross_entropy_loss/Sum:0 (1,)\ndense_1/MatMul:0 (100, 10)\ndropout/dropout/add:0 (100, 1024)\nconv2d_1/kernel/read:0 (5, 5, 64, 64)\ndropout/dropout/random_uniform/min:0 (1,)\nrandom_shuffle_queue_DequeueMany:2 (100,)\nsparse_softmax_cross_entropy_loss/xentropy/xentropy:0 (100,)\ndropout/dropout/random_uniform:0 (100, 1024)\nsparse_softmax_cross_entropy_loss/xentropy/xentropy:1 (100, 10)\nconv2d/kernel/read:0 (5, 5, 1, 64)\nconv2d_1/bias/read:0 (64,)\nconv2d/Conv2D:0 (100, 28, 28, 64)\ndropout/dropout/mul:0 (100, 1024)\ndropout/dropout/random_uniform/mul:0 (100, 1024)\ndense_1/kernel/read:0 (1024, 10)\nconv2d/bias/read:0 (64,)\ndropout/dropout/rate:0 (1,)\ndropout/dropout/random_uniform/max:0 (1,)\nsparse_softmax_cross_entropy_loss/Mul:0 
(100,)\nsparse_softmax_cross_entropy_loss/num_present/Equal:0 (1,)\nmax_pooling2d/MaxPool:0 (100, 14, 14, 64)\nconv2d_1/Conv2D:0 (100, 14, 14, 64)\nsparse_softmax_cross_entropy_loss/Const:0 (1,)\nsparse_softmax_cross_entropy_loss/value:0 (1,)\nconv2d/bias:0 (64,)\nsparse_softmax_cross_entropy_loss/num_present/broadcast_weights/ones_like:0 (100,)\nrandom_shuffle_queue_DequeueMany/n:0 (1,)\nReshape/shape:0 (2,)\ndense/MatMul:0 (100, 1024)\nconv2d/kernel:0 (5, 5, 1, 64)\ndense/kernel/read:0 (3136, 1024)\ndropout/dropout/Shape:0 (2,)\ndropout/dropout/sub/x:0 (1,)\nconv2d_1/Relu:0 (100, 14, 14, 64)\nglobal_step:0 (1,)\nsparse_softmax_cross_entropy_loss/num_present/broadcast_weights/ones_like/Const:0 (1,)\nmax_pooling2d_1/MaxPool:0 (100, 7, 7, 64)\ndropout/dropout/truediv:0 (100, 1024)\nconv2d/Relu:0 (100, 28, 28, 64)\nsparse_softmax_cross_entropy_loss/num_present/broadcast_weights/ones_like/Shape:0 (1,)\ndense/BiasAdd:0 (100, 1024)\ndropout/dropout/random_uniform/sub:0 (1,)\nsparse_softmax_cross_entropy_loss/num_present/Select:0 (1,)\nsparse_softmax_cross_entropy_loss/Sum_1:0 (1,)\ndense_1/bias:0 (10,)\ndense_1/kernel:0 (1024, 10)\ndense/Relu:0 (100, 1024)\nsparse_softmax_cross_entropy_loss/num_present/broadcast_weights:0 (100,)\ndropout/dropout/Floor:0 (100, 1024)\nsparse_softmax_cross_entropy_loss/num_present/zeros_like:0 (1,)\nsparse_softmax_cross_entropy_loss/num_present/Equal/y:0 (1,)\ndense/bias:0 (1024,)\ninput_olg:0 (100, 28, 28, 1)\nconv2d_1/bias:0 (64,)\ndense/kernel:0 (3136, 1024)\nconv2d_1/kernel:0 (5, 5, 64, 64)\ndense_1/bias/read:0 (10,)\nsparse_softmax_cross_entropy_loss/num_present/ones_like:0 (1,)\nrandom_shuffle_queue_DequeueMany:0 (100,)\nReshape:0 (100, 3136)\nsparse_softmax_cross_entropy_loss/num_present/Const:0 (1,)\ndropout/dropout/random_uniform/RandomUniform:0 (100, 1024)\nconv2d_1/BiasAdd:0 (100, 14, 14, 64)\ndense/bias/read:0 (1024,)\n" + ], + "output_type": "stream" + } + ], + "source": [ + "import re\n", + "for t in ts:\n", + " if 
re.search('radient', t ):\n", + " continue\n", + " v = tr.tensor(t).value(0)\n", + " \n", + " if type(v)==bool:\n", + " print( t, \"is bool\")\n", + " elif v is None:\n", + " print( t, \"is None\" )\n", + " else:\n", + " print(t, v.shape )\n", + " \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "metadata": { + "pycharm": { + "is_executing": false + } + }, + "outputs": [], + "source": [ + "imgs = tr.tensor('input_olg:0').value(0)\n", + "imgs.shape\n", + "a= imgs[0]\n", + "a = a[:,:,0]*255" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": { + "pycharm": { + "is_executing": false + } + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "metadata": { + "pycharm": { + "is_executing": false, + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/plain": "" + }, + "metadata": {}, + "output_type": "execute_result", + "execution_count": 54 + }, + { + "data": { + "text/plain": "
", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD4CAYAAAAq5pAIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAANeElEQVR4nO3df6jVdZ7H8dcrf0A584cVmajVNEk1aJshtVFsLoNTaz90Am2kNpcd1vljhBlYaKP9Y6RlQbZmZAkSHIpxllmngWqyYdgZE7FEGNJw0zInVwy93byFkk0QbvreP+7X2Zvd8znX8+t7vO/nAy7nnO/7fM9586VX3x+f8/XjiBCA8e+CuhsA0BuEHUiCsANJEHYgCcIOJDGxl19mm0v/QJdFhEdb3tae3fZdtvfbPmD70XY+C0B3udVxdtsTJP1R0kJJRyS9Lml5RLxdWIc9O9Bl3diz3yzpQEQcjIiTkn4paXEbnwegi9oJ+wxJh0e8PlIt+wLbK23vtL2zje8C0KauX6CLiPWS1kscxgN1amfPPiBp1ojXM6tlAPpQO2F/XdJs21+zPVnSdyRt6kxbADqt5cP4iPjc9ipJv5M0QdKzEfFWxzoD0FEtD7219GWcswNd15Uf1QA4fxB2IAnCDiRB2IEkCDuQBGEHkiDsQBKEHUiCsANJEHYgCcIOJEHYgSQIO5AEYQeSIOxAEoQdSIKwA0kQdiAJwg4kQdiBJAg7kARhB5Ig7EAShB1IgrADSRB2IAnCDiRB2IEkCDuQRMtTNqN/rF27tmFt0aJFxXXnzp1brJ88ebKlntB/2gq77UOSPpF0StLnETG/E00B6LxO7Nn/OiI+6sDnAOgiztmBJNoNe0j6ve1dtleO9gbbK23vtL2zze8C0IZ2D+Nvj4gB25dJ2mz7nYh4deQbImK9pPWSZDva/D4ALWprzx4RA9XjkKQXJd3ciaYAdF7LYbc9xfZXzzyX9C1JezvVGIDOaucwfpqkF22f+Zz/jIj/6khXOCcRjc+OZs+eXVz3zjvvLNZffvnllnpC/2k57BFxUNJfdLAXAF3E0BuQBGEHkiDsQBKEHUiCsANJcIvreeCKK64o1u+5556Gtc8++6y47vHjx1vqCecf9uxAEoQdSIKwA0kQdiAJwg4kQdiBJAg7kATj7OeB1atXF+vXXHNNw9q+ffuK627fvr2VlnAeYs8OJEHYgSQIO5AEYQeSIOxAEoQdSIKwA0kwzn4eWLp0ad0tdMVFF11UrO/YsaNY379/f7H+wAMPnHNP4xl7diAJwg4kQdiBJAg7kARhB5Ig7EAShB1IgnH2PnDDDTcU65MmTWr5s9etW9fyut22atWqYr3Zdrn++uuL9TvuuKNhbdu2bcV1x6Ome3bbz9oesr13xLKLbW+2/W71OLW7bQJo11gO438m6a6zlj0qaUtEzJa0pXoNoI81DXtEvCrp2FmLF0vaUD3fIGlJh/sC0GGtnrNPi4jB6vkHkqY1eqPtlZJWtvg9ADqk7Qt0ERG2o1BfL2m9JJXeB6C7Wh16O2p7uiRVj0OdawlAN7Qa9k2SVlTPV0h6qTPtAOgWR5SPrG1vlLRA0qWSjkr6kaRfS/qVpCskvSdpWUScfRFvtM9KeRg/Z86cYn3r1q3F+iWXXFKsnzhxomFtwYIFxXV3795drHfTp59+WqxfeOGFxfqePXuK9Ztuuqlh7dSpU8V1z2cR4dGWNz1nj4jlDUrfbKsjAD3Fz2WBJAg7kARhB5Ig7EAShB1Igltce+DWW28t1psNrTXzyCOPNKzVObTWTDu37krlIUdpfA+vtYI9O5AEYQeSIOxAEoQdSIKwA0kQdiAJwg4k0fQW145+2Ti9xfXqq68u1rdv316sX3755cX64OBgsT5jxoxivV+dPHmyWJ84sfwzkAcffLBY37hx4zn3NB40usWVPTuQBGEHkiDsQBKEHUiCsANJE
HYgCcIOJMH97B1w8ODBYn3Xrl3F+t13312sf/zxx8V6aZx9YGCguG63lXqzRx0O/rNm4/AffvhhSz1lxZ4dSIKwA0kQdiAJwg4kQdiBJAg7kARhB5JgnL0HnnzyyWL9yiuvLNabTfn82muvNaytXbu2uO5TTz1VrLdr2bJlDWsTJkworvv+++8X66+88kpLPWXVdM9u+1nbQ7b3jli22vaA7d3V36LutgmgXWM5jP+ZpLtGWb42Im6s/n7b2bYAdFrTsEfEq5KO9aAXAF3UzgW6VbbfrA7zpzZ6k+2Vtnfa3tnGdwFoU6thXyfp65JulDQo6ceN3hgR6yNifkTMb/G7AHRAS2GPiKMRcSoiTkv6qaSbO9sWgE5rKey2p494+W1Jexu9F0B/aDrObnujpAWSLrV9RNKPJC2wfaOkkHRI0ve62ON5b9u2bcX6ggULivU1a9YU67fddlvD2hNPPFFc99577y3WH3rooWJ9aGioWC9pdj87Oqtp2CNi+SiLn+lCLwC6iJ/LAkkQdiAJwg4kQdiBJAg7kARTNo8DS5YsaakmSQ8//HCxfvz48WL96aefLtbnzp3bsHbfffcV1z18+HCx3uzW4KyYshlIjrADSRB2IAnCDiRB2IEkCDuQBGEHkmCcfZxrdhvpxInlGx+b3eL6+OOPF+ulKZvbtXz5aDdk/r/nnnuua9/dzxhnB5Ij7EAShB1IgrADSRB2IAnCDiRB2IEkGGdHW6677rpiffPmzQ1rM2fOLK577Fh5isFbbrmlWD9w4ECxPl4xzg4kR9iBJAg7kARhB5Ig7EAShB1IgrADSTSdxRUoeeedd4r1hQsXNqzt2bOnuO7kyZOL9csuu6xYzzrO3kjTPbvtWba32n7b9lu2f1Atv9j2ZtvvVo9Tu98ugFaN5TD+c0n/GBHfkPSXkr5v+xuSHpW0JSJmS9pSvQbQp5qGPSIGI+KN6vknkvZJmiFpsaQN1ds2SCrPMwSgVud0zm77KknzJP1B0rSIGKxKH0ia1mCdlZJWtt4igE4Y89V421+R9LykH0bEiZG1GL6bZtSbXCJifUTMj4j5bXUKoC1jCrvtSRoO+i8i4oVq8VHb06v6dElD3WkRQCc0PYz38L9F/IykfRHxkxGlTZJWSFpTPb7UlQ5xXisNzTW7vXrKlCnF+tKlS4v1HTt2FOvZjOWc/TZJfytpj+3d1bLHNBzyX9n+rqT3JC3rTosAOqFp2CNiu6RGMw18s7PtAOgWfi4LJEHYgSQIO5AEYQeSIOxAEtziitps2rSpWL///vuL9WuvvbaT7Yx77NmBJAg7kARhB5Ig7EAShB1IgrADSRB2IAmmbEZtLrigvK/ZsGFDsT5nzpxifd68eefc03jAlM1AcoQdSIKwA0kQdiAJwg4kQdiBJAg7kAT3s6M2p0+fLtY3b95crDcbZ8cXsWcHkiDsQBKEHUiCsANJEHYgCcIOJEHYgSSa3s9ue5akn0uaJikkrY+If7e9WtI/SPqweutjEfHbJp/F/exAlzW6n30sYZ8uaXpEvGH7q5J2SVqi4fnY/xQRT461CcIOdF+jsI9lfvZBSYPV809s75M0o7PtAei2czpnt32VpHmS/lAtWmX7TdvP2p7aYJ2Vtnfa3tlWpwDaMuZ/g872VyRtk/SvEfGC7WmSPtLwefy/aPhQ/++bfAaH8UCXtXzOLkm2J0n6jaTfRcRPRqlfJek3EVG8M4GwA93X8j84aduSnpG0b2TQqwt3Z3xb0t52mwTQPWO5Gn+7pNck7ZF05p7ExyQtl3Sjhg/jD0n6XnUxr/RZ7NmBLmvrML5TCDvQffy78UByhB1IgrADSRB2IAnCDiRB2IEkCDuQBGEHkiDsQBKEHUiCsANJEHYgCcIOJEHYgSR6PWXzR5LeG/H60mpZP+rX3vq1L4neWtXJ3q5sVOjp/exf+nJ7Z0TMr62Bgn7trV/7kuitVb3qjcN4IAnCDiRRd9jX1/z9Jf3aW7/2JdFbq
3rSW63n7AB6p+49O4AeIexAErWE3fZdtvfbPmD70Tp6aMT2Idt7bO+ue366ag69Idt7Ryy72PZm2+9Wj6POsVdTb6ttD1TbbrftRTX1Nsv2Vttv237L9g+q5bVuu0JfPdluPT9ntz1B0h8lLZR0RNLrkpZHxNs9baQB24ckzY+I2n+AYfuvJP1J0s/PTK1l+98kHYuINdX/KKdGxD/1SW+rdY7TeHept0bTjP+datx2nZz+vBV17NlvlnQgIg5GxElJv5S0uIY++l5EvCrp2FmLF0vaUD3foOH/WHquQW99ISIGI+KN6vknks5MM17rtiv01RN1hH2GpMMjXh9Rf833HpJ+b3uX7ZV1NzOKaSOm2fpA0rQ6mxlF02m8e+msacb7Ztu1Mv15u7hA92W3R8RNkv5G0verw9W+FMPnYP00drpO0tc1PAfgoKQf19lMNc3485J+GBEnRtbq3Haj9NWT7VZH2AckzRrxema1rC9ExED1OCTpRQ2fdvSTo2dm0K0eh2ru588i4mhEnIqI05J+qhq3XTXN+POSfhERL1SLa992o/XVq+1WR9hflzTb9tdsT5b0HUmbaujjS2xPqS6cyPYUSd9S/01FvUnSiur5Ckkv1djLF/TLNN6NphlXzduu9unPI6Lnf5IWafiK/P9I+uc6emjQ19WS/rv6e6vu3iRt1PBh3f9q+NrGdyVdImmLpHclvSLp4j7q7T80PLX3mxoO1vSaertdw4fob0raXf0tqnvbFfrqyXbj57JAElygA5Ig7EAShB1IgrADSRB2IAnCDiRB2IEk/g9x+S8AWuw0bAAAAABJRU5ErkJggg==\n" + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plt.imshow(a, cmap='gray')\n", + "\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.4" + }, + "pycharm": { + "stem_cell": { + "cell_type": "raw", + "source": [], + "metadata": { + "collapsed": false + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/examples/analysis/notebooks/NNRecipe/mnist.py b/examples/analysis/notebooks/NNRecipe/mnist.py new file mode 100644 index 0000000000..d21aace8a4 --- /dev/null +++ b/examples/analysis/notebooks/NNRecipe/mnist.py @@ -0,0 +1,127 @@ +#@title Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np + +#import os +#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # or any {'0', '1', '2'} +import tensorflow as tf + +from tornasole.tensorflow import TornasoleHook, SaveConfig + +tf.logging.set_verbosity(tf.logging.ERROR) + + +def cnn_model_fn(features, labels, mode): + """Model function for CNN.""" + # Input Layer + input_layer = tf.reshape(features["x"], [-1, 28, 28, 1], name="input_olg") + + # Convolutional Layer #1 + conv1 = tf.layers.conv2d( + inputs=input_layer, + filters=64, + kernel_size=[5, 5], + padding="same", + activation=tf.nn.relu) + + # Pooling Layer #1 + pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) + + # Convolutional Layer #2 and Pooling Layer #2 + conv2 = tf.layers.conv2d( + inputs=pool1, + filters=64, + kernel_size=[5, 5], + padding="same", + activation=tf.nn.relu) + pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) + + # Dense Layer + pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64]) + dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu) + dropout = tf.layers.dropout( + inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN) + + # Logits Layer + logits = tf.layers.dense(inputs=dropout, units=10) + + predictions = { + # Generate predictions (for PREDICT and EVAL mode) + "classes": tf.argmax(input=logits, axis=1), + # Add `softmax_tensor` to the graph. It is used for PREDICT and by the + # `logging_hook`. 
+ "probabilities": tf.nn.softmax(logits, name="softmax_tensor") + } + + if mode == tf.estimator.ModeKeys.PREDICT: + return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) + + # Calculate Loss (for both TRAIN and EVAL modes) + loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits) + + # Configure the Training Op (for TRAIN mode) + if mode == tf.estimator.ModeKeys.TRAIN: + optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001) + train_op = optimizer.minimize( + loss=loss, + global_step=tf.train.get_global_step()) + return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) + + # Add evaluation metrics (for EVAL mode) + eval_metric_ops = { + "accuracy": tf.metrics.accuracy( + labels=labels, predictions=predictions["classes"]) + } + return tf.estimator.EstimatorSpec( + mode=mode, loss=loss, eval_metric_ops=eval_metric_ops) + + +# Load training and eval data +((train_data, train_labels), + (eval_data, eval_labels)) = tf.keras.datasets.mnist.load_data() + +train_data = train_data/np.float32(255) +print(train_data.shape) +train_labels = train_labels.astype(np.int32) # not required + +eval_data = eval_data/np.float32(255) +eval_labels = eval_labels.astype(np.int32) # not required + + +mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn, model_dir="./model") + +ts_hook = TornasoleHook(out_dir="ts_outputs/train", + save_all=True, + save_config=SaveConfig(save_interval=1)) + +# Train the model +train_input_fn = tf.estimator.inputs.numpy_input_fn( + x={"x": train_data}, + y=train_labels, + batch_size=100, + num_epochs=None, + shuffle=True) + + +mnist_classifier.train(input_fn=train_input_fn, steps=5, hooks=[ts_hook]) + + +eval_input_fn = tf.estimator.inputs.numpy_input_fn( + x={"x": eval_data}, + y=eval_labels, + num_epochs=1, + shuffle=False) + +eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn) +print(eval_results) + diff --git a/examples/analysis/scripts/README.md 
b/examples/analysis/scripts/README.md new file mode 100644 index 0000000000..c25880437d --- /dev/null +++ b/examples/analysis/scripts/README.md @@ -0,0 +1,28 @@ +# Example Scripts +This folder has some example scripts which invoke rules that we have written. +The way to run the scripts is straight forward. + +### Vanishing Gradient +``` +python check_grads.py --trial-dir ~/ts_outputs/grads/ +``` + +### Similar across runs +This scripts you how to check which tensors have different values across two runs. +You pass both trials as `trial-dir`. +``` +python similar_across_runs.py \ + --trial-dir ~/ts_outputs/trial1 \ + --trial-dir ~/ts_outputs/trial2 +``` + +## Weight Update Ratio +This script lets you monitor the ratio of weights to updates. +You can configure the thresholds by passing them. + +``` +python weight_update_ratio.py \ + --trial-dir ~/ts_outputs/trial \ + --large-threshold 10 \ + --small-threshold 0.00000001 +``` diff --git a/examples/analysis/scripts/check_grads.py b/examples/analysis/scripts/check_grads.py new file mode 100644 index 0000000000..59019681ec --- /dev/null +++ b/examples/analysis/scripts/check_grads.py @@ -0,0 +1,18 @@ +import argparse +from tornasole.trials import create_trial +from tornasole.rules.generic import VanishingGradient +from tornasole.rules import invoke_rule + +#example trial root +# local: /home/ubuntu/tmp/pycharm_project_932/repositories/tornasole_tf/examples/training_scripts/tornasole_outputs/ +# s3: s3://huilgolr-tf/tornasole/tornasole_outputs + +parser = argparse.ArgumentParser() +parser.add_argument('--trial-dir', type=str) +parser.add_argument('--threshold', type=float, default=0.0000001) +args = parser.parse_args() + +trial_obj = create_trial(args.trial_dir) +vr = VanishingGradient(base_trial=trial_obj, threshold=args.threshold) +invoke_rule(vr) + diff --git a/examples/analysis/scripts/similar_across_runs.py b/examples/analysis/scripts/similar_across_runs.py new file mode 100644 index 0000000000..2514b7ff2d --- 
/dev/null +++ b/examples/analysis/scripts/similar_across_runs.py @@ -0,0 +1,22 @@ +import argparse +from tornasole.rules.generic import SimilarAcrossRuns +from tornasole.trials import create_trial +from tornasole.rules.rule_invoker import invoke_rule + +parser = argparse.ArgumentParser() +parser.add_argument('--trial-dir', default=[], type=str, action='append') +parser.add_argument('--include', default=[], type=str, action='append', + help="""List of REs for tensors to include for this check""") +parser.add_argument('--start-step', default=0, type=int) +parser.add_argument('--end-step', type=int) +args = parser.parse_args() +if len(args.trial_dir) != 2: + raise RuntimeError('This rule requires two trials') + +trials = [] +for t in args.trial_dir: + trials.append(create_trial(t, range_steps=(args.start_step, args.end_step))) + +sr = SimilarAcrossRuns(trials[0], trials[1], include_regex=args.include) +invoke_rule(sr, start_step=args.start_step, end_step=args.end_step) + diff --git a/examples/analysis/scripts/weight_update_ratio.py b/examples/analysis/scripts/weight_update_ratio.py new file mode 100644 index 0000000000..d1569f680e --- /dev/null +++ b/examples/analysis/scripts/weight_update_ratio.py @@ -0,0 +1,18 @@ +import argparse +from tornasole.trials import create_trial +from tornasole.rules.generic import WeightUpdateRatio +from tornasole.rules.rule_invoker import invoke_rule + +parser = argparse.ArgumentParser() +parser.add_argument('--trial-dir', type=str) +parser.add_argument('--start-step', type=int, default=0) +parser.add_argument('--end-step', type=int) +parser.add_argument('--large-threshold', type=float, default=10) +parser.add_argument('--small-threshold', type=float, default=0.00000001) +args = parser.parse_args() + +trial = create_trial(args.trial_dir, range_steps=(args.start_step, args.end_step)) +wur = WeightUpdateRatio(trial, + large_threshold=args.large_threshold, + small_threshold=args.small_threshold) +invoke_rule(wur, start_step=args.start_step, 
end_step=args.end_step) \ No newline at end of file diff --git a/examples/mxnet/notebook/mnist/SimpleInteractiveAnalysis.ipynb b/examples/mxnet/notebook/mnist/SimpleInteractiveAnalysis.ipynb new file mode 100644 index 0000000000..0519eaf599 --- /dev/null +++ b/examples/mxnet/notebook/mnist/SimpleInteractiveAnalysis.ipynb @@ -0,0 +1,964 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Simple Interactive Analysis in Tornasole\n", + "This notebook will demonstrate the simplest kind of interactive analysis that can be run in Tornasole. It will focus on the [vanishing/exploding gradient](https://medium.com/learn-love-ai/the-curious-case-of-the-vanishing-exploding-gradient-bf58ec6822eb) problems on a simple MNIST digit recognition." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Some basic setup that's always helpful" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The autoreload extension is already loaded. To reload it, use:\n", + " %reload_ext autoreload\n" + ] + } + ], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Make sure that MXNet is accessible! 
If you are on the EC2 Deep Learning AMI, you will probably want\n", + "to activate the right MXNet environment\n", + "```\n", + "sh> source activate mxnet_p36\n", + "```\n", + "You'll probably have to restart this notebook after doing this" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's import some basic libraries for ML" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import mxnet as mx\n", + "from mxnet import gluon, autograd\n", + "from mxnet.gluon import nn\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's copy the Tornasole libraries to this instance, this step has to be executed only once. \n", + "Please make sure that the AWS account you are using can access the `tornasole-binaries-use1` bucket.\n", + "\n", + "To do so you'll need the appropriate AWS credentials. There are several ways of doing this:\n", + "- inject temporary credentials \n", + "- if running on EC2, use [EC2 roles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) that can access all S3 buckets\n", + "- (preferred) run this notebook on a [SageMaker notebook instance](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi.html)\n", + "\n", + "The code below downloads the necessary `.whl` files and installs them in the current environment. 
Only run the first time!\n" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "#WARNING - uncomment this code only if you haven't done this before\n", + "\n", + "#!aws s3 cp s3://tornasole-binaries-use1/tornasole_core/py3/tornasole_core-0.1-py3-none-any.whl .\n", + "#!aws s3 cp s3://tornasole-binaries-use1/tornasole_rules/py3/tornasole_rules-0.1-py3-none-any.whl .\n", + "#!aws s3 cp s3://tornasole-binaries-use1/tornasole_mxnet/py3/tornasole_mxnet-0.1-py3-none-any.whl .\n", + "#!pip install tornasole_core-0.1-py3-none-any.whl tornasole_rules-0.1-py3-none-any.whl tornasole_mxnet-0.1-py3-none-any.whl" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Model Training and Gradient Analysis\n", + "At this point we have all the ingredients installed on our machine. We can now start training.\n", + "\n", + "The goal of this notebook is to show how to detect the Vanishing Gradient problem. We will first do it manually and then automatic." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "from tornasole.mxnet import TornasoleHook, SaveConfig\n", + "from tornasole.trials import LocalTrial" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can change the logging level if appropriate " + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "#import logging\n", + "#logging.getLogger(\"tornasole\").setLevel(logging.WARNING)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can define a simple network - it doesn't really matter what it is.\n", + "Importantly - we **add the Tornasole Hook**. 
This hook will be run at every batch and will save selected tensors (in this case, all of them) to the desired directory (in this case, `'{base_loc}/{run_id}'`.\n", + "\n", + "`{base_loc}` can be either a path on a local file system (for instance, `./ts_output/`) or an S3 bucket/object (`s3://mybucket/myprefix/`).\n", + "\n", + "See the [documentation](https://github.com/awslabs/tornasole_mxnet/blob/master/README.md) for more details." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "def create_net( tornasole_save_interval, base_loc, run_id ):\n", + " net = nn.Sequential()\n", + " with net.name_scope():\n", + " net.add(nn.Dense(128, activation='relu'))\n", + " net.add(nn.Dense(64, activation='relu'))\n", + " net.add(nn.Dense(10))\n", + "\n", + " # Create and add the hook. Arguments:\n", + " # - save data in './{base_loc}/{run_id} - Note: s3 is also supported\n", + " # - save every 100 batches\n", + " # - save every tensor: inputs/outputs to each layer, as well as gradients\n", + " trial_dir = base_loc + run_id\n", + " hook = TornasoleHook(out_dir=trial_dir,\n", + " save_config=SaveConfig(save_interval=100), \n", + " save_all=True)\n", + " hook.register_hook(net)\n", + " return net" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And we create a simple training script. No Tornasole-specific code here, this is a slightly modified version of the [digit recognition](https://github.com/apache/incubator-mxnet/blob/master/example/gluon/mnist/mnist.py) example on the MXNet website." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "def transformer(data, label):\n", + " data = data.reshape((-1,)).astype(np.float32)/255\n", + " return data, label\n", + "\n", + "def test(ctx, val_data):\n", + " metric = mx.metric.Accuracy()\n", + " for data, label in val_data:\n", + " data = data.as_in_context(ctx)\n", + " label = label.as_in_context(ctx)\n", + " output = net(data)\n", + " metric.update([label], [output])\n", + "\n", + " return metric.get()\n", + "\n", + "\n", + "def train(net, epochs, ctx, learning_rate, momentum):\n", + " train_data = gluon.data.DataLoader(\n", + " gluon.data.vision.MNIST('./data', train=True, transform=transformer),\n", + " batch_size=100, shuffle=True, last_batch='discard')\n", + "\n", + " val_data = gluon.data.DataLoader(\n", + " gluon.data.vision.MNIST('./data', train=False, transform=transformer),\n", + " batch_size=100, shuffle=False)\n", + " \n", + " # Collect all parameters from net and its children, then initialize them.\n", + " net.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)\n", + " # Trainer is for updating parameters with gradient.\n", + " trainer = gluon.Trainer(net.collect_params(), 'sgd',\n", + " {'learning_rate': learning_rate, 'momentum': momentum})\n", + " metric = mx.metric.Accuracy()\n", + " loss = gluon.loss.SoftmaxCrossEntropyLoss()\n", + "\n", + " for epoch in range(epochs):\n", + " # reset data iterator and metric at begining of epoch.\n", + " metric.reset()\n", + " for i, (data, label) in enumerate(train_data):\n", + " # Copy data to ctx if necessary\n", + " data = data.as_in_context(ctx)\n", + " label = label.as_in_context(ctx)\n", + " # Start recording computation graph with record() section.\n", + " # Recorded graphs can then be differentiated with backward.\n", + " with autograd.record():\n", + " output = net(data)\n", + " L = loss(output, label)\n", + " L.backward()\n", + " # take a gradient step with batch_size equal to 
data.shape[0]\n", + " trainer.step(data.shape[0])\n", + " # update metric at last.\n", + " metric.update([label], [output])\n", + "\n", + " if i % 100 == 0 and i > 0:\n", + " name, acc = metric.get()\n", + " print('[Epoch %d Batch %d] Training: %s=%f'%(epoch, i, name, acc))\n", + "\n", + " name, acc = metric.get()\n", + " print('[Epoch %d] Training: %s=%f'%(epoch, name, acc))\n", + "\n", + " name, val_acc = test(ctx, val_data)\n", + " print('[Epoch %d] Validation: %s=%f'%(epoch, name, val_acc))\n", + "\n", + " net.save_parameters('mnist.params')\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Clear up from previous runs, we remove old data (warning - we assume that we have set `ts_output` as the directory into which we send data)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "!rm -rf ./ts_output/" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "At this point we are ready to train. We will train this simple model.\n", + "\n", + "For the purposes of this example, we will name this run as `'good'` because we know it will converge to a good solution. If you have a GPU on your machine, you can change `ctx=mx.gpu(0)`.\n", + "\n", + "Behind the scenes, the `TornasoleHook` is saving the data requested." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Downloading ./data/train-images-idx3-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/mnist/train-images-idx3-ubyte.gz...\n", + "Downloading ./data/train-labels-idx1-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/mnist/train-labels-idx1-ubyte.gz...\n", + "Downloading ./data/t10k-images-idx3-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/mnist/t10k-images-idx3-ubyte.gz...\n", + "Downloading ./data/t10k-labels-idx1-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/mnist/t10k-labels-idx1-ubyte.gz...\n", + "[Epoch 0 Batch 100] Training: accuracy=0.799406\n", + "[Epoch 0 Batch 200] Training: accuracy=0.859552\n", + "[Epoch 0 Batch 300] Training: accuracy=0.886047\n", + "[Epoch 0 Batch 400] Training: accuracy=0.899127\n", + "[Epoch 0 Batch 500] Training: accuracy=0.909840\n", + "[Epoch 0] Training: accuracy=0.917400\n", + "[Epoch 0] Validation: accuracy=0.956400\n", + "[Epoch 1 Batch 100] Training: accuracy=0.962178\n", + "[Epoch 1 Batch 200] Training: accuracy=0.963234\n", + "[Epoch 1 Batch 300] Training: accuracy=0.963555\n", + "[Epoch 1 Batch 400] Training: accuracy=0.964489\n", + "[Epoch 1 Batch 500] Training: accuracy=0.963653\n", + "[Epoch 1] Training: accuracy=0.964333\n", + "[Epoch 1] Validation: accuracy=0.963900\n", + "[Epoch 2 Batch 100] Training: accuracy=0.973267\n", + "[Epoch 2 Batch 200] Training: accuracy=0.972736\n", + "[Epoch 2 Batch 300] Training: accuracy=0.973123\n", + "[Epoch 2 Batch 400] Training: accuracy=0.973317\n", + "[Epoch 2 Batch 500] Training: accuracy=0.972894\n", + "[Epoch 2] Training: accuracy=0.973183\n", + "[Epoch 2] Validation: accuracy=0.971000\n", + "[Epoch 3 Batch 100] Training: accuracy=0.980495\n", + "[Epoch 3 Batch 200] 
Training: accuracy=0.980199\n", + "[Epoch 3 Batch 300] Training: accuracy=0.980465\n", + "[Epoch 3 Batch 400] Training: accuracy=0.979825\n", + "[Epoch 3 Batch 500] Training: accuracy=0.979641\n", + "[Epoch 3] Training: accuracy=0.979417\n", + "[Epoch 3] Validation: accuracy=0.970100\n" + ] + } + ], + "source": [ + "net = create_net( tornasole_save_interval=100, base_loc='./ts_output/', run_id='good')\n", + "train(net=net, epochs=4, ctx=mx.cpu(), learning_rate=0.1, momentum=0.9)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Data Analysis - Manual\n", + "Now that we have trained the system we can analyze the data. Notice that this notebook focuses on after-the-fact analysis. Tornasole also provides a collection of tools to do automatic analysis as the training run is progressing, which will be covered in a different notebook." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We import a basic analysis library, which defines a concept of `Trial`. A `Trial` is a single training run, which is depositing values in a local directory (`LocalTrial`) or S3 (`S3Trial`). In this case we are using a `LocalTrial` - if you wish, you can change the output from `./ts_output` to `s3://mybucket/myprefix` and use `S3Trial` instead of `LocalTrial`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And we read the data" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tornasole:Loading trial myrun at path ./ts_output/good/\n", + "INFO:tornasole:Loaded 4 collections\n", + "INFO:tornasole:Loading 28 new steps\n" + ] + } + ], + "source": [ + "good_trial = LocalTrial( 'myrun', './ts_output/good/')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can list all the tensors we know something about. 
Each one of these names is the name of a tensor - the name is a combination of the layer name (which, in these cases, is auto-assigned by MXNet) and whether it's an input/output/gradient." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['sequential1_dense0_relu_input_0',\n", + " 'sequential1_dense0_relu_output',\n", + " 'sequential1_dense0_input_0',\n", + " 'sequential1_dense0_output',\n", + " 'sequential1_dense1_relu_input_0',\n", + " 'sequential1_dense1_relu_output',\n", + " 'sequential1_dense1_input_0',\n", + " 'sequential1_dense1_output',\n", + " 'sequential1_dense2_input_0',\n", + " 'sequential1_dense2_output',\n", + " 'sequential1_input_0',\n", + " 'sequential1_output',\n", + " 'sequential1_dense0_weight',\n", + " 'gradient/sequential1_dense0_weight',\n", + " 'sequential1_dense0_bias',\n", + " 'gradient/sequential1_dense0_bias',\n", + " 'sequential1_dense1_weight',\n", + " 'gradient/sequential1_dense1_weight',\n", + " 'sequential1_dense1_bias',\n", + " 'gradient/sequential1_dense1_bias',\n", + " 'sequential1_dense2_weight',\n", + " 'gradient/sequential1_dense2_weight',\n", + " 'sequential1_dense2_bias',\n", + " 'gradient/sequential1_dense2_bias']" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "good_trial.tensors()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For each tensor we can ask for which steps we have data - in this case, every 100 steps" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "ename": "AttributeError", + "evalue": "'NoneType' object has no attribute 'steps'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in 
\u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mgood_trial\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'gradient/sequential0_dense0_weight'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msteps\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m: 'NoneType' object has no attribute 'steps'" + ] + } + ], + "source": [ + "good_trial.tensor('gradient/sequential0_dense0_weight').steps()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can obtain each tensor at each step as a `numpy` array" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "ename": "AttributeError", + "evalue": "'NoneType' object has no attribute 'step'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgood_trial\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'gradient/sequential0_dense0_weight'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m300\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m: 'NoneType' object has no attribute 'step'" + ] + } + ], + "source": [ + "type(good_trial.tensor('gradient/sequential0_dense0_weight').step(300).value)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Gradient Analysis" + ] + }, + { + "cell_type": 
"markdown", + "metadata": {}, + "source": [ + "We can also create a simple function that prints the `np.mean` of the `np.abs` of each gradient. We expect each gradient to get smaller over time, as the system converges to a good solution. Now, remember that this is an interactive analysis - we are showing these tensors to give an idea of the data. \n", + "\n", + "Later on in this notebook we will run an automated analysis." + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "# Define a function that, for the given tensor name, walks through all \n", + "# the batches for which we have data and computes mean(abs(tensor)).\n", + "# Returns the set of steps and the values\n", + "\n", + "def get_data(trial, tname):\n", + " tensor = trial.tensor(tname)\n", + " steps = tensor.steps()\n", + " vals = []\n", + " for s in steps:\n", + " val = tensor.step(s).value\n", + " val = np.mean(np.abs(val))\n", + " vals.append(val)\n", + " return steps, vals" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "def plot_gradients( lt ):\n", + " for tname in lt.tensors():\n", + " if not 'gradient' in tname: continue\n", + " steps, data = get_data(lt, tname)\n", + " plt.plot( steps, data, label=tname)\n", + " plt.legend()\n", + " plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can plot these gradiends. Notice how they are (mostly!) decreasing. We should investigate the spikes!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAD8CAYAAACMwORRAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzsnXlclVX+x98Pi4DsqxsqKoGAILIoiJDmnuZomY1Zk5nZYqNT2S9npjJLW61xzMoszRqt0Vyy3DIVFRUUEFBAFFBiERd2kB2e3x/IHZDtAvdy4XLerxcvvc89zznf+9x7P/c833PO50iyLCMQCAQC7UJH0wEIBAKBQPUIcRcIBAItRIi7QCAQaCFC3AUCgUALEeIuEAgEWogQd4FAINBChLgLBAKBFiLEXSAQCLQQIe4CgUCghehpqmEbGxvZwcFBU80LBAJBlyQyMjJLlmXblsppTNwdHByIiIjQVPMCgUDQJZEk6Q9lyom0jEAgEGghQtwFAoFACxHiLhAIBFqIxnLugq5HRUUF6enplJaWajoUgUDrMTQ0xN7eHn19/TadL8RdoDTp6emYmpri4OCAJEmaDkcg0FpkWSY7O5v09HQGDRrUpjpEWkagNKWlpVhbWwthFwjUjCRJWFtbt+suWYi7oFUIYRcIOob2fte6nLhnpaYQ8uN3lBQVajoUgUAg6LR0OXHPvZnJuZ9/ouDWTU2HIhAIBJ2WLifuppbWABTmZGs4EoE24ODgQFZWFgCjR49ucz1btmzh+vXr9Y7997//ZfXq1e2KT12sXbuW4uJixeMHH3yQvLy8Zs+pe60WLFiAnZ0dw4YNa1W7KSkprT6nrciyzJIlS3B0dMTDw4Pz58+rpR1lrt3YsWMbXZEfHR3NgQMH1BJXlxN3E6sacS8S4i5ogsrKyjadd+bMmTa32Zi4Hzx4kClTprS5TnVyr7gfOHAACwsLpc+fP38+hw4dUkdoKuPgwYMkJiaSmJjIxo0beeGFF9TSTmuvXV3UKe5dbipkTwsLJB0dIe4aZuWvccRfL1Bpna59zVjxkFuL5d599122bt2Kra0t/fv3x9vbm3379uHp6cmpU6eYO3cuTk5OrFq1ivLycqytrdm2bRu9evUiOzubuXPnkpGRgb+/P7IsK+o1MTGhqKgIgI8//pgdO3ZQVlbGrFmzWLlyJSkpKUydOpUxY8Zw5swZ+vXrx969e9m/fz8RERHMmzcPIyMjQkNDMTQ0JDo6Gi8vL06cOMHSpUuBmkGykydPYmpq2mgbAKtXr+a7777Dzs5O8fqWLVvG2LFjWbNmDT4+PmRlZeHj40NKSgpVVVUsX76c48ePU1ZWxuLFi3nuuec4fvw4b7/9NjY2NsTGxuLt7c3WrVv57LPPuH79OuPGjcPGxobg4GCF15ONjQ0zZ84kLS2N0tJSli5dyqJFixq8B0FBQaSkpCj1vkZGRrJgwQIAJk2apDje2rglSWL58uX88ssv6OnpMWnSJNasWcPt27d5/vnnSU1NBWp+uAICAti7dy9/+ctfkCQJPz8/8vLyyMzMpE+fPg1iXLx4MZMnT2bGjBnMmjULS0tLNm/ezObNm0lOTmb16tVs3bqVdevWUV5ezqhRo/jiiy/Q1dWtd+0a+2wuW7YMgJ9++okXX3yRvLw8Nm3axKhRo3jrrbcoKSnh1KlT/P3vf+exxx5T6poqQ5fruevo6GJsYSnEvZsSHh7Orl27iImJ4eDBg/VudcvLy4mIiODVV19lzJgxhIWFERUVxZ///Gc++ugjAFauXMmYMWOIi4tj1qxZCkGoy+HDh0lMTOTcuXNER0cTGRnJyZMnAUhMTGTx4sXExcVhYWHBrl
27mD17Nj4+Pmzbto3o6GiMjIyIiopi+PDhSJLEmjVr+Pzzz4mOjiYkJAQjI6Mm24iMjOS///2vokcXHh7e4jXZtGkT5ubmhIeHEx4eztdff821a9cAiIqKYu3atcTHx3P16lVOnz7NkiVL6Nu3L8HBwQQHBzeob/PmzURGRhIREcG6devIzm7fd+3pp5/ms88+IyYmpl1xZ2dns2fPHuLi4rhw4QJvvPEGAEuXLuXll19WfDYWLlwIQEZGBv3791e0Z29vT0ZGRqMxBgYGEhISojgvPj4egJCQEIKCgrh06RLbt2/n9OnTREdHo6ury7Zt2+rV0dxnE2ruKM+dO8fatWtZuXIlPXr04J133uGxxx4jOjpapcIOXbDnDjWpmaJcIe6aRJketjo4ffo0f/rTnzA0NMTQ0JCHHnpI8VzdL0d6ejqPPfYYmZmZlJeXKxaCnDx5kt27dwMwbdo0LC0tG7Rx+PBhDh8+zIgRIwAoKioiMTGRAQMGMGjQIDw9PQHw9vZusvd66NAhpk6dCkBAQACvvPIK8+bN4+GHH8be3r7JNgoLC5k1axY9e/YEYMaMGS1ek8OHD3PhwgV27twJQH5+PomJifTo0YORI0dib28PgKenJykpKYwZM6bZ+tatW8eePXsASEtLIzExEWtr6xbjaIy8vDzy8vIICgoC4Mknn+TgwYNtitvPzw9DQ0OeeeYZpk+fzvTp0wE4cuSIQowBCgoKFHdgyhIYGKj4MXF1dSU3N5fMzExCQ0NZt24d3333HZGRkfj6+gJQUlKCnZ1dvTqa+2wCPPzww0DznxtV0jXF3dKa3MzGf4EF3RdjY2PF///617/yyiuvMGPGDMVtvrLIsszf//53nnvuuXrHU1JSMDAwUDzW1dWlpKSk0ToOHz7Mrl27AFi+fDnTpk3jwIEDBAQE8NtvvzXZxtq1a5uMS09Pj+rqaoB6i1tkWeazzz5j8uTJ9cofP368QbwtjUccP36cI0eOEBoaSs+ePRk7dqza7CZaG7eenh7nzp3j6NGj7Ny5k/Xr13Ps2DGqq6sJCwvD0NCwXj39+vUjLS1N8Tg9PZ1+/fo1Gku/fv3Iy8vj0KFDBAUFkZOTw44dOzAxMcHU1BRZlnnqqad4//332/x6a1+TMu+DKuhyaRm423MXaZluSUBAAL/++iulpaUUFRWxb9++Rsvl5+crvsjfffed4nhQUBA//PADUDPglpub2+DcyZMns3nzZkXvLyMjg1u3bjUbl6mpKYWFhYq2KysrFb3d5ORk3N3def311/H19SUhIaHJNoKCgvj5558pKSmhsLCQX3/9VdGGg4MDkZGRAIrebm28X375JRUVFQBcuXKFO3fuKB1vXfLz87G0tKRnz54kJCQQFhbWbD0tYWFhgYWFBadOnQKol8pobdxFRUXk5+fz4IMP8q9//UuR5pk0aRKfffaZolx0dDRQc9fz/fffI8syYWFhmJubN5pvr8XPz4+1a9cSFBREYGAga9asITAwEIDx48ezc+dOxecgJyeHP/6ob6uu7GezLk29D6qgy4p7WfEdyksb7zUJtBdfX19mzJiBh4cHU6dOxd3dHXNz8wbl3n77bR599FG8vb2xsbFRHF+xYgUnT57Ezc2N3bt3M2DAgAbnTpo0iccffxx/f3/c3d2ZPXt2i1/A+fPn8/zzz+Pp6ckvv/zChAkTFM+tXbuWYcOG4eHhgb6+PlOnTm2yDS8vLx577DGGDx/O1KlTFWkAgGXLlvHll18yYsQIxZREgIULF+Lq6oqXlxfDhg3jueeea7FnuGjRIqZMmcK4cePqHZ8yZQqVlZW4uLiwfPly/Pz8Gj1/7ty5+Pv7c/nyZezt7dm0aVOTbX377bcsXrwYT0/PegPYrY27sLCQ6dOn4+HhwZgxY/j000+BmjRSREQEHh4euLq6smHDBqBmiuLgwYNxdHTk2Wef5Ysvvmj2mgQGBlJZWYmjoyNeXl7k5OQoxN3V1ZVVq1Yxad
IkPDw8mDhxIpmZmfXOV/azWZdx48YRHx+Pp6cn27dvb7Zsq5FlWSN/3t7ecluJO3FUXjNnmpydkdbmOgStJz4+XtMhyLIsy4WFhbIsy/KdO3dkb29vOTIyUsMR1eeZZ56RQ0NDVVLXihUr5I8//lgldQnUj6o/m41954AIWQmN7Zo59zpz3a362ms4GkFHs2jRIuLj4yktLeWpp57Cy8tL0yHV45tvvtF0CAIN0Zk+m11e3AXdj9qceXegNQPBmmbx4sWcPn263rGlS5fy9NNPayiihly8eJEnn3yy3jEDAwPOnj2rkvo702ezS4u7sCAQCDoPn3/+uaZDaBF3d3fFgKu20yUHVHsYGtHDqKfouQsEAkETdElxBzEdUiAQCJqja4u7WKUqEAgEjdJlxd1U9NwFAoGgSbqsuJtY2XAnL5fq6ipNhyLowgg/9xq6u5/78ePHFV4196LMtemMdGFxt0aurqa4C150gXoRfu4tI/zclac9fu2apEtOhYT6c91r/y/oQA4uhxsXVVtnb3eY+kGLxYSfu/BzV7WfO9S4SU6bNo2kpCTGjRvHF198gY6OTovXpqqqimeeeYaIiAgkSWLBggW8/PLLSl0bddJlxd20dq57bja9NRyLoOOo65ldUVGBl5cX3t7ewP/83AFyc3MJCwtDkiS++eYbPvroIz755BOFn/tbb73F/v37G/VEqeu1LssyM2bM4OTJkwwYMIDExER+/PFHvv76a+bMmcOuXbt44oknWL9+vUJ4Ac6fP9/Azz0gIICioiIMDQ2bbMPY2Fjh515ZWVnv9TVFXV/0srIyAgICFCIaFRVFXFwcffv2JSAgQOHn/umnnxIcHFzPd6eWzZs3Y2VlRUlJCb6+vjzyyCNttvyFGj/39evXExQUxGuvvdbmuF1cXNizZw8JCQlIkqRIldT6uY8ZM4bU1FQmT57MpUuXmvRzb0rcz507R3x8PAMHDmTKlCns3r2b2bNnt3htUlJSyMjIIDY2FqDTpHC6rLiLVaoaRoketjoQfu4NEX7u7fdzBxg5ciSDBw8GaozRTp061UDcG7s2zs7OXL16lb/+9a9Mmzat3t2JJmkx5y5JUn9JkoIlSYqXJClOkqSljZSRJElaJ0lSkiRJFyRJUruhQk8zc3R0dYW4CxTc6+f+0ksvcfHiRb766qtWeZLLd73Wo6OjiY6OJikpiWeeeQZAaX/0w4cPK77ky5cv55tvvqGkpISAgAASEhKabaMpWvJzr63r2rVrirbb4+ceExPDiBEj1O7nrmzctX7us2fPZt++fYrxjFo/99p6MjIyMDExaZWfO9SkzJp73NS1sbS0JCYmhrFjx7JhwwbFTlCaRpkB1UrgVVmWXQE/YLEkSa73lJkK3Hf3bxHwpUqjbARJRwdjCysh7t0M4ecu/NzV5ed+7tw5rl27RnV1Ndu3b29wh9PUtcnKyqK6uppHHnmEVatWNTsrpyNpMS0jy3ImkHn3/4WSJF0C+gHxdYr9Cfj+rh1lmCRJFpIk9bl7rtowsRLi3t2o65ndq1evFv3cLS0teeCBBxR7c65YsYK5c+fi5ubG6NGjm/Rzv3TpEv7+/kDNQOvWrVvR1dVtMq5aP3cjIyNeffXVBn7uwcHB6Ojo4ObmxtSpUzEwMGi0jbp+7nZ2dg383OfMmcPGjRuZNm2a4vjChQtJSUnBy8sLWZaxtbXl559/bvY61vq51+6lWsuUKVPYsGEDLi4uODs7N+vnfvz4cbKysrC3t2flypVN3nl8++23LFiwAEmS6qUsWht3YWEhf/rTnygtLUWW5Xp+7osXL8bDw4PKykqCgoLYsGEDDz74IAcOHMDR0ZGePXvy7bffNntNfH19eemllxQDqrNmzar3fFPXJiMjg6efflpxV9We3ZpUijK+wLV/gAOQCpjdc3wfMKbO46OAT3N1tcfPvZa9n6yWN//tuXbXI1AO4eeuHMLPXaAqOsTPXZIkE2AX8DdZlgva8kMiSd
IiatI2jfaYWouJlTV/XIhqdz2CrkVn8sxuDOHnLugMKCXukiTpUyPs22RZ3t1IkQygf53H9neP1UOW5Y3ARgAfHx/53udbi4mlNeUlJZSXFNPDqGd7qxN0ETqTZ7a6EX7uqkXdfu6diRbFXaoZMt4EXJJl+dMmiv0CvCRJ0n+BUUC+rOZ8O9SZ656TjXU/Ie4CgSYRfu6dC2V67gHAk8BFSZJqr8o/gAEAsixvAA4ADwJJQDHQIT/Vdee6W/fr30JpgUAg6D4oM1vmFCC1UEYGFqsqKGURC5kEAoGgcbqscRgIcRcIBIKm6NLirm9giIGxsdi0Q9BmhOVvDdpo+ZuQkIC/vz8GBgasWbNGbe0sXLiwnv1BY8yfP7/ewrNaUlJS1DZBoEuLO9TMmBE9d0FdhOVvy3QHy18rKyvWrVvHsmXL1NrON998g6vrvYv2lUOIezOIvVS7H++++y7Ozs6MGTOGuXPnsmbNGsaOHcvf/vY3fHx8+Pe//82vv/7KqFGjGDFiBBMmTODmzZsAZGdnM2nSJNzc3Fi4cGEDy99aPv74Y3x9ffHw8GDFihVAzRfRxcWFZ599Fjc3NyZNmkRJSQk7d+5UWP56enpSUlKCLMv1LH89PT3x9PRkxIgRimX/jbUBNZa/Tk5O9V4fwNixYxWul1lZWTg4OAA11rmvvfaaoq6vvvoKqPFCGTt2LLNnz2bo0KHMmzcPWZZZt26dwvJ33LhxQP1e+cyZM/H29sbNzY2NGzc2+h4EBQVhZWWl1PsVGRnJ8OHDGT58eL0ZNa2NG2p8elxdXfHw8FCI9u3bt3nkkUfw9fXF19dXMR2zdoWvvr5+izF+/PHHrFu3DoCXX36ZBx54AIBjx44xb948oMYvyN/fHy8vLx599FGFdUTd92XTpk04OTkxcuRInn32WV566SVFGydPnmT06NEMHjxY0Ytfvnw5ISEheHp68q9//Uup66ksXdYVshYTK2uy0/7QdBjdjg/PfUhCToJK6xxqNZTXR77ebBlh+dsQYfnbuOVvawgMDOSTTz5hyZIlREREUFZWRkVFBSEhIQQFBZGVlcWqVas4cuQIxsbGfPjhh3z66ae89dZbijquX7/Ou+++y/nz5zE1NeWBBx5g+PDhiuczMzM5deoUCQkJzJgxg9mzZ/PBBx+wZs2aJj2S2kOXF3dTK2vu5OVRXVWFTjPeHwLtQFj+NkRY/jZu+Vv3TqwlvL29iYyMpKCgAAMDA7y8vIiIiCAkJIR169YRFhZGfHw8AQEBQE1HotYXqJZz585x//33K+5oHn30Ua5cuaJ4fubMmejo6ODq6qq4k1QnXV7cTayskeVq7uTnYmrVsBciUA8t9bA1wb2Wv6+88gozZsxQ7OyjLPJdO97nnnuu3vGUlJQGVrQlJSWN1nH48GF27doF1Nx6T5s2jQMHDhAQEMBvv/3WZBtr165tMq6WLH8nT55cr/zx48fbZfnbs2dPxo4dq3bLX2XjrrX8PXr0KDt37mT9+vUcO3ZMYflraGjY5lj09fUZNGgQW7ZsYfTo0Xh4eBAcHExSUhIuLi4kJyczceJEfvzxxza3Ufc11U0HqgutyLmDmA7ZXRCWv8LyV1nL39YSGBjImjVrCAoKIjAwkA0bNjBixAjFNn2nT58mKSkJgDt37tTrlUONq+SJEyfIzc2lsrJS8ePeHE29D6qg6/fcLYW4dyeE5a+w/FXW8vfGjRv4+PhQUFCAjo4Oa9euJT4+HjMzs0brDwwMZPXq1fj7+2NsbIyhoSGBgYEA2NrasmXLFubOnUtZWRkAq1atwsnJSXF+v379+Mc//sHIkSOxsrJi6NChjX426+Lh4YGuri7Dhw9n/vz5qt17VRnrSHX8qcLyV5ZluSg3R14zZ5oceeAXldQnaBph+ascwvK3+1L72ayoqJCnT58u7969u131dYjlb2elZrs9PbGQqRshLH8FnZW3336bI0eOUFpayqRJk5g5c6bGYuny4i7p6G
BsaSnSMt0IYfnbOekKlr/Z2dmMHz++wfGjR4+2a7pnLepcCdtaury4g1jIJBB0BrqC5a+1tXW3sfzt8rNlAEyFBYFAIBDUQyvEvbbnLnfA3FGBQCDoCmiHuFvbUFFWSnlJccuFBQKBoBugHeIuFjIJBAJBPbRC3E0t/7eXqkDQGoSfew3d3c+9ubiU8WvvjGiFuIueu6Auws+9ZYSfu/K0x69dk2iFuBvfdWET4t49EH7uws9d1X7uUNMpmDdvHi4uLsyePVvx41f3ur/wwgv4+Pjg5uZW7z1rLC5NoxXz3PV7GGBoYirEvQO58d57lF1SrZ+7gctQev/jH82WEX7uDRF+7u33cwe4fPkymzZtIiAggAULFvDFF180EOrVq1djZWVFVVUV48eP58KFC/Tr16/RuDSNVog73J0OKSwItB7h594Q4efefj93gP79+yv82p944olGUzo7duxg48aNVFZWkpmZSXx8PK6uro3GpWm0S9xFz73DaKmHrQmEn7vwc28PkiQ1+/jatWusWbOG8PBwLC0tmT9/PqWlpU3GpWm0IucOYqPs7oLwcxd+7uryc09NTSU0NBSo8S+69w6noKAAY2NjzM3NuXnzpuIOpKm4NI1W9dzv5OdRVVmJrp7WvCzBPQg/d+Hnri4/d2dnZz7//HMWLFiAq6srL7zwQr3nhw8fzogRIxg6dGi9FE5TcWkcZXyB1fGnKj/3WmJ+PyivmTNNzr99S6X1Cv6H8HNXDuHnLlAV3drPvZa6c93NbGw1HI1AnQg/d4GgZbRP3MWMGa1H+Ll3ToSfe+dC+8RdDKoKBBpB+Ll3LrRmtoyRqRm6enpC3AUCgQAtEndJkjAW0yEFAoEA0CJxB7GQSSAQCGrRPnEXA6oCgUCgXeJuamVFodhuT9AKhJ97Ddro575t2zY8PDxwd3dn9OjRals5qozf+/z58+utKq4lJSVFbbO/WhR3SZI2S5J0S5Kk2CaeHytJUr4kSdF3/95SfZjKYWJpTWVZGWXFzS+9Fmg3ws+9ZbqDn/ugQYM4ceIEFy9e5M0332TRokVqaac9fu/qFHdlpkJuAdYD3zdTJkSWZY1bodWdDmlo3DpHOEHrCNlxhay0IpXWadPfhMA5Ti2We/fdd9m6dSu2trb0798fb29v9u3bh6enJ6dOnWLu3Lk4OTmxatUqysvLsba2Ztu2bfTq1Yvs7Gzmzp1LRkYG/v7+Dfzca71ePv74Y3bs2EFZWRmzZs1i5cqVpKSkMHXqVMaMGcOZM2fo168fe/fuZf/+/Qo/dyMjI0JDQzE0NKzn57506VKgZuD/5MmTmJqaNtoG1NjKfvfdd9jZ2Sle37Jlyxg7dqzCVjgrKwsfHx9SUlKoqqpi+fLlHD9+nLKyMhYvXsxzzz2nMEyzsbEhNjYWb29vtm7dymeffabwc7exsSE4OBgHBwciIiKwsbFh5syZpKWlUVpaytKlSxsVxaCgoCYdMe8lMjKSBQsWANSzH2ht3JIksXz5cn755Rf09PSYNGkSa9as4fbt2zz//POkpqYCNT9cAQEB9e7E/Pz8SE9PbzLGjz/+GAMDA5YsWcLLL79MTEwMx44d49ixY2zatIlt27Zx+PBhVqxYQVlZGUOGDOHbb7/FxMSk3vuyadMmPvzwQywsLBg+fDgGBgasX78eqHEk/fTTT7lx4wYfffQRs2fPZvny5Vy6dAlPT0+eeuopXn75ZaWuqTK02HOXZfkkkKOyFtWImOuu/dT1cz948KDCvx3+5+f+6quvMmbMGMLCwoiKiuLPf/4zH330EYDCzz0uLo5Zs2YpBKEudb3Wo6OjiYyM5OTJkwAkJiayePFi4uLisLCwYNeuXcyePRsfHx+2bdtGdHQ0RkZGREVFNfBzj46OJiQkBCMjoybbiIyMVPi5HzhwgPDw8BavSV1f9PDwcL7++muFl05UVJTCU+Xq1asKP/daT5m6vjK1bN68mcjISCIiIli3bh3Z2e37Pj399N
N89tlnDdIirY07OzubPXv2EBcXx4ULF3jjjTeA//m51342Fi5c2Og1qrVgbozAwEBCQkIAiIiIoKioiIqKCkJCQggKCiIrK4tVq1Zx5MgRzp8/j4+PTwMPmevXr/Puu+8SFhbG6dOnSUiov99BZmYmp06dYt++fSxfvhyADz74gMDAQKKjo1Uq7KC6RUz+kiTFANeBZbIsx6mo3lZhYlWz8YAQd/WjTA9bHQg/94YIP/fm/dyDg4PZtGmTwpmyMby9vYmMjKSgoAADAwO8vLyIiIggJCSEdevWERYWRnx8vMIsrLy8XGH6Vsu5c+e4//77FTtUPfroo1y5ckXx/MyZM9HR0cHV1VWxM5g6UYW4nwcGyrJcJEnSg8DPwH2NFZQkaRGwCGjUja+9mFiK7fa6M8LPXfi538uFCxdYuHAhBw8ebPYHSl9fn0GDBrFlyxZGjx6Nh4cHwcHBJCUl4eLiQnJyMhMnTuTHH39s8+ut+5o6YtJHu2fLyLJcIMty0d3/HwD0JUlquHdXzfMbZVn2kWXZx9ZW9eZeej16YGhqJqZDajHCz134uSvr556amsrDDz/Mf/7zH5ycWr7TDAwMZM2aNQQFBREYGMiGDRsYMWIEkiTh5+fH6dOnSUpKAuDOnTv1euVQY0d94sQJcnNzqaysVPy4N0dT74MqaHfPXZKk3sBNWZZlSZJGUvODoTF1NbW0ojA7S1PNC9SM8HMXfu7K+rm/8847ZGdn8+KLLwI1dz51x2juJTAwkNWrV+Pv74+xsTGGhoYEBgYCYGtry5YtW5g7dy5lZWUArFq1qt6PRr9+/fjHP/7ByJEjsbKyYujQoY1+Nuvi4eGBrq4uw4cPZ/78+arNu7fkCQz8CGQCFUA68AzwPPD83edfAuKAGCAMGK2M17Cq/dxr2fXeW/L3/7dELXV3d4Sfu3IIP/fuS+1ns6KiQp4+fbq8e/fudtWnVj93WZbntvD8emqmSnYKTKysuXktWdNhCNSI8HMXdFbefvttjhw5QmlpKZMmTWLmzJkai0VrLH9rMbGypjg/j6rKCnT19DUdjkANCD/3zonwc4c1a9a0uw5VoYXiXjOWeyc3FzNbOw1HIxB0H4Sfe+dCq7xlAEzvLmQqFNMhBQJBN0brxF2sUhUIBAIh7gKBQKCVaJ24G5qYoquvLxYyCQSCbo3WibskSWJHJoHSCD/3Grq7n3tzcSnj194Z0TpxhxpfdyHu3Rfh594yws9dedpyJfXDAAAgAElEQVTj165JtG4qJNxdyJScqOkwtJrgLRu59cdVldZpN3Aw4+a3/AUUfu7Cz13Vfu5Q0ymYN28e58+fx83Nje+//15hnlZ73V944QXCw8MpKSlh9uzZivessbg0jdaKe3J4GLIsI0mSpsMRqJC6fu4VFRV4eXnh7e0N/M/PHSA3N5ewsDAkSeKbb77ho48+4pNPPlH4ub/11lvs37+fTZs2NWijrte6LMvMmDGDkydPMmDAABITE/nxxx/5+uuvmTNnDrt27eKJJ55g/fr1CgEAOH/+fAM/94CAAIqKijA0NGyyDWNjY4Wfe2VlZb3X1xR1fdHLysoICAhQiGhUVBRxcXH07duXgIAAhZ/7p59+SnBwMDY2DT3+Nm/ejJWVFSUlJfj6+vLII4+0a4HP008/zfr16wkKCuK1115rc9wuLi7s2bOHhIQEJElSpJFq/dzHjBlDamoqkydP5tKlSw2uUXN+7gCXL19m06ZNBAQEsGDBAr744guWLVtWr8zq1auxsrKiqqqK8ePHc+HCBfr169doXJpGK8Xd1MqayopySu8UYWRiqulwtBJletjqQPi5N0T4ubffzx2gf//+Cr/2J554gnXr1jUQ9x07drBx40YqKyvJzMwkPj4eV1fXRuPSNFop7nWnQwpx7z4IP3fh534vyvq5Aw3u8u99fO3aNdasWUN4eDiWlpbMnz+f0tLSJuPSNFo7oApirrs2IvzchZ+7uvzcU1NTCQ0NBW
r8i+69wykoKMDY2Bhzc3Nu3rypuANpKi5No/U9d4F2IfzchZ+7uvzcnZ2d+fzzz1mwYAGurq688MIL9Z4fPnw4I0aMYOjQofVSOE3FpWmkurMFOhIfHx+5uQvdHqoqK1g7bxajH52H/+xmHYsFreDSpUu4uLhoOgxFPrW4uJigoCA2btzYqWx/Fy5cyMKFC5sUxtbw9ttvY2Ji0iD3K+geNPadkyQpUpZln5bO1cqeu66ePkZm5qLnrqUIP3eBoGW0UtyhJjUjLAi0E+Hn3jkRfu6dC60Vd1Mra2H7KxB0IMLPvXOhlbNlQFgQqAtNjdEIBN2N9n7XtFfcrawpKcin8u40K0H7MTQ0JDs7Wwi8QKBmZFkmOzu70bn7yqK1aZna6ZB3cnMwt+ul4Wi0A3t7e9LT07l9+7amQxEItB5DQ0PFKt22oPXiXpSTLcRdRejr6yuW8QsEgs6NVqdlADFjRiAQdEu0X9zFoKpAIOiGaK24GxqboKffQ0yHFAgE3RKtFXex3Z5AIOjOaK24A0LcBQJBt0X7xV0MqAoEgm6I9ot7jlh0IxAIuh/aLe6W1lRVVFBSWKDpUAQCgaBD0WpxN7UW0yEFAkH3RKvFXSxkEggE3ZXuIe7ZQtwFAkH3QuvF3djCkuTIs5oORSAQCDoUrRZ3HR1d3MZO4FpUJIXZWZoORyAQCDqMFsVdkqTNkiTdkiQptonnJUmS1kmSlCRJ0gVJkjrVhpbuD0xGlquJDf5d06EIBAJBh6FMz30LMKWZ56cC9939WwR82f6wVIdFr94McPfkYvBhqqurNB2OQM2UFBaIdQ0CAUqIuyzLJ4GcZor8CfheriEMsJAkqY+qAlQFHuMnU5h1mz8udI29EwuyblNZXq7pMLocRbk5bHzxac79/JOmQxEINI4qcu79gLQ6j9PvHmuAJEmLJEmKkCQpoiN383H09cPIzJyLR3/rsDbbSnlJMd8te5HTO7ZqOpQuR/zJY1SWl3H2558oLsjXdDgCgUbp0AFVWZY3yrLsI8uyj62tbYe1q6unj9v940mOPMudvNwOa7ctJEecpbykhCthp0V6oRXIskxs8O9Y9ulXI/B7dmg6JIFAo6hC3DOA/nUe29891qlwf2AS1VVVxB4/oulQmuVy2CkACm7fJCvtDw1H03W4fvkSuZkZjPzTbIaNnUDM4f0U3L6l6bAEAo2hCnH/BfjL3VkzfkC+LMuZKqhXpVj1tcfedRixxw4jV1drOpxGKb1TREp0JM6jg4CaXrxAOWKP/46+gSFO/mPwn/04SBJnftqm6bAEAo2hzFTIH4FQwFmSpHRJkp6RJOl5SZKev1vkAHAVSAK+Bl5UW7TtxOOByeTdzCQ17oKmQ2mU5IizVFVW4jV1Bn0cnUmOCNN0SF2C8tISLp8JwXl0ID0MjTC1tmHElIeIO3mMrNQUTYcnEGgEZWbLzJVluY8sy/qyLNvLsrxJluUNsixvuPu8LMvyYlmWh8iy7C7LcoT6w24b940KwNDYpNMOrF4+cxIzWzv63OfMEJ9R3EhOFKZnSnAl9BQVZaUMGztRcWzkzEcxMOrJqe3/0WBkAoHm0OoVqvei16MHrkEPkBQe2ulmU5QUFvDHxWic/MYgSRJDfEYBcPV8uIYj6/zEHv8dy7729HV2URwzMjHFd8YjJEecJePyJQ1GJxBohm4l7gDu4ydTVVlJ/Imjmg6lHonnQqmuqmLo3Xy7tf0AzHv1JkmkZpol53oGGQnxDBs7AUmS6j3nNXUGxhaWhPywRcw8EnQ7up242/QfSB+noVw4drhTfeEvh4Zg0asPdoOGADUbfA/xHkVqbAzlpSUajq5lMhLiyb91o8PbjTv+O5KODq5BDzR4Tt/QEL9H5pKREMe16E6bLRQI1EK3E3cAj/FTyL2eTkZCnKZDAaA4P4+02As4jw6s1/t09BlFVUUFf1yI0mB0LVOUm8POVW9wZFPHOk9UV1URd/IYg0b4YGJp1WgZ9wcmYd
GrD6d++K7TzpISCNRBtxR3Z78x9DDqyYVOMrB65ewZZLkaZ//Aesf7OrtiaGzS6adERvy6i8qKclIvxlBWXNxh7abEnOdObg7Dxk1ssoyunh4Bjz3B7dQUEk6f6LDYBAJN0y3FXd/QEJcxY0kMO01pUZGmw+FKaAhWfe2xGeBQ77iunh6DRviQfD6805qe3cnLJeb3Q9gMcKC6qrJD0x+xwb/T09yCwSN8my3n7B+IrcNgTu/YSlVlRQdFJxBolm4p7gAeE6ZQWVFOfEiwRuMoys0h7VIsTv6BDQYEAYb4+FFaWMD1KwkaiK5lIvbtoaqigul/ex0jM3OSwjtmALi4IJ/kyLO4BI5DV0+v2bKSjg6Bc58i/9ZNLhw51CHxCQSaptuKu53DYHoNvo+LRw9pdGD1SthpkGWGjg5s9HmH4V7o6Op1ytRMcX4e0Yf3MzQgCOt+/RniPYprUREd0ju+FBJMdVUVw8ZOUKq8w3Av+ru6E7Z7e5cYoBZoLz+teoOY3w+ovZ1uK+5QYwWclfYHmYmXNRbD5dAQbPoPxNp+QKPPG/TsSX83904p7hH79lBZXs6ohx8Datw3y0uKSYu7qNZ2ZVnm4rHD9HF0xqb/QKXOkSSJMXOfojg/j/P796o1PoGgKQpu3yL1YjRVlZVqb6tbi/vQgCD0DQy5eEwzA6sFWbe5fjm+wUDqvQzxGUVuZgY519M7KLKWKS7IJ/q3/QwdXdNrBxjgPhw9AwO1p2ZuJieSnZ7a7EBqY/R1Goqjrx/hv+7qdIvYBN2DtPiajo+9yzC1t9Wtxb2HUU+GBgSRcOZkh87yqOXKXQdI5yZSMrUM8a5ZrdpR+WxliNz/MxXlZfjd7bUD6PcwYNBwb5IjwtQ67TD2+O/o9TBo8bo1xpg//4WK0jKxoYdAI6RfisXQ2ATbeyZPqINuLe5Qs2K1sqyMhNPHO7zty6Eh2DkMwbJPo3ubKDCzscXOYQjJkec6KLLmKSksIOrQPpz8xjRIJzn6+lGUm8ONq4lqabuirJRLp07gNGo0Bj2NW32+tf0AXO9/gOjf9lGQJSyBBR1Lenws/VyGIemoX3q7vbj3HuKE7cBBHT7nPf/WDW4kXVG69znEZyTXr1zqFOmE8wf2UlFagn+dXnstg7x8kXR01HaXkXgulPKS4lanZOoy+tFaS+AfVBiZQNA8hdlZ5N3MpL+r+lMyIMQdSZJwHz+ZW9eSuXk1qcPavRx6NyXjP0ap8kN8/ECWuarh3ntpURHnD/7KfaNGN5iXDzWGXf1dh6ltADg2+HfMe/VuV87SzMYOz0nTiD9xjOz0VBVGJxA0TXptvt3VvUPa6/biDuAyZix6PQy4cLTj5kBfDg2ht6MT5na9lSpv5zAYE2sbkiM1O2sm8sBeykuK8X9kbpNlhvj4k52eSs511W7IlXfzBmlxFxg2dmK7b2tHznwUfUNDfnzzNbavXM6xb7/iwtHfyEy8TEVpqYoiFgj+R1r8RQx6GmM70KFD2mt+9Uc3wdDYBGf/MSScPsH9Tz5DD0MjtbaXe+M6t64lc/+Tzyh9Tq2RWNyJI1SUl6Hfw0CNETZO6Z0iog7+gqOvP7YDBzVZztFnFMFbviI5IgyrGY+orP24E0dAknC7f3y76+ppZs6s19/i0qnj3E5NIfb4ESpq579LEha9emM7YBA2AwbW/DvQAQu73h2SKxVoJ+mXYunn4oaOjm6HtNflxF2WZUqiounpNUKl9bo/MJm4E0c5v38v940KwMjMDEMTE7W8EZfPhADg5KdcSqYWR59RxBzeT+rFGIZ4j1R5XC0RdfBXyorv4D+76V47gJmtHXYOQ0gKD8NXReJeXV1F3PGjOAz3wtTaRiV12rsMU6R35Opq8m/fIis1hdup18j6I4XbqSkkhofC3UVutgMcmPf+2hZXxAoE91KUk01u5nU8xk/psDa73Kc0b+dObrz5FgO2bMHYb5TK6u3r7ILNAAdO79jK6R1baw5KEkYmph
iZmmFkZo6RqRk9zcwxMjPDyNQc24EODBg2vNVtXQ4Noa+zK2Y2tq06z97VnR5GRiRHnu1wcS8rvkPkgZ8Z4jMKO4fBLZZ39PXjzM4fuJOXi7GFZbvbT70YQ2H27Vbd7bQGSUcHi169sejVG0dfP8XxirJSstPTSI4II2z3dlJjYxjk6a2WGDSJLMsknQvFYYS3Ru4KtZ20S7EA9Hfz6LA2u5y4mz/0ENkbv+bGO+8w+Oc9SD16qKReSZJ49M3V3E65RnFhPiUFBZTU/luQT3FhPrmZGVy/comSwgLFPG7vaTMJeuJppXv42elpZKWmMG7+olbHqKevj8Nwb65GnkOuru7QFEHUoX2U3bnTbK69Lo6+fpz5aRvJkWdV0luJDf4dQxNTxQ5VHYW+gSG9h9yHzQAHzh/8lctnQrRS3DMuxfHLp+8xes48pd9jgfKkx1+kh1FPbB2aTmeqmi4n7jqGhvR+8w3SFj1H9uZvsXn+OZXV3dPMnIEeni2Wk6urKS2+Q+jOH4jc/zM5GWlMW/p/Ss27vhx6EiQJp1EBbYpxiM8oroSd4kZyIn3uc25THa2lvKSYyP0/M9jLl16DHZU6x2aAA+Z2vUiOaL+4lxQVkhQeisfEqejp67errraip6+Po68fSeGhVFUuRldPM3Goi6S7A/UXjhxi1Mw56Oh2TF64u5AWH0u/oa4dlm+HLjpbxiQoCNPJk8n68kvK09I6vH1JRwcjE1MemP8cE599iT8uRvPDG8vIvXG92fNkWebymRDsXdwwsbJuU9uDRvgg6eh06KyZqEP7KC0qbFWPrmYfWD/+uBhNeUn7Vv8mnDpOVWVlvQ2wNYHz6EDKiu+QEtO5N09pC1cjz2FoakZRTrbY2lHF3MnLJfd6Ov07aApkLV1S3AF6/ePvSLq63Hj3XY26OnpMmMLsf75LcX4eP/zzVVJjY5osm5WaQs71dJz9g9rcnpGJKfZD3TrMSKy8tISI/T8zyNOb3o5OrTrX0dePqooKUmLOtyuG2OAj2A0aolSuX50MdPfE0NiEy6EhKquzqrKS/yxfStShX1VWZ2vJuZ5ObmYG/o/MxdTGlpjD+zUWizai8JPpoMVLtXRZcdfv1QvbpUu4czKEwsO/azSW/m4ezHvvXxhbWLJz9ZtEH27czvNy6CkkSQenUaPb1d4Qn1Fkpf1B3k3171ka/dt+SgsL8GtDHrafsyuGpmbtWq16I+kKt1KS27UiVVXo6unjOHI0yRFhVJaXq6TOq5HnuHUtmbDd21VWZ2up7Sg4+o5i+ISppMZeIDu94++ItZX0+Fj0DY3oNUi5lKaq6LLiDmA5bx4GLi7cfO89qoruaDQWi169mfvuGgZ5enN00xcc2fRlPVtPWZa5HHqS/sM86Glu0a62ao3E1N17rygtJWLfHgZ6jKCv09BWn6+jq8sQr5FcPR/eJovTqsoKfv/6c4zMzHEZM7bV56sD59GBlJeUqGzHqZgjB9HrYUBxfh6XNOBvBJAceRZbh8GY2djhPn4yunp6RIveu8pIv3Q3397B4xhdWtwlPT36vL2Cylu3yPrsM02Hg0HPnvzptTfwnfEIMYf3s/v9tygpKgTg1rVk8m5ktmjvqwwWvftgbT9A7Xn3mN8PUFKQj//sx9tch6OvH2XFd0iPj231uWG7t3MrJZmJi17C0NikzTGokgFuHhiZmSvWKrSH/Fs3+ONCFL4zHsZ24CAi9/3c4SnG4oJ8rl9OUHQYepqZ4+QfSPzJo2JTExVQnJ9Hdnpqh1j83kuXFncAo+HDsZgzh5ytWym9dEnT4aCjo0vQvKeZ8uLLZCTE88M/XyE7PY3LoSHo6OpyXztTMrUM8RlF+qVYxY+HqinMySL8190McPekn7NLm+sZ6OGJXg+DVg/SZSZd5uyeHbjdP577fP3b3L6q0dHVxWnUaJLPn2u3TcGFo78hSTq4PzAZn4ceJjs9lZToSBVFqhzXoiKQ5Woc60wx9Zz0IOUlJVzS8B
aU2kB67fz2Dh5MBS0QdwC7V15G19ycG2+vVKuPeGtwu388c1a8T3lJCT+88Sqxx48w0N0TIxNTldTv6OOHXF1NSpTqN6S+Fh3Jf/5vCRWlpQT++S/tqkvfwJCBHiNIighTuldaUVbKwc//hYmldZvWA6gbZ/9AKsvKuBoV3uY6qioriQ3+nUFePpha2+DsH4iJlTUR+/aoMNKWSY48i4mlFXaDhiiO9blvKHYOQ4j+bb9GJytoA2nxF9E3MFR6CrEq0Qpx1zU3p9fr/0dJTAx5P+3UdDgK+jq5MO+9TzHv1ZuSgnycR7d9lsy99B5yHz3NLUhSoUtkdVUVIT9sYff7KzC2tGLe+/9q9QyZxnD09aMoO4tb15KVKh/y43fkXk9n8gtL2+TZrm76ubhhbGHZrtTM1chzFOfnKdYA6OrpMWLKQ6TGxnAr5aqqQm2WyooKUmKiGOw9st7m7JIk4Tl5Gllpf5CRENchsWgr6fGx9HV20YhlhVaIO4DZjBn0HDmSW598QmV2tqbDUWBmY8fclR8xbclrKh0UlHR0GOI9kpToCCor2r8hdWF2Fjve+Tvn9u7EffxkHl/9iWL7vPYy2MsXSdIhKTy0xbKpsTFEHfyVEVMeYqB7ywvKNIGOji5OfmO4FhXR5jn8MUcOYmJtU2+1q8eEKegbGnVY7z097gIVpSWNrvodGhCEgbEx0b+JgdW2UlyQT1baHxpJyYAWibskSfRe8RbVJSXc+uhjTYdTD31DQ4YG3K/y0fIhPn6Ul5QofKLbytWocL5/fQm3Uq7x4JLXmLToryr1F+lpZk4/F9cWp0SWFd/h0BdrsezTj8DHn1JZ++rA2T+QyoryNs1Yqh1IdR83qd5nwtDYBPcHJnH5zEkKs7NUGW6jJEWeQ8/AgAFuDf2R9A0MGTZ2AonnzlCUm6P2WLSRjEs1dz2aGEwFLRJ3AIMhQ7BesID8vXu5c7ZzbEmnTga4D0evhwFxJ462qQdZVVnJyW3fsueDlZhaWfPE+2txCbhfDZGCo49/zdz8G5lNlgne8jVFOdlMXfwK+gaGaolDVfR1GoqJtQ0JbVjQ9L+B1EkNnvOaOgO5Wlb7oiZZlkmOPIuDhxd6TfgzDZ/4INVVVRrbQL6rk3bpIno9DOjteJ9G2tcqcQewef459O3tubFyJbKGFoV0FPo9DHD2DyTh9Ak+f2YuO1bWpFVupVxtcSCsIOsWO1b+nfBfdjF84lTmrlqDVd/m93JtD46+dzf5bmLWTFJ4GHEnjjBy5qMd5pnTHiQdHZz9xpASfZ7SO0VKn3fvQOq9mNv14j6/AC4cOdRu24bmuJVylaLsrGbdRS379GOgxwguHDlEdVWV2mLRVtLjLtLXaajGfIi0RtzLSyvJSi8i5XIROY/+g1gdL35+4xA7P4wg5aL6b3E1xaTn/sqcFe/jPX0WpXeKCPlhC/95fQlfvfAUh75cy+XQEEqL6otPcuQ5/vP6UrLSUpi29P+YsHCx2m1eze16YzvAodHUTHFBPr9/vR5bh8H4z/6zWuNQJc6jA6muqmzVCtx7B1Ibw2f6TMqK73DxmPpWXl+NPAeSxGAv32bLeU6eLvxm2kBJUSG3NZhvhy7oClmQVUJqfA4FWSUUZJVSmF3zb+md+oOKuvaBGGXeAsmCw5vieHS5D5a9O9/Mi/aio6tLf1d3+ru6E/T4fIpyskm5EEVKdCTJ4WHEHT+CJOnQ5z5nHDy9avZAPbAXW4fBPPS317Hso77e+r0M8fXn7O7tFOfnKVbpyrLM7xvXU3aniEffWNWl3BZ7D3HC3K4Xl0NDGDZ2glLnNDaQei99HJ3pN9SN8wf3MmLKdLWsbEyOPEvf+4a2uFp6sJePwm+mrU6mnQFZlokN/h1Taxschnupvb2MS3Egyx3uJ1MXpcRdkqQpwL8BXeAbWZY/uOf5+cDHQO2mmetlWf5GhXEquJ1ayIkfLqOjK2FqZYiZjSGDvWwxszbEzMYIM2sjzGwM0S
3K5tq06WAUyCmzRzj4VSyzX/emh2GX+z1rFSZW1gwbO4FhYydQXVXFjeQrXIs+T0pMJGd++gFkmeGTpjH2yWeazLWqC0dfP8J2/Ujy+XO4j6vJN18KCSYpPJSgeU83uuF2Z0aSJJz8A4nct4eSwgKMTM2aLV87kOo/+/EWBdtn+iz2rlnFlbOnGarCKbRQs0Dt5tUkAh+f32JZHR1dhk+Yyqn/fk92ehrW9qqZQdWRlN4p4tAXa0mOCMOiVx8W/Htjvamf6iAt/iJ6+j3o7ai5FGOLSidJki7wOTARSAfCJUn6RZbl+HuKbpdl+SU1xFiP/q5W/OW90RhbGKCj08wbZNoHm0fHcuu7A4x5ZgBHr3pyfGsCE59xU/sb21nQ0dWlr5MLfZ1cCJgzj+KCms1HNPUFtXMYjKmNLUnhYbiPm0RB1m2OffsV/Ya64j19pkZiai/O/oGE791J4rkzLfrWNzeQei9DvEdi2acvkfv24OwfqNLP7NXIcEUbyuA+fjKhO38g+vB+xi94XmVxdAS3Uq7y66fvU5B1C4fhXqTEnCcnIw1r+wFqbTc9PpY+TkM1tv8AKJdzHwkkybJ8VZblcuC/wJ/UG1bT9DDUw9TKsHlhryiFfS9jVfINBpYVSD98je+E3iRG3OLi8fSOC7aT0dPMXKM9L0mScPT1I/VCNOWlJfy24d9UV1Ux5YWXO3QTA1Vi5zAYyz59W1zQ1NJA6r1IOjp4T5vJjeRElS8kSo48i0WvPlgpuY6hq/rNxAb/zo9vLKOyopw5Kz5g0vNLANrlUqoMpUVF3PrjqsamQNaijLj3A+r6f6bfPXYvj0iSdEGSpJ2SJGlOQbKS4JsJELEZKXApfd9YRlWpTN/f3sHBw4bTPyWRmZyvsfC6O44+flRWlLNv7YekXozm/iefwaJ3H02H1WYkScLZP5C0uIvcycttspwyA6n34hr0AIamZkTs+1kVoQI1/vypsTEM8RnZqruBruQ3U1Fexm8b1vHbhn/T19mFJz/4N/2cXTC1sqHX4PvUbriXcbkm397fTXODqaC62TK/Ag6yLHsAvwPfNVZIkqRFkiRFSJIUcfv2bRU1XYeLO2Hj/VCQAY//BBPfwXDqIqx9DCk4GYO/Yw4m1ob8tvEixQXaPU2ys9JvqBuGxiZci4rAwdMbjwkdtxu8unD2D0SWq0k8e6bJMsoMpN6LvoEhnpMeJDnyLDnXM1o+QQn+uBBFVUWFwgVSWbqK30zejUx+fPM1YoMP4/fwYzzyz3fqDRo7+owiM+lKsz/E7SUtPhZdfX36aDDfDsqJewZQtyduz/8GTgGQZTlbluWyuw+/ARr9BMuyvFGWZR9Zln1sbW3bEm/jVJTAL0tg1zPQaxg8fwqc7uY1dXSwWfoaPcwqyF75dyY9OYTS4koOfxNLdVXnMBnrTujq6eE4cjSGJqZMeu6vWjH+YTPAAWv7AU3u0NTUilRl8Jw0DV09Pc4fUE3vPTnyHAbGxvR1dm3VeV3BbyYpPIytf/8bhbdvMev1FQQ89mSDdN8Qn1FwdwGXukiPv0if+5w7fMLCvSgj7uHAfZIkDZIkqQfwZ+CXugUkSap7Xz0D6Djv3dtX4OvxcP47GPMKzN8P5vWzRjoj5tD3AUMqs/Oo3vYFYx93JuNKHmF7O8agqbUUR0VRla+9qaMHnl7E059+ialVy7nnroKzfyDpCXEU5TT0NWrNQOq9GFtY4ho4jrjjRykuaN9norq6iqvnwxnk6dMmI6vW+s1UVlRw6dRxtr+9nO9fX0LiuTNq6fVXV1Vx8oct7F2zCovefXjig383OX/fZoADZra91LbRTVnxHW5du4q9i2ZTMqCEuMuyXAm8BPxGjWjvkGU5TpKkdyRJmnG32BJJkuIkSYoBlgDz1RVwPWK2w8axUHQDntgFE1aAbiMfWl19jB5eipVzEXnbt9NfJw23wL5EHU7lapQa0kNtpDInh/SXX+aPuY+TumiR1q6w1TcwbPduVO
1BLi8nb+dOkh+cRtrzL6jkOjuPDgRZ5srZ0/WOt3YgtTG8p82isqKcmCa2b1SWzMQrlBTkN2oUpgzK+s3kZmZwYutmNr7wFAc+W0NRTjZV5eX88sl7bH/7dTITL7f1JTTgTl4uO1e9QfjenXhMmMKfV36EuV2vJstLkoSjzyhSL8a024+/MTIS4qkJEZQAACAASURBVJHlavprcH57LUrl3GVZPiDLspMsy0NkWV5999hbsiz/cvf/f5dl2U2W5eGyLI+TZTlBnUFTXgx7F8OeRdDXsyYN49jCIhLPediO6oG+ZQ8y33yTgBn9sRtoytHv4sm7qb5l3sogyzIFBw5wddp0Co8cxfxPMyiNucDNTmaA1tWpLi4me8sWkiZOIvONN5GBwuPHub58ebv3AbDqa4/twEENZs20ZSD1Xqzt+zNohA/Rh/e3a5/V5Miz6Ojqtirvfy9N+c1UVVZy5expflr1Bpv/9hyR+3/G3nUYs/+5igVrv+KpNZ8z8dmXyM28zg9vvMq+tR+Sf6vtewBnpaZw/D+b2LJsMZlJV5jy4stMfPYlpVIhQ3xGUVlRTsrFqDa33xRp8RfR1dPrFBYaXW9Fz60E+Gk+3E6AoNfg/uWN99bvpUdPdMa8QJ+0D0kNLifn8/VMeW4pO1aHc/Cri8x+3Qd9g46fjld5+zY33nmHwt+PYOjuzoDVqzB0ckLXwpKc776jp9cIzB58sMPj0iaq8vLI2baN3P9spSovj56+vvT8v3c5GdkDXbcsXA68ga7le/R645/tGgNw9g/k1H+/pyDrNmY2NWNKbRlIbQyf6Q/z07v/ID4kGI/xk9tUx9XIc9i7DGuXR35dv5lRM+dQlJPNhaO/ERt8mDt5uZha2xIw5wmGjZuIiZW14jyJGkvjoQFBhP+6h4h9u0kKD8Vz8nRGPfyYUpvYlBQWkHD6BHEnjnLzalLND9UIXwIeewLbViyA6zfUDQNjY5LDz6p8l6/0+Iv0dnTqFMZ3XU/ci25ASS48uRuGPNC6c30XYnxqLRa+vcj5/nvMpk5h4jOu/PpZDMe3JTDhadcOG+CTZZmCX37hxnvvI5eUYLfsVazmz0fS06MotwyT5/5KyYULZL7xJgZDh2IweHCHxKVNVNy6Rc6W78j773+pLi7GZOxYrBct4qauPQe+jUfSLaOipCcXJ7zDsO0r0LW2wvbFF9vcXq24XwkNweehh1u1IrUl+ru5Y+cwhMj9P+M+biKSTusmuuXeuE52eqpKZid5Tp7O3o/f5Yc3lnHzWhIAg0f44DFhKoNGeDe7ZqGHUU8C5sxj+IQpnN6xjcgDe4k7foRRDz+G5+TpDRb9VFVWkhITSdzxoyRHnqO6qhI7hyGMm7+IoQH309PMvNXx6+rpMcjTh6vnz1FdXaWyNRblJcXcvJbMqJmPqqS+9tL1xH3wWFgaDfpGrT/XyAJ8F2BXvJ6iP9y4/s9/Mmj3bkZOH8S5X6/Re7A57mPtVR1xAypu3uTGircpOn4cI09P+ry3WiHesSfSCdmeSHW1jJXTYkzKg8n5v7V4frUKI+vml7cLaihPSyP7m03k796NXFWF2YMPYv3ssxjcdx/h+68Rvv8itgNMmfq8O1lphRzaGMvF+9+CL95Fz8oKyz+3zbzMoncfeg125PJdcW/PQOq9SJKEz/SZHFj/CdeiI1s0/LqXq3d37FJ2VWpzDPbywbKvPYXZtxk1cw4e4ydjZmvXqjpMrKyZ/PwSvKY+xMlt33LiP5uI/m0fgY/Px8lvDFmpKcSdOMqlU8cVXkQjpkzH7f7x2A4c1O7X4OjrR8LpE1y/koD9ULd21weQcfkScnV1pxhMha4o7tA2Ya/F70V0wzbQ56EBpG06T9aXX+Lz1yXcTCng1E+J2PQ3pc+Q1vcGlEGWZfJ37+bmBx8iV1Rgt/x1rJ58EklXl6rKakK2XyEu5DoD3a3pM8Sc9IRcMnqPJrUKzv8zHDsHM+ydrbAfaknvIebo91BfGi
k1Ppuwn69iamVIf1cr+rtYYm7bU23ttRe5upris2fJ+2knBYcOIenqYv7ww1g/s4AeAwZQVlL5/+2dd3gcxfnHP3tVV1ROvcuSLbk3yb0DwRhDsHEBG/hBIIAJgRAILQlJSCFAAgRCKKaXYIduG9vEBoMbNi6SbbnLRZbV20l3ul52fn/sWbZcJdu4KPd5nn1mdm+1t7N7+s7MO++8w+JXijmwtZEew5IZe0N3NDo1kbERXHlXX/47eytbRv0W8cSTqGMsRE04PdNH9+GjWfnB21irKs94IPVo8oaPZuXcd1n5wdvEZ2R1SFD3bVxHfGYXohOTz/g+VCo1N//tRVQq1Rn3SBKyspn6mz9xYEsRK//9Fguffxpj9GxctmZUag1dC4bQe9xldOlfcFaXquvSvwCVWsO+jevOmriX79gaCvnR46xc70yRzteEhEGDBomNG8/+4s7t4otfwuYPqGqaiW3pt2R/8jFkdOXjJzfgaPLS/7IMBk3scsIgY+7Nm2l47XVchYVoLBbU8XFo4hPQxMeHtjjU8fGhY3FoYmMJNDRQ/fs/4Fy9GuOgQaQ88Rd0WVnK9Vp8fDl7K9V7beRfkcXQSTmt4RWCfpndz79H6bJtOPv9iEaHHlkWqDQSydnRpPewkDMwgbhU81l5NHJQZv0XpRQuKSMqLgI5IONoVgbxomJ1pHWLJD03irQcE/oINQihbIA6Pv6c+637Dhyged48bAsWEKiqRhUZScz06cTecgvaJEX8rFVOFr9aTEuDh1HX5dJnbNox93lwRyOLXynG6GlkQOFzdH3pWUzDO26PtdfX8fo9t5GS14Pqkl1Mfvj3Z6W1fIjSzYUsfP4pVGoNE+99sF22fI/Dwct33MCQSdMYdYYLnv+QyHKQHSu/Zc/6NXTpN5AeI8eeMhjbmfDJE7/DXl/Hbc/PPivXm/PYr5AkFTP//MM6QkiSVCiEGHTK8/4nxb1xH/xrEMH+s9j3zHdoEhPI/vBD3G7B2nn72LWmGlO0jhFTu5E7OAlJkhBC4Fq7lobZr+Fatw51dDSR4y9HdjoJ1DcQaFA2uaXl+N+pViPp9ST+6gEsM2e22kwbKlpY/PJWXC0+Lr25B3mDj21ZCVmm/K67cK39ntR3/k2TPo2K3U1U7m6ivrwFCegzNp2h12SjN546UFHQ4SRQV0egvv5wWl9PS42dDY7eWNVJpNavI3fnXFSyH7chEaulB9bYnjTF5BLUGEDIRLWUEWvdSWzTLqLspUSOGUXGv/6FdBaDJdWU2ljz6V7S8ix0K0gkNtWE7HBg//JLbPPm4y4qApUK08iRxFw7GfOll6KKODyYtW9THcve2YlGp2LCnX1JzT2xC2b5LiuLX9qC3tVA/raXyH3jXxj6dtylbc5jv6J6z27McfHc8eKbp27dykHogN23qbqSL557kvryMoZNuZ7h02ae1G68c/VyFr/4DDf85dkLwovjQmHzkkUse+sVfvLcK2e8XrDP4+Zft17P4GumMnrmD7tEZFjcT8XHt8Ker7D3fZHKh35Dwv33Ez/rTgBq9ttY+Z8S6g+2kNotmoIuTQQ/fA3P1q1oEhOJve1WLNOnozId63Uge70EGw6LfaC+gUBjA8LtJmbGDHTph236ewvrWPbuDvRGLRN/1pfErBO3UgJNTZROnYqERPZnn6KOUUTK3eJjw6IDbFtRQUSkjpFTupI3NPmYlmmgsZGGl17CNn8BstN5zPUbkwewI/dGZJWGAapCusTUo4nSI2l1oNGBRo+k1iGr9TR6IqlpMVPdHIG1RYtAQqMK0qVkHn0HRZHy5F/PSgve4/Dz4RPr8boDBLxBhACz5CC+fA2J1RuwJBuwXDuZqB9f09pKP4QsC9Yv2E/hf8tI7BLFlbP6YLac2oOhsqSJhf/ags5RT8GeN+j+3qvosztm4y1cNJ/l773O8Gk3MGL6DSc+0dkI378M61+D3PEw+WXQtG/RFL/Xw7K3XmX78q/J7NOfq37x0A
nnDix8/mnKd2zlrlff6/BAbGfG3lDP6z+/ldE3/IQhk6ad0bUObCni07/+nqm/+dMPHi8+LO6noroYZo+GS39HxSflOJYvJ3ve560Dm0Gfn02vfUXRFoFfpSfTXsSQq7NJmD4JlU6ntP4ProWskRDbsX9+IQvWLypl46IDJGVHceVdfTFFn/qf2l1czIEbb8I8YgTpr7zc5h+1/mALK+buprbUTmpuDGNm5BGXZkb2eLC+9z6Ns2cje9xED++BPkGPJiKIRu9DpfFQaB3KFusY4rQHmBD9d2I0Ve0riMaAVx1Hhb8/O+yjOejoTt9ts+kxfQSJ993XoWdyNEIWLHqlmPIdVi5N243/6y+oIZW6lCE0ReYAEjFJRroVJLa26A9VKB6nn6/e2s7B7VZ6jUxhzIzuqLXtF7Xqvc188c9NaFoaGFz5AT3en4026cQTY47G43Cwau47jLz+/47vzdFSC2v+CRvfAr8bW9JETDVfockeCjM+gIj2j/ls/XYp37z5KhFmM1f/8lHSerQNKxAM+Hn59hvpPnwU42f9ot3X/V/h/UfvQ6PVnbEpZfV/3mP9/E+45+0P0UWcwZhgOwiLe3v491So2kzg/5azb/J09NnZZL71Jrb582l84038lZVIeX0oH34Hew5qiDBrGTa5Kz2NK5AWPwD+0OSn+Dyl5ZV3BWQOB/WJzRI+T4Bl7+xk/+Z6egxPZtwNPQ4Lj8cGVZugZhuk9Icuo+CoFrB1zhxq//TnNj2NQwhZsHNNNWs+34vPHaBHlp+Upf+AyjLM/dJJzNqO3uQEJDBYsGu6srTqFmqdafTJ3MfI/Eo0kRYwxoEhVvEukgPKpDF/aGuTdypxffwuAg0H+XzLJKz+LAo2PE3Xh+/CMuP60341RUvLWPvZPno1f0ty8WeYR48m+tprMV96CR4P7N9cz97CWqpKmhGCVqFP6hLFqo9KcDR5GTMjj96jT2+lqZr9NhY8X4TG3sCQ5nn0fOel1t7SaWOrgO9egMJ3QfZD3+kcTJjFwjnNJEW1cK3xDlQJuXDTJxCV2u7LHopZbquvZcyNt1Jw1eTWiq5s62Y++ctjTHrod3Q7ycxU38GDVD3yKDHTphIzdeqZlfMiYs3Hc1j76Vx+Nvv9M5o1Pfd3DyFkmRueePYs3t3xCYt7ezjwHbwzESY+g606kapHHkVlNCK7XBj69ydu1izM48YiqVTUH2xh5dyd1JQ6SNSWMCZvPUmTfwYVG2DPEuVash/0UdD1Esi9AnIvB/Nhc4G9wc3iV4qxVjkZMTmL/j0bkKo2QVURVBZC496295c+GEb/CvImtIq8EIKqBx/C/uWXZL71FqZhx/7DNq5cx3dvbqDc0At9wM4w44f0tixG6jsVxv0aYnPYX2zlm/d2IsuCS27qQe6g9rdMj4scxPHBvXy09jJUfkH+90+R/cLfibz0kg5fqnqfjc+fKSTRvoM+u94h418vnnBw02X3sX9THXuL6lqF3hil4co7+5LczXJGRaorszP/2Y2o7FaG+76ix5vPozKcRqvMWgqr/wGb5wAC+s+EUfdTtTfAF2+WovK78Olj6Cq2cVnC39DGxSjhNBLabx/3upz89+Xn2bthLd0GD2fC3b9EbzTxzTuz2fr1Eu5+c84JJ9bITicHZszEu2cPANFTppD8u8dOr6wXGXUH9vP+I79g/F2/aF0drKP4PR7+ddsMCq6ezJh2rG6Fx640moyxp/V9YXFvD0LAm+PBUYO4p4jq3/2BQEMDcbffjnHI4LZ249odiI9+QklFCmu8P8Pl1ZHdLx6zJQJdhBqdLoiuZR866xZ0dRvQeSvRSW50yTno8kbQoB/Mks/9iGCQ8ZlzyfQsVF4wQGQKpOZD2kAlTewFuxbCd/8E20FI7A2jH4Bek0GtQXY6Kb3ueoLNzWR/9lmrvdm7v5S6Z57B8c03aOItSP0TKZSuoiHQlfRsNWNuGURUvIG1n+1jyzflJGRGcsUdvc+ei2PAR/
XrDzJvy1XEucvou+VVurz7Nob+/dt9CY/Dz9w/rEZYGxi6fzZdX32RiJ49T/2HdbtwLX2eyq1lpOm2Y9S6lOcalRJKUw+nh/KRKaA9uR2+/mAL8/6+HqmlmRGqleT++WG0ae3sDdSXwOrnoPgjUGkg//9g5H2I6AyqP/iMxd9IgMRVV0ewdaODPdY4eu/9gL6pq4jt4UW66SPIbH8cGCEEhYvmsfKDt4lOTOLH9/+a+c88QXxmFtc+/PsT/k3lAw9gX7KEiDEJGLxBmtc2ou/enfR/vtDq0dVZEULw+j23kdilK5Mfeuy0rlG8bAlfvfbiye3tQiCXrMS98FXcG9diGH0VprtfOq3vC4t7e9n9JcydAVNeh37XHfu5EFD0Hnz5sNIqn/IavtTRbFhUyr5N9fjcAXzuAO15jBZ1BROTXyQmKw3S8kOCnn/iLnjQD9s+hVXPQcNusGTDqF9C/5l4yyoonX4dET17kvaP52h8dTZNH36ISq8jboiJ2PhiVIk5yJf+kW31A1m3oJSAL0hUvIHmWhf9LklnxJRuHbJFtwu/h23/+DMr9l9GTsMKulZ+RZe5c9B16XLKPxWyYP4fl1FVLTOsdi59XvoLuvRTCGnDXljxNGz9GHQmGHIHRKWBvUrZWqrAXq3k/ccOJJPSH4b+DPpMOeFgZkNFC/OeXodwOui/8w2ypowl/s47UUcfxzYuBJSvg3WvwvZ5oImAQbfBiHshKoVAUxPlv/szK5oH4jUlMPnnPUnql0UwKDP/6e+pLXNQUPh34nWVJBXYMP38Nehx1Smf3ZFU7NrOouefxmW3IwcDXH7nvScMWdD45pvU/f0ZWoYPZ4P+JlIi9jHW9Vca1xsRKj0pTz9N1OWXd+j7LzaWvfUq2779irvf+KDDYQMc1kbe+dXdJHTJ5rrf/bV1HEwIge/AAdzrVuNePh/3tp14G4MglAZj3A2TSPz9Uye79AkJi3t7kWV4ZYRi9rjrOzjSm8DbAgvvV4Qje6xSAUQea74QQhDwyfg8gZDYB5X8oX2bHWwV9BidhT612zF29Hbd4+5FsOpZxSYfmQIj7sVWm0LVo78FtRoQxAxOISG5CI0lCsY+AoN+qni6oJgv1ny2l4M7rIyb2Z2cgWcxnv7R+Jx8++fZ7KgfQL/975NCFV3mzkETF3fSP1v91Dy2HIiit+s7Rr1wDxrLScwq1lJY+XfYMlcR0CF3wIj7wHSC7xACvHZF6A8Jvq0Ctn8O9TvBlAiDb1eE2Hzss7FWOfnihULcNi89t79Nsm8/8bNmYbnxBlR6PbibofhD2Pi2cj19lHK94T8HkzKJyfn991Q88hs2plyH3ZLL1fcOIKPX4ft12X18/OQGhNfLkOJnkCpKicpyk/jgA2gv79hgqMvWzKJ//p2qPbu4/Z9vYIo59lk616zh4O13QK8urEz4OdEJJpqsMtEmD1dq/0jzN/V4rDpip00g8Q9/O6surhcSZcWb+eSJU49LHI0QggXPPsGBzUXc+PhTRNTW4968GffmLXg2FxJsUcbkVFoZQ6oBQ/4gdJdMp97UnZiseOLTT29uSljcO8KWD5UIkzM/hO6h2BvVxUqAsqZSGPcbxSxyvtf5FAL2f6u05A+sAoOF+rpheKubSEjfhj7SB0PuhDEPguH4wiiEOCcTjYKOJub9YQENzgTyt7xAfHo0We++g8p4rAlICMHOJ15jeXkOKVI11zw3DfVxzgOg+aAi6pvnKKaOwbfDyPvajG10iEPP9PtXYM9SUOuh33SlNZ/c1sfdZfex+JViakvt9AhuIWXVa+iS4kkYl0CUZg1S0A2pA5UKos9UpSeBEmK47oUXaHzrbXYX/Iwqc29+9JOedB927PKCdWV2Pvt7EcnZZoZLK2h6800kKUj8pMHEPv52hwRWyDJel4sI87Ei4quo4MDUaajMOop63ESLJpsZfxxDc62Lxa8Uo41Qc9WIHQTffoamnWoMGQbS/vYU2oFnHkrhQuOQR1
HesFFccVf7K9Hda1ez8PmnGDb6MuJefxfh9YIE+hgwWJwYUrQYRk1AO34Wdd4sStbVsGdjHR6nn75j0xgz8/TmHITFvSME/fDPkHnktv/Cxjfhv79RBjymvqF4rVxolK9XRL7kS2W/9xS47Pcddsv8IXFW1/DRE9+jCngYuOYZYoYXkPHSS0hHTCMXfj8HfvMnljUMQGOMYMZTlxEReZyusa1S6bkUvaf0fApuhVH3Kzb1s0XDHkXkt8xVvIGyx8Cwu5XB8VCPLuAL8s3bW9mzyUo33Ua6bnwDv1WNPi2SpF/ei+nH/9fmkt79+6l68CE8O3ZQedUj7HZmMvSaHAZN7HLC29i5pppv3tvJgB9lMDhfTc39N+Pc3Yg+2UTy0//COHTYGRVTdrs5cMON+MvLqB8xii2+SVw5qw85A5UKsqHCwcIXN+P3yUy8PRfzf5+g+u2vUWlk0qZ1xXTrXyDtzKJcXmgsfOFvlG8vZtar77YrkJi7xc47v7obc4yFIeuK0QRsJPerJsLiRd1jLOTfjC3+R5QUWtm9rgZbnRu1VkV2/3i6D00mo1csavXpmUTD4t5R1r8Oix9U/NbLvlPiw187u7VLfcFSv1sZmE06O/ExzjY120r5/KU9JAZ202P1q8ROm0Lyn/6EJEkEHU4q7ruPtfb+NMX3ZOqjQ9pO5BICarbCpn9D4TsgZGVQcvSvIPoHDPDmsiqVyPrXlPV4Y3Ng6F1Kq3zTvxHFn7Ch6Wo2OGeQmuRmRA8XLa+/ir+qCtPIkSQ+9CD67t1p/uhjap98ElVEBC0//QvfF0r0Gp3KuBu6n7L3tPI/JWxdXsHlP+1FbkEijufvpubfywi4NERfPZHEX//mlGau4yGEoOrhR7AvXIjm8jyW+u6h1/BELrmlX5vz7I1uFr64BVuDm8tv7U1GRDUVP5+Fr7aZhL4txE0aiTTkTsga3tpDuZAJNDbSsnQpui5djut5tfO7FSz+59+Z+ee/k5p36gH8L196jl3frWB8cjaqL5fQZXwzhmvuxtN9Jnv3GyhZV0P1PhtIkJYXQ/ehyeQMTERvOPP4OGFx7yg+FzzfVwknfNnvYcQv2trfw5w2O77axref1pHnXkr6uvnE/+JeLNOnUz7rLnY7M9iXfQ1jZ+bRZ2w6BLyKyWn3l7D7v2CvAEkNA25Q4vdbzqH3RtAPOxcorfmKDcoxjUExuQy6jZLKNL55fxcmi56Jd/SAZfNpePVVZLsdfV4e3t27MY0Ygf/W37BkThmZfeKYeFdfVO1osQWDMvP/sYn6shamPlJAfHok8nezaXj2LzTuMqM2R5L4yCNET5nSITOb9d13qX3yKaIu7cpX3ILWksB1vx993LUMPA4/i17eQk2pndHX5dF7sIWa3z6KfcnXmNODJPZrRBspoeoyRBmTyhmrtOhPMs/jXBJ0OHEs+xrbwkU416yBYBBJryfrgw8w9GnbGPI4Hbxyx40UXH3tKd0ZSzdt5LOnHmfgwCGkvDOX2F4tuK77M7uaB3FgWwNyQGBJMdF9aBJ5Q5KJjD27sd07rbi3+FqoaKkgz5KH+mzbwGu2KWny+V8iq7Ox8u0NbF3XwiDrm0QVF6GOi8OqSqSozz107RfN+GElSCWLYd834HOA1qjE6+9+pWIWOc4g5zmlfIMyD6H7lcrkrhDV+2x8+WoxclBwxZ19SE1R0/j66zR/Po+4228nMG4y8/6xGUuyickPDDxhMLrj4bL7+OivG1CpJa779WAizFrYtRjvW7OoXmfCXSthHDSI5D8+jr5r11Nez/n9Og7+9KeYB+awLW4IB/zDmPboUBIyT7xQht8XZOkb2zlQ3EDBhCyGXJNN85y51D71FAQCIIHGpEIb4UFrCqCLVqPNykHbczDagZej6TWSpgY/jiYvphg9ZosevVHzg437CJ8Px3ffYf9iIS3ffIPweNCmphJ19dWYx46h8qGHQBZkf/Ixmvi2vfKP//IYDmsjtz73ygmv73O7eO
dXP0er1TJsfTF6yYr9quv4rvxSDFE68gYl0X1YMvEZ5h+sjJ1W3P9b+l8eWvkQkdpI8pPyGZQ0iEHJg+gR2wON6uKMYPy/QDAos+Bva6g72MKIg88hNQfYUPAQGsnN9TH3opOcihdQ3gToPhGyR59ZaOdziL3BzaKXi2mucTFm5uFZsbZ6N5/+bSManZqpDxe0K8TE0dSW2vns2UJSu8Xw43v7K63+up2IOTOwbWqkdlsCsi9I/B23EzdrluK5cxz8VVWUTp2GOsqEp38sy+2zGD45h/wJXU55D3JQZsV/StixqkqZVX1TDwIHy3Bv2YK/ohJ/RQX+8jJ8ZaUEGpvxacytgeaslh749G1nfqpVQcyGIOYYLeYEA2aLAXOcCXN8JOY4MyaLHr1R2xoZ9VQIWca9aRO2+fNpWbKUoM2GOspM5KiBRI/ogSEzGslnB48Nj+jKgYeeI6J3b7LefgvpiGX5ir78gm/fmc1tz8/GknJ8F9xlb73C5qWLuTQyCcOaNUROiGOB549k9o7nyll92tUrO1M6rbg3uhtZW72WjTUbKawt5ID9AAAmrYkBiQMUsU8aRO/43mhVF0b3MIyCy+7j47+sAWcd0apKavw9mZr3BgkD8xVRTxlw0ZrCvO4AS9/YxsHtVvr/KIOCK7L47Jki3C0+pj5cgCX59O3SO76r4tv3dzFwfCYjpnRTDrqs8OlPCWxfTm15AfbCSnRZWSQ//odjbMqyx0PZjTfhKzuA5ao4Pq+7j6SucUx6YAhSewVUCDYsOsCGhaVk9o5jwp19Wk05AX+Q6n02yndYKd/eSEOlMp9Ap/KRJO8l3lpIRFMVzqAFlyoWr96CV2/Bo4/BG2HBq4sG6dj3rhNOtLjQC5eSF250wqWkshOd7EYTcGLYX4LU7EJSy0SmeYjq4sac7D3OJSVQqbGlPkTVM+8SM+N6Uh5/vPXTQ+Gax9x0G4N/POWY+6nYuY0PH3+U3nm9yfp4AZYBXr7OepWAZGLGY0OUntU5oNOK+9HUu+oprC1kY+1GNtZsZJ9tHwAGjYH+Cf0ZlDSIyd0mk2Q6w+n1Yc4KiqvfRoIBGDs5gT4TLoxVa84GclBm9cd72bq8Al2EmmBAcM0vGfJRAwAAG85JREFUB5Da7Qxj0gAr5uxm28pKxt/e+3CoiGAAlj0Oa17EKfKp/k6Nv6KS6EmTSHzkYTSxsQghqH7019jmzyf15v4srb4EmzqXGX8Y0a4omUezfVUlK+bsJiEzktzBSZTvtFJV0kzAL6NSSyTnRJPRK5bMXrHEZ0QqrW8hlLhJfhdyixX/wQP4y8vxVVbhr6rBW9OAo9GHo0WFm0i8umj8WiMBjQm/xkhAawylh/eFdNgkK4kgKYYqevb1kNNDjS4yCiJilABsR25yAN69GupLqPPdQONHX5L8xz9iuf7w5MX3Hr4XncHIjD8+3abcfp+X9x++l6DXy4gN2zAZbOy95EFK6vOYdP9A0vLOLNRFR/ifEfejsXqsitjXbGRj7UZKmkowaAzc0fcObu59M3p1x7vGYc4uB7Y20FjpIP+KrHO+uMe5oPjbCtYt2M8lN/WgW8Fp+t8fRTAQGmAtb2Hqw4PaToAp/ggW3Iusj6fBO4nGDxeiNhpJfPhhZJeL2ieeIH7aOPa5JQqd05hwZx+65p/+fZVuqWfJG9sJ+mUsyUYyesaS0SuW1NyYDo0pHLecdjuB+nqQVEhaDZJG2dBokLRaJa9W4/crvSWXzcf+zfXs2VCLo8mLRqciu38CeUOSju9u6KiHtycgWuop3zUS56btZL37DsZ8JWzAdx99wLrPPuSu195vE9Fz1Zx3WD//E0ZrI4nespnAhHyWt/yUQVd1YeiPz+36xv+z4n40FS0VPLvxWb4++DXp5nQeHvww4zLGdUpRCXPh8ENMFnPavHz81w14XQG6DU6iz5g0krqEXEerNsF/bgSXFW/B76l+/ztlIRPAPHIwqtQK5tX/lp4jUrj05l4n+Z
b234scFGfdE+R0EbKgep+NkvU17C2qw+sMEGHS0q0gkdwhSaTkRB82QTUfhLcmEHT5KV2eiezxkf3Jx2iTk6ndv5d///qXTLj7fnqPvQyA2v17+eC3D9AtNYvcRV9jHKJnSfSzxGfFMPn+gefEzn4kYXE/irVVa3l6/dPss+1jROoIHhn8CDkx57bGDXNh4Qv6kCTpohqbsdW7KFp6kJL1tQS8QRIyI+kzJo3cwUlo/Y3w4f9B+feIEffT3NwT19rvsWRu5eO9s1BbkrnuseFn3Lq+0AkGZA7usLJnfQ2lWxoI+GUiYyPIHZxE79GpRMUblPkhb03A6zRzYKEWXU5Xsv79PpJez2s/v5XknFwmPfhbgoEAH/z2AZyNDYzauJ1Ii5P1g5/GHkjg+seGnJfKLSzux8Ev+/lo90e8tOkl3AE3M3vO5Gf9f0ak7sSuYOcKm9fG5rrNFNYVUlxfTEAOYNKaMGlNmLXmtnld22PRumjiDfFE66PPSmsxKAdp8jbR7GkmJiKGuIi4TtPTqXPVsaJiBcvLl7Oueh0mrYlpedO4vvv1JBrPjgnlXOBzB9i9roZtKyuxVjnRGTR0H5ZMnxGJxG7+gzLpK3c8IjKVpd/Esd83kikPDz7c0v8fwecJULqlgZL1tZTvtKLRqhh3U3dlOcvKQnj3GloaU6hY5CR60jWkPPWUssLViq/5+RtzKVw0j9X/eY+hQQ2Je3ZTc/k0tjou4cq7+pIz4Py454bF/SRYPVZe3PQin5Z8iiXCwn359zG522RUxxmx/6GodlRTVFdEUW0RRXVF7G1WYrlrVBp6xfbCqDXi8rtw+B04/A5cfhdOvxPBid+XRqUh3hBPgiGBOEMcCYYE4g3xrVuCIQGT1oTVY6XB00Cju5EGd0Pr1uhupN5dj9VjRRZy63V1Kh0p5hSSTcmkmFIOb2YlTTYl/6BjGbKQT/vdCCHYZd3F8orlLC9fzo7GHQCkmdMYmz6WKkcVKypWoJbUjO8ynpt63kTfhHM7yNvkacKsNaM9jck/QijmiG0rKtm3qQ45IEjNjaFPegk5u+5jj2s4y2y/YNjkHAra4fbYmWmxevjqre1U77XRa3Qqo6fnoqlYBR9Mp740h4Y1dhIffYSWAX359Mk/MPam21j9n/dIj4mn97LvYGQm32gfoc/YNMaeZlyYs0FY3NvBjsYdPLnuSTbXb6ZXXC9+PeTXDEgc0Pq5EAJv0Isr4MLld+EOuNvkfbIPjaRBLalRq9RoVBpl/6i8WlLjl/1srd+qCHpdETXOGiDkwpkwgIGJA8lPyqdPfB8MmuP7d8tCxh1w4/A5cAacOH1OHH4HNq+Nend9G6Gud9fT6G7E6rGe9BloJA1xhrg2lcCh/Rh9DE2eJmqcNVQ7q6lyVlHjqKHeXX9MJRMbEUtBUgHXdruWEakjzniCmV/2s7J8JR+XfMza6rVY9JbWCuXoSibZlExsRGxr78Ib9LK+en1rC73WVYuERL+EfozLGMfY9LF0i+nWen65vZw5u+bw+d7Pcfqd9Ivvx409b+TyLpf/YCYbl9/FV2VfMW/vPDbWbiTFlMId/e5gctfJpyXyoKynu3NNNdtXVWJv8GAwgt8bIDE7lkkPFLTbb7wzIwdl1i0opWhJGXHpZibc0YeYxq8RH91C5aZcWvY4SXv1Fd5960V8bjf6CAOjNu0iMk1iRd7fMMZbmPbIIDS68xdEsNOK+6aDTTz55S7G5iUwNi+BXilRZ/SjFUKwuHQxz218jjp3HRmRGXgCHlwBRcCPbMGeDeIN8eQn5pOflE9+Yj65ltwfdPKVX/ZjdVtbRd/hdxAbEdsq5NH66A63iv1BP7WuWqqd1dQ4a6hyVFHhqGBF+QqavE0kGZOY3G0yk7tNJj2yYzFgqh3VfLLnEz7f8zn17nqSjElcnnU57oC7tZKpdlbjDrjb/J1erSfFlEJsRCw7rTtxB9wYNAZGpI5gbPpYxqSPIc5w8lgsTr+TeXvnMX
fXXMrsZSQaErmu+3VM7z6d2IjTWzXnSGQhU1hbyPy981lathR3wE1mZCYTsiewrnodW+q3kGpK5Y5+dzCp26TTrliELDi408q2FZU0VTuZdP/AU9qGqxxVLNq/iGRTMgVJBaSa27/M38XIga0NfP3ODuSA4JL/60GuagnBT39B2aocAl4dO68Zz97iIvLdMmkVpewY9wC1/lym/2YwsSnnN5ZOpxX31Xsa+OvineyotgMQb9YxJjeBMXkJjM6NJ858euYBl9/FuzvepdRWilFjxKAxYNQaT5rXqrQERZCgCBKQA63pkfmgHCQgAkhI9IztSXpkeqexXx+NP+jn2/Jv+WzvZ6ypXINAMCxlGFNyp3Bp5qUnNN0E5SCrKlfxccnHrKpYBcCotFFc1/06RqWNOqbyE0Jg89pahf5QJVPtrKbOVUeeJY+x6WMZkjLktMxFspBZXbmaD3Z+wJqqNehUOibmTOTabteSGZVJbERshyrESkclC/YtYMHeBVQ4KjBpTUzoMoFJ3SYxIGEAkiQhhGBN1Rpe3vwyxQ3FpJnTuLPfnfy4649/0AHfcns5b2x7gwV7FxAQgdbjyaZk8hPzKUgqID8xn5yYnHNqtjwXtFg9LH1jOzX7bfQZk8bI9CXI8x+n9Jt0XBlZNGWlk/zVcmzjxlDE9VxyUw96jTq20gvKQYobigEwaowYtUZMWhNGjRG9Wn/W/987rbgfoq7Fw6qSBlaU1LNqTz1NLj+SBH1Soxmbp4j9wMwYtOfYTSmMQrWjmnn75jFvzzyqnFVE66O5Oudqru12Ld1jFXtlrbOWz/Z8xqd7PqXWVUuCIYFrc69lau7UC6bluK95H3N2zuGL/V+09ha0Ki1JxqRW81CyKfmYTSNpWHZwGfP2zmN9zXoAhqYMZVLXSVyWeRlG7fHj1QshWF25mpc3v8y2xm2kmdOY1W8WV3e9+qyK/H7bft4ofoPFpYtRS2qm5k3llt630OJrobC2sHUsqMHdAEC0PpqBiQMpSCxgYNJAesX1atf9CCGQhYwkSRdk5RAMyqybt59NXx0kPsPMFb2WoVnyD8pXxIMQBHslszLpMbrmJzH+9t7HCPXmus38dd1f2WndedzrqyV1a2PwkOCbtCYm5kxkSu6xs2DbQ6cX9yMJyoJtlTZWltSzoqSeooNNyAIi9RpGdIvjsp5J/KhnErEm3akvFuasIguZ76u/5/M9n7Ps4DL8sp8+cX2IN8SzqnIVQRFkROoIpudNZ2zG2AvWLdHmtVFUW0SNq6a1p3Boq3PVERTBNuerJBWykEk3pzOp2ySu6XpNhyosIQSrKlfx8uaX2d64nXRzOrP6z+LqnKvPyIxX0lTCa8WvsfTAUiI0EUzPm85Pev+EBOOxnh9CCMpbyhWxrytiU90myuxlAESoI7BEWJSeqxxEFjIBEUAWMkE52NqjPXpg3qA1EKGOwKAxtG4RmojW9NBnGpWmddOqtK1jWEcf06q0WCIsJBoTSTImnbDSPBUHikNmGllwac+1xK16AWtVHN/1+TNEJnH9Y0PahOutc9Xxj8J/sHD/QhKNidwz4B4SjYm4Aorjg8vvapN3+p2t43WugIuJ2ROZ0WPGad3r/5S4H43N7WfNXqVVv6KknmqbB5UEg7vEckXvZMb3TiLdcpYWhQ7Tbpo9zSwqXcRnez6jydPEj7v+mGm508iIyjjft3ZGBOUg9e56RexdNdQ4arD5bIxMHUlBUsEZdcuFEKysWMnLW15mR+MOMiIzmJI7hayoLNLMaaSZ04jWH2ct16PY0biD2Vtm8035Nxg1Rmb2mMnNvW/u8FhCg7uBolpF6O0+OxqVBpWkUpwKQo4Fh/IqSdX6uRACd9CN2+/GE/TgCXhwB9yt6dGf+WV/q3mzI0RqIxWhNyWRaExsFf0ko7LfJbrLCR0WWqwelry+jdpSO33Td+GytrDfM5gpDxaQnKM8Y1/Qx/s73md28WwCcoCf9P4Jt/e9/bQrldPhf1
rcj0QIwbZKO0u217B0Rw0ltQ4AeqdGcUXvZK7onUxe0snDcwohaHT62FvnYF+9g311TvbWO7A6vUQbtMQYdViMWixG3eG8SYcllI8x6ojUa3D5g9jcfmwuPza3H7snlLqPSj0BtGqJyAgtURFaogyaUD6UGjTK8VBepZJweYO4fAFcviBObyj1BVqPO31K6vErLSkJZUEjSZKUlEMprfsqCXQaFXqNmgitCr1WjV6jIuKI9Mh8QqQes/7cT5DxB2U8/iAe/6E02Fpety+Iy3f42bh8QdyhZ+P2BTHo1KTFGEiNMbSm8WbdDzouIoSgoslNYVkTG8usbCm3kRipZ3jXOIblxB3XSUAIwYqKFbyy5ZVWd85DRGojSYtMaxX79Mh0JTWnY/PZeL34dVZVriJSG8mNvW7kpp43tatCuBAQQhCQA4rYi0DrmNahzRf0YfVYqXXVUuuqpc5V17rVOmtp8DS06T3o1XqGpQxjXMY4xmWMI97QNuxvMCCzdt4+tnxdDtDGhXRlxUqeXv80B1sOMi5jHA8Pevi8NEzC4n4CShucLN1ew5LtNWwqb0YIyIozKi36XknEm/Xsq3e0CrmSOrG5/a3XMGjVdE00kWDWY3P7aXb5aXL5sLn9yKf5OCUJoiK0RBu0REZoCAQFLR5F6B3ewKkv0A7UKokITWh1dpR4TgKhpEfmOWQr7fh3JEbqyY43kZNgIjveRHa8mex4E5mxRnSaE9tchRDY3H6qmj1U29xU2TxUN7uptnmoa/Hg9oXEOxDEe4SIewIywQ7eqEoCo06DQafG5VUqviPRaVSkRke0Efy0GAPJ0RHEm/XER+qIM+lRt9NLyxeQ2V5lo7CsqXWra/ECYNKp6ZceQ43dQ2mDEk0x2qBlaHYsw7vGMbxrHHmJkW3EvsXXQqWjksqWSiocFVS0VFDpUPJVjiq8QW+b74/WR3Nzr5uZ2WNmmwl7hyqZdaVW1u1vZGuljWiDlqw4I1lxyjvLijOSFWsi2nhhmstORUAO0OhuVMTeVUthbSHfln9LpaMSCYm+CX25JOMSLsm4hJzonNZK/UBxA7VldoZclU1ZSxlPb3ia1ZWr6RLVhUeGPMKoNGXpzVq7h3WlVrZX2tCoJUx6DWa9BpNOg0mvwaRXHz6m12DWaTDq1Wc0FnhWxV2SpAnAC4AaeEMI8dRRn+uB94ACoBG4Xghx4GTXvBD83OvsHr7eWceS7TWs2deAP9j2WcSb9XRLNNE1wUy3RHNrmhwVcVz3S1kW2D1+mkJi3+zyYXX6aXb5sHsCmPVqog3aVhGPMhxOI/WaE7p0BmWBwxPA7lFa+3a3km/xBLC7/chCYNQpPySjToNJp8agU35URp0aU+gHpVOrOtQiFULgDwq8AUVYj0n9QbyBkNAGglTbPJTWOyltULZGp6/1WioJMmKNIcE3EanXUG3zUG3zUGVzU93swe1vK7IalURSVARJUXqMOk1r7yEi1JNQeg6HexYRoc8MOjXGQ89Ap2nNG0N5vebwcxBCYHcHqGx2U9XspsrmprLJfXi/2UNti4ej/00kCWKNulaxjzfriTMdzht1arZV2ikqa2JLRTPegNJ6TLcYKMiyMCjLQn6WhR7JUa2VRLXNzff7G1m7r5G1+xsptyoDuLEmHcNyYhmeo4h914QT9zRlIdPobqTSUUl5SzkBOcAVXa7AqDUihKC0wcn6UmuroFfZPADEGLUMyIihxROgrNFFg+OoCiIk+kcKfpRBi0pSGg0qlYRKklBLEioVSj50TCWBVq1Cq1ah04S2UF4fyh/vtx8Iyji9QVq8fhzeAI5QQ+fovFatwqBVH/OeD/0OjFrN4XzIP31P8x6+Pfgt35Z/y/bG7QBkRGa0Cv2AxAF4Ah5eK36N93e+j16t52f9f8aoxMkUldlZX2pl/QErZY0uAHRqFQFZbneD6K6xXXn0yh7tO/kozpq4S5KkBkqAy4EKYAMwUwix44hz7gb6CSHukiRpBnCtEO
L6k133QhD3I2nx+FlRUo/LF1SEPN580bZWLhSaXb5WoS9tcLK/wdkq/p5AkASznpQYA6nREaREG0iNUVrLKaFWc7y5/a3jHxJfQKbWrlREDQ6vsrV4qXf4Du87vDS0+NpUUBqVRO+0aAZlWSgIbUlR7Y9FUtHkahX67/cdFuIIrYpYo45Ys2L6iw2ZAONMOiwmZf/QFpQFG8uaWLe/kfWl1tYeQ7xZx9DsOIZkxzI0J/aY3oHTG+Cg1UVZo4uDVmcoVfYrm90d7i2dCo1KahV+tSThPMKEeDbRaVQkRelJjoogKSqC5KgIzCYnDfIm9rvWs7O5iIDsJ1ofjVpSY/VY6Rv1IyLd17DlgEyN/XBlOKRLLEOyla1XilJJewMyDm8AZ6jicfmCrfvKpphNB2ZaGJV7euszn01xHw48LoS4IrT/awAhxJNHnLMkdM5aSZI0QA2QIE5y8QtN3MOcOxQ7quiUbqouX4CGFh92j59uiWYitGdnJqMQgoNWRez3NzixOn2tW5NLSVs8JzbfJUdFMDQntlXQuyaYTntcwR+UqW720OL1I4TSswwKgRCCoAyyEMihY7JQerT+oIw/KPAFg/gCMr6AjDcg4wvKrfu+0H5AFphDpozWLeKoNJQ36TT4g7IytuIP4j5qbOXQeIvbH8TpDdLk8lFj81Bj91Br91Bj87T2qgBQedGYSjBadiMkJ87aS5A9mSRG6hmaE6oMs2PplmA+bzN+2yvu7Rn9SgPKj9ivAIae6BwhRECSJBsQBzS073bD/C8hSRJa9flvkf8QGHUaMuPO/qCyJElkxZnIijvx7EhfQFZMgS4fVoeSBoKC/EwLGbGGszZIrFWryIy7cLzN1CplYP90lss4NNZTExL6OruXGntfauwegkFBwWALQ7NjyYw1XnSTD8+pa4MkSXcCdwJkZmaey68OE6bTo9OoSIyKILEDpp//dSRJIibk5dYjuXNFzGxPv7gSONLfJz107LjnhMwy0SgDq20QQrwmhBgkhBiUkHCeV7MPEyZMmE5Me8R9A5ArSVK2JEk6YAaw4KhzFgC3hPLTgG9OZm8PEyZMmDA/LKc0y4Rs6PcAS1BcId8SQmyXJOlPwEYhxALgTeB9SZL2AlaUCiBMmDBhwpwn2mVzF0IsBhYfdez3R+Q9wPSze2thwoQJE+Z06Xy+aGHChAkTJizuYcKECdMZCYt7mDBhwnRCwuIeJkyYMJ2Q8xYVUpKkeqDsNP88ns4/+7Wzl7Gzlw86fxnD5Ts/ZAkhTjlR6LyJ+5kgSdLG9sRWuJjp7GXs7OWDzl/GcPkubMJmmTBhwoTphITFPUyYMGE6IReruL92vm/gHNDZy9jZywedv4zh8l3AXJQ29zBhwoQJc3Iu1pZ7mDBhwoQ5CReduEuSNEGSpN2SJO2VJOnR830/p4skSQckSdoqSdJmSZI2ho7FSpL0lSRJe0KpJXRckiTpn6EyF0uSlH9+7/74SJL0liRJdZIkbTviWIfLJEnSLaHz90iSdMvxvut8cILyPS5JUmXoPW6WJGniEZ/9OlS+3ZIkXXHE8QvyNyxJUoYkSd9KkrRDkqTtkiTdFzreKd7hScrXad5hG0RoeayLYUOJSrkPyAF0wBag1/m+r9MsywEg/qhjfwMeDeUfBZ4O5ScCXwISMAxYd77v/wRlGgPkA9tOt0xALLA/lFpCecv5LttJyvc48OBxzu0V+n3qgezQ71Z9If+GgRQgP5SPRFk7uVdneYcnKV+neYdHbhdby30IsFcIsV8I4QP+A0w6z/d0NpkEvBvKvwtMPuL4e0LheyBGkqSU83GDJ0MIsRIl5PORdLRMVwBfCSGsQogm4Ctgwg9/96fmBOU7EZOA/wghvEKIUmAvyu/3gv0NCyGqhRBFoXwLsBNlCc1O8Q5PUr4TcdG9wyO52MT9eOu5nuzlXMgIYKkkSYWh5QcBkoQQ1aF8DZAUyl/M5e5omS7Gst4TMku8dchkwUVePkmSugADgX
V0wnd4VPmgE77Di03cOxOjhBD5wJXAzyVJGnPkh0LpF3YqV6bOWCbgFaArMACoBp49v7dz5kiSZAY+BX4phLAf+VlneIfHKV+ne4dw8Yl7e9ZzvSgQQlSG0jrgc5SuXu0hc0sorQudfjGXu6NluqjKKoSoFUIEhRAy8DrKe4SLtHySJGlRhO8DIcRnocOd5h0er3yd7R0e4mIT9/as53rBI0mSSZKkyEN5YDywjbZr0d4CzA/lFwA3h7wThgG2I7rJFzodLdMSYLwkSZZQ93h86NgFyVFjH9eivEdQyjdDkiS9JEnZQC6wngv4NyxJkoSyZOZOIcRzR3zUKd7hicrXmd5hG873iG5HN5QR+hKU0erfnu/7Oc0y5KCMsG8Bth8qBxAHLAP2AF8DsaHjEvBSqMxbgUHnuwwnKNdclG6tH8UO+dPTKRNwG8rg1V7g1vNdrlOU7/3Q/Rej/IOnHHH+b0Pl2w1ceaH/hoFRKCaXYmBzaJvYWd7hScrXad7hkVt4hmqYMGHCdEIuNrNMmDBhwoRpB2FxDxMmTJhOSFjcw4QJE6YTEhb3MGHChOmEhMU9TJgwYTohYXEPEyZMmE5IWNzDhAkTphMSFvcwYcKE6YT8PzmHoXsrsjkeAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_gradients(good_trial)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also print inputs and outputs from the model. For instance, let's print the 83th sample of the 2700th batch, as seen by the network. \n", + "\n", + "Notice that we have to reshape the input data from a (784,) array to a (28,28) array and multiply by 255 - the exact inverse of the transformation we did above." + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "ename": "AttributeError", + "evalue": "'NoneType' object has no attribute 'step'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# The raw tensor\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mraw_t\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgood_trial\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'sequential0_input_0'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m2700\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m83\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0;31m# We have to undo the transformations in 'transformer' above. 
First of all, multiply by 255\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mraw_t\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mraw_t\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0;36m255\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;31m# Then reshape from a 784-long vector to a 28x28 square.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mAttributeError\u001b[0m: 'NoneType' object has no attribute 'step'" + ] + } + ], + "source": [ + "# The raw tensor\n", + "raw_t = good_trial.tensor('sequential0_input_0').step(2700).value[83]\n", + "# We have to undo the transformations in 'transformer' above. First of all, multiply by 255\n", + "raw_t = raw_t * 255\n", + "# Then reshape from a 784-long vector to a 28x28 square.\n", + "input_image = raw_t.reshape(28,28)\n", + "plt.imshow(input_image, cmap=plt.get_cmap('gray'))\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also plot the relative values emitted by the network. Notice that the last layer is of type `Dense(10)`: it will emit 10 separate confidences, one for each 0-9 digit. The one with the highest output is the predicted value.\n", + "\n", + "We can capture and plot the network output for the same sample." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "ename": "AttributeError", + "evalue": "'NoneType' object has no attribute 'step'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mplot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgood_trial\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'sequential0_output'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m2700\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m83\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'bo'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshow\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m \u001b[0;34m'The network predicted the value: 
{}'\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgood_trial\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'sequential0_output'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m2700\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m83\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mAttributeError\u001b[0m: 'NoneType' object has no attribute 'step'" + ] + } + ], + "source": [ + "plt.plot(good_trial.tensor('sequential0_output').step(2700).value[83], 'bo')\n", + "plt.show()\n", + "print( 'The network predicted the value: {}'.format(np.argmax(good_trial.tensor('sequential0_output').step(2700).value[83])))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Vanishing Gradient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We have now worked through some of the basics. Let's pretend we are debugging a real problem: the [Vanishing Gradient](https://en.wikipedia.org/wiki/Vanishing_gradient_problem). When training a network, if the `learning_rate` is too high we will end up with a Vanishing Gradient. Let's set `learning_rate=1`.\n", + "\n", + "Notice how the accuracy remains at around ~10% - no better than random." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "# Temporary bug workaround, we will remove soon\n", + "from tornasole.mxnet.mxnet_collection import CollectionManager\n", + "from tornasole.mxnet import mxnet_collection\n", + "mxnet_collection._collection_manager = CollectionManager(create_default=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[Epoch 0 Batch 100] Training: accuracy=0.115644\n", + "[Epoch 0 Batch 200] Training: accuracy=0.109701\n", + "[Epoch 0 Batch 300] Training: accuracy=0.105880\n", + "[Epoch 0 Batch 400] Training: accuracy=0.104339\n", + "[Epoch 0 Batch 500] Training: accuracy=0.103653\n", + "[Epoch 0] Training: accuracy=0.103467\n", + "[Epoch 0] Validation: accuracy=0.100900\n", + "[Epoch 1 Batch 100] Training: accuracy=0.108713\n", + "[Epoch 1 Batch 200] Training: accuracy=0.104080\n", + "[Epoch 1 Batch 300] Training: accuracy=0.103588\n", + "[Epoch 1 Batch 400] Training: accuracy=0.103641\n", + "[Epoch 1 Batch 500] Training: accuracy=0.102555\n", + "[Epoch 1] Training: accuracy=0.102350\n", + "[Epoch 1] Validation: accuracy=0.101000\n", + "[Epoch 2 Batch 100] Training: accuracy=0.099406\n", + "[Epoch 2 Batch 200] Training: accuracy=0.100597\n", + "[Epoch 2 Batch 300] Training: accuracy=0.100332\n", + "[Epoch 2 Batch 400] Training: accuracy=0.100299\n", + "[Epoch 2 Batch 500] Training: accuracy=0.101058\n", + "[Epoch 2] Training: accuracy=0.101033\n", + "[Epoch 2] Validation: accuracy=0.102800\n", + "[Epoch 3 Batch 100] Training: accuracy=0.098614\n", + "[Epoch 3 Batch 200] Training: accuracy=0.100050\n", + "[Epoch 3 Batch 300] Training: accuracy=0.101063\n", + "[Epoch 3 Batch 400] Training: accuracy=0.102419\n", + "[Epoch 3 Batch 500] Training: accuracy=0.102974\n", + "[Epoch 3] Training: accuracy=0.103133\n", + "[Epoch 3] Validation: accuracy=0.103200\n" + ] + 
} + ], + "source": [ + "net = create_net( tornasole_save_interval=100, base_loc='./ts_output/', run_id='bad')\n", + "train(net=net, epochs=4, ctx=mx.cpu(), learning_rate=1, momentum=0.9)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tornasole:Loading trial myrun at path ./ts_output/bad/\n", + "INFO:tornasole:Loaded 4 collections\n", + "INFO:tornasole:Loading 28 new steps\n" + ] + } + ], + "source": [ + "bad_trial = LocalTrial( 'myrun', './ts_output/bad/')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can plot the gradients - notice how every single one of them (apart from one) goes to zero and stays there!" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAW4AAAD8CAYAAABXe05zAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzsnXl4VPX1/1939ux7yEoCUlSQsAUFWdTagojFBZVatWrrT22tWq2t1i5q1S6WWr6orUVwp1or4gKoKIIsQllDgIBsCUkg+77MPp/fH5N7mSQzyUySSSbkvp6H54GZO/ce7tw599zzOed9JCEEKioqKiqDB81AG6CioqKiEhiq41ZRUVEZZKiOW0VFRWWQoTpuFRUVlUGG6rhVVFRUBhmq41ZRUVEZZKiOW0VFRWWQoTpuFRUVlUGG6rhVVFRUBhm6YOw0MTFRZGdnB2PXKioqKmclu3fvrhZCJPmzbVAcd3Z2Nrt27QrGrlVUVFTOSiRJOunvtmqqREVFRWWQoTpuFRUVlUGG6rhVVFRUBhmq41ZRUVEZZKiOW0VFRWWQoTpuFRUVlUGG6rhVVFRUBhmD1nFXFp2gtODAQJuhoqKi0u8MWsf91ZvL+exf/zfQZqioqKj0O0HpnOwPqktOYm5qxOmwo9XpB9ocFRUVlX5jUEbcrQ31tDbUI1wuGiorBtocFRUVlX5lUDru6pJi5e91ZacH0BIVFRWV/meQOu4zWiz15arjVlFRGVoMSsddU3ISU2QUxogINeJu4+j/vqa1sWGgzVBRUekHBqXjri45SWJmFnEpadSpETdNNdV89Nwf2f/luoE2RUVFpR8YdI5bCEF1yUkSMrOITUlTUyVAReFxAFrqagfYEhUVlf5g0Dnu5toabOZWd8SdmkZjdRUOm22gzRpQKmXHXV83wJaoqKj0B4POccsLk4mZw4lLSQMhaKgsH2CrBpbKohOA6rhVVIYKg9ZxJ2RmEZuaBqglgZVF7oi7tUF13CpnB6e+OcTx3TsG2oyQZdA57pqSk0TGxRMWGUVcSjrAkF6gNDc10lRdhVanUyNulbOGr999i0//uRjhcg20KS
HJoHPc8sIkgCkyElNUNPVDOOKuLHSnSdLPG4vNbMZusQywRSoqvaehqgJLU6OSBlRpz6By3C6Xk5rSEhIzhyuvxaWkDumIW06TjJgwGYCWhvqBNEdFpde4XE6aqqsAKMrfO8DWhCaDynE3VFbgsFmViBsY8rXclUUniEpIIrHtnKglgSq9wdzUSMnB/AG1obm2FpfTCUDxftVxe2NQOe4zFSVnHHdsahrNNdXYrUMzRVBZeJzkESMJj40DoEVdoFTpBdtWvs1/n/4t1taWAbOhsU04Lml4NqcOFwzZ33ZXDCrHXdMmLpWQ4ZEqSXUvUNZXDL2SQJvFTG3ZKZKzRxIhO251gVKlFxTv34dwuag4cWzAbGiocjvucZfPwelwUHro4IDZEqoMKsddXXKSmORhGExhymtxKe6SwKG4QFl1sgiEIDn7HMKio5EkDa2q41bpIa0N9dSUuoOjsmNHBswOWar5/BmXodXpOKnmuTsxqBx3jUdFiUxcWy13bdmpgTBpQJEXJpNHjESj0RIeE6NG3Co9pqRgPwBanY7yAXTcjVWVRMbFY4qMJP28MZzcnzdgtoQqg8ZxOx12ak+XkuiRJgEwhIUTHhM7JDVLKgtPYIqKJiohCYDw2DjVcav0mJKD+RjCwjhnyjTKTxwdMDsaqyqITk4BICtnEtXFRep13QG/HbckSVpJkvZKkrQ6mAb5oq7sNC6ns93CpExcatqQ7J6sLDpOcvZIJEkCIDI2jpZ6tRxQpWcUH8gn/byxpI8+j+aaapprawbEjoaqCmKSkgHIGjcBQI26OxBIxP0AcChYhnSHZ6t7R4aiSqDTYae6+CTJ2SOV18Jj49SqEpUe0VxbQ13ZKTLH5pAyajQA5cf7P+p2OZ001VQTkzwMgOTskYRFRat57g745bglScoA5gHLgmuOb2pKTiJpNMSnZXR6Ly4ljZb6Omzm1gGwbGCoKS3B5XSQPOIc5bWI2Dha6+vVNmGVgJFrt4ePzSEpeyQarZby4/2f526qqUa4XEQnuR23pNEw/ILxnNyfhxCi3+0JVfyNuBcDvwJ8egRJku6SJGmXJEm7qqqq+sQ4T6pLThKXkobOYOj0nrxAWVde1ufHDVVkKVfPiDsiNg6X04GlpXmgzFIZpBQf3I8xIoKk7BHoDUYSh2cPSGWJXFES3ZYqAcjKmUhLXS01HiMLhzrdOm5Jkq4CKoUQu7vaTgixVAiRK4TITUpK6jMDZWpKi73mt8GdKoGhNX+ysugEOqNRuWkBg6qWWwjByj/+nkObNwy0KSpASUE+GedfgEajBSB11Ggqjh/t96e3xrYa7pi2xUmArBw1z90RfyLu6cB8SZKKgHeAb0uS9FZQreqA3WalrrzMa34bztRyD6UFysqi4yRnjVR+aAARMYPHcdeVnaJo3x6O79k50KYMeRqrK2moKCdzTI7y2rBzvoW1taXfn2IbqiqQJA1RCQnKa9GJycSlZah5bg+6ddxCiF8LITKEENnA94EvhRC3BN0yD2pLS0CIduJSnuhNJiLj4odMxC1cLiqLCkkeMbLd63Lb+2BowiktOAC4HbjKwFJy0F2/nTl2nPJa6jnyAmX/pksaKyuIjE9Aq9O3ez1r3ARKDh3AYbf3qz2hyqCo4+6qokQmdgiVBNaVl2G3mEnOPqfd64MpVSI3e9SVnVYXnQaYkoP5mKKiSRqerbwWn5GJ3mjq90achqpKpaLEk6yciTisVsqODFhhW0gRkOMWQmwUQlwVLGN8UV1yEq1Op6REvDGUVAKVjsns9hG3ISwMncFIc4g7biEEJQX70Wi12C1mVdFwABFCUHwwn8wxFyBpzrgDjUbLsHNGDYDjrmi3MCmTOWYckkaj5rnbGBQRd03JSeLTM9FotT63iU1Jw9zYMKCqZv1FZdEJNFptpycQSZKIiI0N+VRJfflpWupqGTVlGqCmSwaShsoKmqqryByb0+m9lHNGU1l0HKejf9ITToed5toarxG3MTyc1G
+dR9G+0M1zF+XtZt/nn+ByOYN+rEHhuKtLfFeUyMQNofmTlYXHScjMQqfXd3pvMLS9y2mScZfPAYbGdxaqFB/YB7jrtzuSOmo0TofDLWbWDzRWV4EQSg13R7LGTaCi8BjmpsZ+sSdQ8j7/hB0f/hdJCr5bDXnHbW1toammqsv8NnhUlpzl6RIhBJVFJzqlSWQiYkLfcZcWHCAiNo6sC8aj0xuoPV060CYNWUoO5hMeE0t8eman91LO6d8OysbKSgCl3b0j2eMnghAUHxjYQQ/ecDoclBzcR3bOJEWCIpiEvOOWZSa7i7hjUlKBs1/etbm2BnNjQ6eFSZmI2LiQHl8m57czznfnVN2LymqqZCCQv4vMMeO8OpuoxCTCY2L7Lc/d4KWG25OUc0ZjDI/gZAhOxSk7chib2Uz2+En9cryQd9zept54Q28wEpWYdNZH3J5Srt6IiI3D0tTYb3nJQKmvKKO5tkYpPYtPTVdTJQNEXdkpWupqvea3wb1mknLOt/qtJLCxqgJJoyEyPsHr+xqtlsyx4ziZH3rt70X5e93t+ePG98vxBoXj1htNRCd2340Zl3L2R2+VhSdAkkjOGuH1fbkksLWhoT/N8hu5fjtjjNtxx6Wl01BZjtPhGEizgsI327aw4fWXB9oMn8j6JL4cN0DKqNHUnCrB2hp8HaCGygqiEpK6LELIGjeRxqoK6itCS96iaN8eUr91HsbwiH45Xsg7bvfwhOHtSpV8EZeadtanSiqLjhOXkoYhLNzr++EhXstdUrDfnVNtEwuLS03H5XQqGhVnE/lfrCXvs9XK4NtQo/jgfiLj4tvJJnQk9ZzRIES/jDJr9FHD7YnS/p4fOmWBrY0NVBQec+fg+4mQd9z+VJTIxKakYWlpDtlV576gq4VJcGtyQ2g6biEEpQUHyPDIqZ6pBjq7npSEy0X58aNtMqV9L7rWW9zfxX4yx+Z0uZg2bFT/dVD6quH2JDYljeikZE7m7wm6Pf5ycn8eCNFv+W0Iccfd2lBPa0O93477bC8JNDc10lhV2U7KtSOhHHE3VFbQVFNF5vkXKK/FtUXeZ5vjrj1dis1sBqC+PPQGWdeUFtPaUN9lmgQgLDKK2GGpQV+gdNhstNTVdhtxS5JE1rgJFB/ID5knmZP79mCKjGLYyFH9dsyQdtzV8lT3ACJuOHtVAiuLTgCdOyY9CY+JBUJTr6SkQM6pntHECIuMwhQVTd3ps8txlx39Rvl7fUXoXY/+5LdlUkaNDnpJYGO1XArYteMG9zgzm7l1QPTCOyKEoCh/L8PHTWgn+BZsQtxx+1dRIhM7LAVJ0py1lSWK4+4i4tbp9Zgio0JyEk5pwQHComM61QzHnYUlgWXHvsEQFo5Wr6e+IvQi7pKD+4lOSu42wgV3GV5TTRXNQZQmaPSiw+2L4RfkgCSFRJ67uriIlrrafs1vQ4g77pqSk5gio5RKie7Q6vREJyWdtamSysLjRCYkEh4d0+V2ESHaPVlSsJ/M8y/olFONT804Cx33EVJGjSYmOYX6EBvwIVyutvrtrvPbMv0xyqyhqi3i9lHD7UlYVDTDRowKiXruon3uXHt2Tv/ltyHEHXd1aTEJGcMD6kQ6m+dPdrcwKRMRG0tLXWg5blkTI8MjTSITl5pGc10tNot5ACzre+xWC9XFRaSOOpfYYSkhV7pWVVyEpbmpXcqqK5JHjETSaIKa526oqkCj1RER51+QlpUzgbKj3/RLmWJXFOXvJSFjOFEJif163JB13EIIakpO+p0mkZEnvodagX5vsVss1J4u9dkx6Ul4TN8NDXY5nViaez8KTdYn8VyYlIlLSwfoszx3zakS1r/y0oA1IVUUHke4XKR+azSxKWk0VJSH1PUYSH4b3M1tScNHBDWn3FhZQXRikt954qxxE3E5ncp1NRDYrRZOHT7Yr9UkMiHruJtra7C2tgTuuFPSsJlbMTeGZgNKT6kqLgQhfHZMeiKnSvrCWez55COW3fdjzM1Nvd
pPacEBwqKiScjoPAwjLrXNcfdRuuTQ5g3kfbaaYzu398n+AqW8bWEy5ZzRxA5LwW610BpCMgTFB/OJHZbqV1ObjNxBGawbUENVBdF+5Ntl0s49H53ROKBTcUoLDuC021XH7UmgC5MysWdpSWBloXthcpgfEXdEbBwOqxV7H6QeKguPY21t4cCGz3u1H099ko7EpqSCJPXZdyY3i+xbt7ZP9hcoZceOEJ2UTERsHLHD3Bo6obJg7nI5OXXooN9pEpmUUaOxtrQELQ3ZWFXpU1zKGzq9nszzLxhQfe6ifXvQ6Q2knz+2348d8o47wce4Ml+crSqBlUXHMUVEEuVHlNSXk3Dk/Oy+dWt6rDPcWFVJY1WF0ubeEb3BSHRiUp+oBAohqCg8js5gpKRgPzWlJb3eZ6CUHfuGlFHnAm03JaAhRCpLKgtPYG1t8TtNIqMsUAYhzy0/kfizMOlJVs5E6k6XuuVgB4CifXvIGHMBeoOx348dso67puQkEXHxhEVFB/S56KRhSBrNWbdAWVl0guQR5/i1UNuXTTj1FeVExsXTUFlB4d5dPdqHkt8e0zm/LRPXR2JTTTXVmBsbyP3edWi0OvK/+KTX+wyElvo6mqqrSG1zdNFJyUhS6FyPgea3ZRLaRpmVBSHP3dhWUeJPKaAnWePk6e/9ny5prK6k9nTpgKRJIIQdd3UPFiYBtDodMcnDzqpUidPhoLq4qMv6bU/ORNy9y6vKawXjv3slkfEJ7P10dY/2U1KwH1NkVJffp1zL3dscamWhWz1xxIRJjJ46nYOb1mO3Wnq1z0Aoa4tIU9sibrlENVRquUsO5hOXlkFkXHxAn9NotAwbGZxRZrKcq68BCr5IyMwiIi5+QOq55TLArJz+rd+WCUnHLVwuakpLfE51746zbf5kTWkxTofDr1JA6LtUiexs4tIyGP+duZzM39ujdEbpoQM+89sycakZ2MytvV7Eqyg8hiRpSMoawfjvzsXa0sLhrzf1ap+BUH7sGySNpt0icsyw1JAoCXQ6HJQeLmB4gPltmZRRo6ksOtHn1TrKAIUAFiehrf39gvEU789DuFx9alN3FO3bQ2R8gtfF9v4gJB13Q2UFDpuVhIzAI25wP3bXn0UlgWda3f2LuMMio5A0Glp7WRIoO5vYYSnkfOcKtDodeZ+tCWgfjdWVNFSUd5kmAYiXF5V7WRJYWXic+PQM9EYT6eeNJSFjOPvW9V+6pOzoNyQNH4HeaFJeix0WGk04FSeOYbeYA06TyKSc8y2cdjvVxSf71K6Gqgq0ej0RbXINgZCVMxFzU6PyG+kPXE4nxfv3kT2+f6bdeEMKhnPLzc0Vu3b1LB8KcGzndj5c9DQ/ePpvpH7r3IA/v/ez1Xz5ykvc/c/XfYqyDya+fO1f7P9yHfe99q7fda7/uueHZE+YzJx7HvBre7vdTmlpKRbLmbSCtbUVa0szUQmJSBoN5qZG7FYrUQkJfs/Vs1ssmJsaiYiLR6vT+dzO5XTSXFuDKSoKgynMr317o6mmGp3BoKyN2MytWJqbiYiNQ+tlRmefIgSNNdUYjCZMUVHKyx3P40BhbW3B2tJCZEIimh7YoXxHkVEYwnr+HXWktbEBl8PRo9+qy+WiuaYaY0QkxnDvUsd9jdNup6W+jrDo6HY3aH8xmUxkZGSg73A9SpK0WwiR688+fP+SBhCloiSj8xw8f/CsLDkbHHdl4QmSskYEJGIT6NDg0tJSoqKiyM7OVqKIxqpKLC3NSorGZrFQe6qE6LaRVv7QUFmBtTWapKyRXUYnQggqC48THhPb4y40p8NBlUFHVGKSEr25nE6qigsxRUQF/CgeKA6bjWqjnpjkYe0W1S0tzdSXlykLfANF7elTuJyOHq0dgfs7qjpZiDE8ok/PZU1pMRqtVqnnD5TqEvfn49N69vlAaa6toTkijOTskV0OffCGEIKamhpKS0sZMcL7MBR/CMlUSXXJSaKThvkcFtAdZ5O8q3C5qDp5wu80iU
xkXHxAjttisZCQkNDOuTrs9nZRqsFkQm800drY4HcaymYxozeFdftIKUkSWr0eh93mt80dsVutAO3KszRaLabIKCzNTUGXAZUXQTs6Z63OfQ4d9oEbJydcLuwWc49/U+D+jvRGU58v9jodDuUc9QRjWBh2ixlXP+W5reZW9CZTwE4b3OcwISGh3ZNtTwhJx+1ude950j8qMQmtThcyJVi9ob6iDJvZ7PfCpEx4TFzA0q4dnavTYe/0gwqPicFhsyla013hdNhx2u1+pz50egPOXjg3R5tD0Rnb19WGR8cghOh192d32C0WJI2mU0pG/ndv/m+9xW61IoTodYpDbzTisNl6XNPfEZfLicvpRKvv+cO/3mRCCIGzFzd9f3E5ndgtFoy9vAH2lpBz3E6HndrTpT1+nAN36VJMcspZEXHLiy7D/CwFlJGnvfd0tV0IgdNhR9fBCZkiItFotbQ2dl/9ITt3f52FVq/Habf3eFHZbrOiMxg65W/1RhN6kwlzAE8KPTq+1YLeaOr0w9RoNGh02gF13DazW4ypN+sH4HaSAI62p5ve4rS7Z41qehFxaw0Gt0224J9f5Tz2Uz7dFyHnuOvKTuNyOnvluMHd+n42RNyVhcfRaLV+D5OQiYiNRbhcPR7j5nTYQdApepQ0GsKio7G2tHT76G+zmNFoNej87CzT6Q3KDaMnOKxW5VjZ2dlUV1cDcPHFFxMe7X5SCFQG4LXXXuP06fbX0dtvv80zzzzT7jWXy4XdZkVv8v5/1eoM/SJ6tXjxYlo9FPOuvPJK6uvrsVnM6IxGr4/3nufqRz/6EcnJyVxwgfcqIPlppmO6pKioyOdnukI+J4GkSoQQ3H///YwaNYqcnBzy8/eDRJ9H3PK588Ta2opGo1HSYZdeeineCjHy8vJYuzZ4kgsh57jPtLr3znHHpaRRX17W7/WdXeF0OAKWoawsOkFCxvBOkW939LaWW44Ovf2gwqNiQKJbIS+72b/8tkx3KQVHF5PgnQ4HTocDvbGz4/z6668xRkSi0WpoDVB8zJvj/vTTT7niiiva22a1guic35bRtT1NBJuOjnvt2rXEREe7H+/9iLZvv/12Pv30U5/va7U6tHo9dksfRdxt32lXFUcd+eSTTzh69ChHjx5l6dKl/PTee9Hp9Dhsfeu4165dS2zsmUV4IQQ2cyuGsPBur+lgO+6QqyqpKS1GkjTKFPCeEpeahsNuo6m2JiAVtGDy2Uv/x9EdXzNj4Q+ZOPeqbqtEZN2NkROnBHwsz7b3pKzAVq+f/PggB0pqcdjtGIxV4OUiddhsCFep8ujsxXhsVgs6nR6N7iRj0qJ5/Htdi/H86dlnefON1xk+PIthKSlMnjyZ1atXc/HFF7N161bmz5/P6NGjefrpp7HZbCQkJLBixQqGDRtGRdlpbrrtDhqamrho6tR2KZHIyEiam5sJi4zmr4sW8ckX67HabFx77bU8+eSTFBUVMXfuXGbMmMHXX39Neno6H374IWvWrGHXrl3cfPPNhIWFsW3bNkwmE3l5eUyaNImvvvqKBx5wl1sKl4uVb75OktHEX//6V959912sVqtyDK1ex9/+8jyrVq8hMzOTpKQkJk+ezMMPP8yll17KokWLyM3Npbq6mtzcXIqKinA6nTz66KNs3LgRq9XKvffey913383GjRt54oknSExM5MCBA0yePJm33nqL559/ntOnT3PZZZeRmJjIhg0byM7OZuvmTWiF4Ad33MHp02VYLBYeeOAB7rrrrk7fwaxZsygqKurye9IbTdgtZnbv3s2PfvQjwsPDmTFjhvJ+IHb/c/HfkTQSj/3mN3z88cfodDpmz57NokWLqKqq4p577qG42D3CcPHixUyfPp0PP/yQH/7wh0iSxNSpU6mvr6eqro6k+M7doD/96U+54oormD9/Ptdeey1xcXG88sorLF++nMLCQp5++mneeustlixZgs1m46KLLuIf//gHWq2W7Oxsdu3aRWJiIk899R
Qr3nqLYUmJJKekctHUqTz88MMA/Pe//+WnP/0p9fX1LF++nIsuuojf//73mM1mtmzZwq9//WsWLlzY5TkNlNCLuItPEpuahq4tb9VTQm3+5Okjhzi0eQPh0TFsfONl3nn8kW4FkJrrajA3Nvgl5dqR3kbcwiWQwKvTBneEJBC4nN6jYHmFX9L6d4nt2rWLVatWsX7Nx7z5yvJ2j5/19fV89dVX/OIXv2DGjBls376dvXv38v3vf59nn30WgKeefpqLciezZ88e5s+fr/zYPdnyvx2cKCriy3WfkZeXx+7du9m0yd1VefToUe69914OHjxIbGwsK1eu5Prrryc3N5cVK1aQl5dHWFgYe/fuZfz48UiSxKJFi3jxxRfJy8tjzQfvExEZyfovv+To0aPs2LGj3TH27T/Ih6vXsON/23n//ffZuXNnt+dk+fLlxMTEsHPnTnbu3MnLL79MYWEhQgj27tnDk4/9mm0bN3D0yDd8/slafvzDW0lNTeGTjz/m0zWr3YMphMDWFoG/+sqr7N69m127drFkyRJqamr8+m46ojcacToc3HH77SxZsoRt27b5ZTfA3r17Wbx4MQUFBZw4cYKtX39NY3MLH3zwAQcPHiQ/P5/f/va3ADzwwAM8+OCD7Ny5k5UrV3LnnXcCcOrUKTIzz5QKZ2RkUFlV43V9ZNasWWzevFn5XEFBAQBbtmxh5syZHDp0iP/85z9s3bqVvLw8tFotK1asaLePXbt2sXLlSrZs3MjyF18gb9++du87HA527NjB4sWLefLJJzEYDPzhD39g4cKF5OXl9bnThpCMuE+SODy71/vxLAkcfsH4Xu+vNwgh2PjGMiLi4rlt0Ysc27mdDa8t5c1H7mPqgpuYMn+B10dFWco10IoS6J3jfvx7Y6krP43Tbve51iCEoKa0GCSJhPTMTo+OjVWVmJubSM7uun5bZsuWLVx99dVERkUjSRq+973vKe95XvilpaUsXLiQsrIybDabUgu7ZetWXnnpn2i0WubNm0ecl0kq6zdsYNPWr5k2YyY6g4Hm5maOHj3K8OHDGTFiBBMmuEWLJk+e7DPq/PTTT5k7dy4A06dP56GHHuLmm2/mkoumkJWdzbp161i3bh0TJ7o1LORj1NXWMnf2dzHq9ZgiIpk/f36352TdunXk5+fz3nvvAdDQ0MDRo0fRShITcsaRnBCPw2rh/NHncuTQIXLOHY3L6aKhqgKd052WcTqdtDY2EJ2axvMvvsiqVasAKCkp4ejRoyQkBN7noDeaaGxqoq6+nksuuQSAW2+9lU8++aRLuw0GAxdeeCEZGe6n6QkTJlB0sojcSZMwmUzceeedzJs3j6uuugqAL774QnG0AI2NjTQ1NXldYNYZ9G2VJfZ2Qd/MmTOVG8WYMWOoq6ujrKyMbdu2sWTJEl5//XV2797NlCnup1qz2UxycnuxK/na1OIiNj6+3bUJcN111wFdXzd9TUg5brvNSl15GedNv6TX+4qKT0SnN4SEZsmR7VsoO/oNs++5H4MpjDEzLyM7ZyLrX/0XW//zJke2b2HOPQ8wbOSodp+rLHILJiVlBe64DaYwd811D9venR1quDsiSRLhMbE0VlVit1g6VY7YLGYMAeS35R+jTq/vVGoYERGh/P2+++7joYceYv78+cqjt/x5QzeLoEIIfvXLh7l+3pXEpqRhattvUVERRo/cuFarxeyj3HHdunWsXLkSgEcffZR58+axevVq5l59Las/+hAhBL/+9a+5++67233u7889h4T3/L1Op1OeUDzre4UQPP/888yZM6fd9p+tXYvBYCA2JQ290UhUXBwR8QkkZ49Eq9ORkJ5BfHw8wuVCo9USlZhE3qFDfPHFF2zbto3w8HAuvfTSHtcS64xGBG1PZF7wZffGjRs7nWe7zYbRZGLHjh2sX7+ed955hxdeeIEvv/wSl8vFtm3bCOtwbWVkZFBScuZptbS0lIzM4eBy4LDb2jnu9PR06urq+PTTT5
k1axa1tbW8++4uHeWlAAAgAElEQVS7REZGEhUVhRCC2267jT/96U8+/79CiLb8tpkwL/Ne5f+TVqvtch2mLwmpVEltaQkI0euKEnBXP8QMSxnwVInDZmPzv18jaXg2Yy+5XHk9PCaW7/38EeY//BtaG+pZ8ZuH2Pzv19otsFQWniA2JbXHrbzuSTiBizbJlR3drfSbIqPQaDov+DkdDhw2W0A1wzNmzODjjz/G7nTR2NDAmjXeNVEaGhpIT3d3yL3++uuAu7Z2am4uKz/6EHAvXtV5mbk5Z84c3lzxb8xWK+bGBk6dOkVlm8CRL6KiomhqalKO7XA4lCj1+PHjjBs3jgfvv4+ccRdwvLCIOXPm8Morr9DcNu5NPsasSy7hk8+/oLktavz444+VY2RnZ7N7924AJUqV7f3nP/+Jvc3ZHzlyhJaWFqV6wtNBSZLkdtJRUZitNgymMIzhEe4bbFQ0za1m4uLiCA8P5/Dhw2zf3vPpQBqNhoTEJKKiItmyZQtAu/SCL7s7IoRAuARmq5WGhgauvPJKFi9eTF6eW+1v9uzZvPDCC8r28uvz58/njTfeQAjB9u3biYmJIXO4u+/D2wLltGnTWLx4MbNmzWLmzJksWrSImTNnAnD55Zfz3nvvKddBbW0tJ0+212KZMWMGH3/0EWaLBbvL5fPa9MTzugkGIRVx91VFiUxcahq1fTTHsKfs/fRjGioruP43T3tdjPzWlGlknj+OjW8uY8eH73F0xzZm33M/GeeNpbLohCJg3xMCbXuXcTmdCJfotpJFo9EQFhVNa2NDW/eb+3KSh/4GUjM8ZcoU5s+fz8WzLiF12DAmTZxITEzn6OaJJ57ghhtuID09nalTp1JYWIjdauWh+37G/Y/8mkmTJnHJJZcwfHjnBq7Zs2dz6NAhrrr+RlxOJzFxcaxYsQJtFx1wt99+O/fccw9hYWH84he/4Dvf+Y7y3uLFi9mwYQMSMGpENlfNn09YWBiHDh1i2rRpgHth9K233mLy5MlcO/97zLjs24w45xzFcQA8/PDD3Hjjjbz55pt8+9vfVl6/8847KSoqYtKkSQghSEpK4oMPPsBhtyNpNF6fZu666y7mzp1LamoqGzZsUF6/4ooreOmll8jJyeHcc89l6tSpXv+/N910Exs3bqS6upqMjAyefPJJfvzjH3faTm80svgvf+bee+8lPDy8XXTty+6OyBVfrRYLC69bgMViQQjB3//+dwCWLFnCvffeS05ODg6Hg1mzZvHSSy9x5ZVXsnbtWkaNGkV4eDivvvoqGq0WrU7ntSRw5syZrFu3jlGjRpGVlUVtba1y/seMGcPTTz/N7Nmzcblc6PV6XnzxRbKyzvigKVOmcMXs7/Kdq+Yz4pxzyM3N9XptenLZZZfx5z//mQkTJgRlcVJ5DPD1BzABO4B9wEHgye4+M3nyZNETNr65XPz9B1cLp8PRo8/73J+zb/YXKC0N9eL5228UK//0uF/bF+7bI5bee4dYtPAq8dm/lohFN84T21e92+Pjf/i3Z8QrD97j17YFBQXK362traLs2BFhaWnu9nN2m1WUHTsimmqqldcaKitE+YljwuVyBWRvU1OTsFks4vj+fWLSxIli9+7dfn2uua5GlB074vd147DZRNmxI6Kxuiog+3784x+Lbdu2dXq95lSpqCo52e3n68pPi8qThUIIIR5//HHx17/+NaDjCyGEy+USFSeOiYbKioA/25e0NNSLsmNHhN1m7fE+zM1NouzYEWEzm/vEpppTpaK6pLhP9tWRwkMHRc2pUtHS0iImT57s97XpC8/fmwywS3TjW+U//kTcVuDbQohmSZL0wBZJkj4RQvT5JNaakpPEp2X0SAPAG3GpaTgdDpqqq4MuMOSNbe+9jc1i5pJbfuTX9tk5E7lt0YtsefsN9n7mHlowrAcLkzIRsXGUHMgP+HOBNEXo9AaM4eG0NjUQEReHJGkCzm/L3HXXXRQUFNDc1MgtN9/MpEn+TR
exW61o9Xq/rxutXo8xIgJzUyOR8fF+Kx0uW7as02tCCOxWC2GRUV4+0eG4Oj3WlpZedW86HQ5cLlentv7+Rq5Xt1us6PQ9qwBTegX6SLVRZ9Bjblu87Eu5VafDzkO/eoRjhUXY7HZuu+02v6/NYNGt4267EzS3/VPf9icofcPVJcWknzemz/anTA8vP93vjrvmVAn7Pl9LzuVXBCS2bjCF8e077ubcaTM5uuNrn3Ma/SEiJg5LSzMOe+fW9a4484PyL5MWHhNLXdlpLC0tGExhOGw2wqK6d2Qd+fe//w1A1cnCgPLjdqs14Ll/4dEx1LW0YGlp8cvp+sJptyNcLr9U/7T6tsoHh0NZVA0UhyJk1X+O+95772Xr1q3tXrv//vuZd9ks902rB981uG9CkkbTJ1K3+/fv5+Yf/ACX0+G+kUgSRqOR//3vf73et63VzD/+/hwJGcP79bx3hV+/TEmStMBuYBTwohCi92ejA06Hg7jUVNLOPb/P9qnIu5adIrufRwxtWvEqeqORi2/4QY8+n37emF7fxOQmnNaGOqIT/Z/nJ6sC+huJGsLC0er1tDbUK5UGelPPtRy0eoPfSnoupxOn3R7wbFLZZnNjQ68ct6II6KPV3ROdR2dooJ2wZ45nRZKkHke5PeHFF1/0+nrtqdJeKQXKC+B9ER2PGzeOnTv+R93pU8SlpvepNrfV3IJGp+11b0lf4tcvUwjhFEJMADKACyVJ6iRKIEnSXZIk7ZIkaVdVVeBTl7U6HTf87o9MnHNVwJ/1RURcPHqjifp+FpsqPpDPid07uPCaG/3WrQ4G8lzBQBco3T8o/9etJUkiPDoGu8VCS0M9kkbTq8jE3R5u8yulYLe1SbkGeDy3zdHYzGYctp63b59RBOz+R90XKoF2mxWtwTCgAxlk9EYjjjbVwZ7gtDt6pQrYEflm1htp4I6ItjJAox9t7v1JQN++EKIe2Ahc4eW9pUKIXCFEblJSaLSYS5JEbEpqv9Zyu1xONr65jOikZCZfeXW/HdcbPR0a3F0NtzfCoqKRNJK7ptvUWSEvELR6Ay6nyy/pUFmlric5X1NUNJIk0drYMyEukBUBjX79fzVaHZIk4XT0zLEIIXD0IC0ULHRtcqo9UQoUfpacBoJGq0Wj1eDsQ80Sh9WKy+nEEBbR/cb9SLeOW5KkJEmSYtv+HgZ8BzgcbMP6CrfYVP857oJNG6gqOsGMm24b8Eer8DaBnJa6Wr8/I+sjB/oor9FqCYt0pyt6q/msM7RFpn7IdNqtVrQ6HVpt4JGbVqvDGBGJuamxRyL8LpcLh83m91QbeVhETyNul8OBy+kMmTyrwRQGElham7vfuAPC5UK4XAE92XWH+/wa+jTitioyrn03qq0v8CfiTgU2SJKUD+wEPhdCrA6uWX1HbGoaDZUVQZ9+Au7H5q3vvEHKqNGcd/GsoB+vO8Kj2xx3AKkSWR+5J5FQeEwsOoMBY3hkwJ/1JJCUgsNm7VWFRXh0jFv+1g99cW/HFkL4FtrygnvKT88ct5wWGuiKEhmtTocxLBxLc3PA6RKlcqmP54Dq9IY+VQm0tbaiNxp7FBgEk24dtxAiXwgxUQiRI4S4QAjxh/4wrK+IS0nD5XTSUFUR9GPtWr2K5rpaLr31zpDIh2l1OneDTABt770p0dIZDCRmZvX6SUNesOoucnK5nG0Rb2dH1lGP2xd6kwlTRATNtbVef/Bd6XHbLd5HlXWFTtfzYRFKWqiLVIkvPe6uCESPuyOmyGicdjtHv/kmID3uMwFC4A5RdNDj3rNnj/KezmDA5XQqgdrGjRsV7ZOOdHduhMuF3Wrp1bi3YDHwKxxBJrZNbCrYC5TNtTXs+Og9vnXRxX1a0thbIgLsnpTzr32ZewwUXymFjjoQDqvbVr2ha8f59ddfd3msqMRkJI1EQ1VFJ4falR633Wpxp2kCcD5ySWBPngDtVu8TfjzxpsftqSndHd3pcXfEGBGBpNFgbQksXdKTAQoyHf
W4f/KTnyjvaQNYoOzu3Nht7gXyUElNeRJa8X8Q8Jz43vOZyt2z9d23cDmczPrBHUE8SuAE2vbutDuI3vUcmk2dZVF7Rco4mPvnLjd56qmnWLFiBZmZmURHRpAzdiwbtmz1qccdGxPDkr/8iaSsEdTU1HDTTTdRVVXFhRde6FWPG/CqlV1SWsoVc65k8oTx7MnPJzNzuF963B/99z/89g9PKU8ImzZtIioqyusxAJ555hlef/11UpISSU3PYMqFFwakx/3D7y/k/93544D1uGVN6WuuuYaSkpJe63HLyHrcRr2eKZPOlNv6o8edv28f48aM4b22VvhHH32Ujz76qMd63GVlZaSmpirrIw6bTZFcaGxs5Nprr+Wbb75h1qxZ/OMf/0Cj0XR7biytLTzwq0c4ePgbJI2GH/3oRzz44IN+nZtgc9Y77vCYWAxhYUGdP1lZdIIDG79g8pVXE5uSGrTj9ISI2DhOHS7ze3uH3Y7eTw3tvkTWPN67dy8Oh4MJ48dzwXnnAWf0uAHq6urYvn07kiTx/HN/4x/LlvPPi6bx5JNPMmPGDH7/+9+zZs0ali5d2ukY69atU7SyhRDMnz+fTZs2MXz4cI4dP87SF5/n3JEj+dmvHmXlypXccsstvPDCC4pTBdizZw/jx4/H5XTy4ktL+fuiRXznirk0NzdjMpl8HiMiIoJ33nmHXTt3UFFYyBXXLWDKhRd2eU48da1bW1qYNnUqc+e5H/v37t3LwYMHSUtLY/r06WzdupX777+f5557jg0bNpCYmNhpf6+88grx8fGYzWamTJnCggULeiTrKnPHHXfw/PPPM3VKLg8+8ICiPeJpt9VqZfr06cyePbud3WESXDF/Plu3bmXMmDGsWrWKw4cPI0mSkr6Q9bhnzJhBcXExc+bM4dChQ171uE+dOkVqaqpyE/V8WtuxYwcFBQVkZWVxxRVX8P7773P99dd3e24O7N5FeWUl+w8caGdXKHDWO253SWDwKkuEEHz15nJMEZFMve77QTlGb4iIdU9797cN2OmwY535O8KGpfSDdWeQNY9lCc8rr3RrXgshfOpxm1tbyWoTk9q0aRPvv/8+gE89bl9a2bIe9/RLv01N6UnGjB6tCP93RNbjtlutTJk8icd+93sKjhzluuuuIyMjw+cxmpqauPbaa4mKjqE1OpK5V8zxuv+O9sq61sLloq6+nqLiYiKiojrrWhcVtZtC440lS5b0iR43uJUS69v0uIUQ3LDgOjZs3tLJbnnbjnrc1SUnGXfBBRQVFTF16tRe63HL17ZSWeKxXnHhhRcycqRbOuKmm25iy5YtnRy3t3OTlppCcUmpu0t03jzl5hMKnPU5bnCnS4JVy124dxfFB/YxbcH3MUX2rpoiGETExOKw25Tp1F0hhKtXXX29oeOPUVZSFMLVSY/7Zz/7Gfv27ePZPzyJzSOX2d2NSbRpZefl5ZGXl8exY8cU5Tuj0YhWpyMqIQmEC7MXGVJwO6XZs2fjsFq47yd38/KyZZjNZqZOncrhw4e7PIYkSW7HotO3m4XanR53Xl4eW7/ayI6NXzJ33pWKvTL+6EBv3LhR0ePet28fEydO7LEet2ybp7M0hYUjXC63sqSH3Xl5eRQWFipOz2g0KjXcOp0eh8OBTqdjx44dLFiwgA8++ECZ5ynrccv7OXXqFFFRUV71uNPS0s6cT4O+XY6743XR8d/ezk1rSwtR4eFs27yJSy+9lBdffFGZwBMKDA3HnZpGY2UljdWBd3R2ReHeXXy+9HniUtMYP/vKPt13XxHIJBxlpX8AHLesx22xWGhububTzz4D3CPUPJH1uB02K++uWqW05c+aNUvRhO5Kj9ubVrYnpsgotHoDNnMrDrvNpx63zWqh5HQ548eP55FHHiE3N5fDhw/71uOeNYtVq1ZhNptptVr55LN1yjH90eN2WK2cLCnFbO7a2frSgW5oaOgzPW6A2NhYYmJiFD3ulW364paW5m71uGXZYHmsXX
Nzc6/1uFNTz6QodXoDTrtduRnu2LGDwsJCXC4X//nPfzo9mXg7Nw67jZraWrQGAwsWLOCpp55qV70y0Jz1qRKAkZMuZNfHq3jtoZ8wdcH3mTzv6l5VTbTU17Hh9Zf55utNxKdncuV9Dw9oFUZXeA4N7m4Ac29W+nuLrMc9fvx4srKyyM3NJTo6CjpE4rIed2rKMMaPHUtZlbuM7fHHH+emm27yS4+7o1a2px63JEmYIiNobKinsaqS2267rZMetxACh8XCstde4457foJWq2XMmDHMnTsXo9Ho9RiTJk1i4cKFTJgwgfTUVC6akqsc0x89bofNRmJiIqu7mRzeX3rcAK+++qoyLHj27NlIkoSlqalbPW55srt8021qauLqq6/ulR63J3I5qpznnjZtGo8++ij79+9n1qxZXHvtte2293ZunDYb5RUV3Hr3T5QbQFdTcvodf/VfA/nTUz3uYFJfUS4++OtTYtGN88Tyn98tivbtDXgfLpdL5H/5mXjhjoXi7z+4Wnz9338Lu80WBGv7jqriIrHoxnni0NavutyuoKBAtNTXibJjR4TDbu8n69rT1NQkhBCK5vH6tWtEzelSr9vWV5SLisLjAWt++4usN91SX6e8Jutx261uDfKWhvoe7bu5rlb84r6fiWf/8he/tnc6HKLs2BHRXFfbo+P1F021NW0a3V3/JsxNjW4dboslKHbYLBZRduyIaG1q7PE+ak+Xiqrioj60qj39ocd9VhCTPIyrH/4tJ/buZMOrS3nvmd8yeuoMLrn1x0Qndq+tUnv6FF+8/AIlBftJP28s373rZySkZ3b7uYFGTpW0+pUqsSsjsAYCWY/bYrEomse+1OfsNqvfGiE9ISwqGmtLM021NRjCI9Dp9Yoet7nJrW0SSMekJ3Iqyt9abrsfjTehgCkyiubaGizNTYrAmTfOPNkFx/0oKow97KAUQmC3WjGGh5Y+iSdDxnHLjJw4heFjx7Pr4/f536p3ObF3J9MW3OQzfeJ02Nn50ftsf/8ddAYD373rPsZd9t2QUGfzB1NEJBqtzq8ct8Mhy7kOTNenrMctIzsBl8vVrulEuFw4bTaMATSWBIokSUQnJlNdWkxjVQVxqenKeZEVAXsqrarV6Xn4gfuJ8bNyx2Hrfw1uT7zpcT/wwAPccUf7ngWdXo/BFIaluYmI2Dif15HT7mgThOq7AGH//v3ceuutyr8ddhsmo4ldPchLO+12tyZMD2/M/cGQc9zgzoFNXfB9zp95GRvfeJnN/36NAxu/4PI77iErZ4Ky3ekjh/l86fNUl5xk9LSZfPv2u5QIdrAgaTSEx8b6pRDYE1XAYKL1yFVqPJyWQ+5o66ZjstfH1+uJSkiksaoSc1Mj4W0TvgNRBPS1X/Bf3jXQCT99jS89bm+YoqJorKrEYbP6lAIIVDbYH8aNG6csXgLUlZ1WcumBomisByBl0N8MScctI6dPCvfu4stX/+VOn1w0nYtvvJm8dWvIW7eWqPhErvnV7zhn8kUDbW6PiYyNo6W+e4VAp92OMYR0GTwHD3hGm/0pthQWFY2luZmmmmq3JrNWi8Nm65XOukajQaPTBeS4Q0XKtTtMEZE0VVdhaWrqwnE7gj4IQmdwVwaJHowxs1vbnqhCaHBCR4a045YZMTGX28bmuNMnH/yXI//bCpLEpCu+x/SFt4SkyEwghMfG0dRNKaTL5UQIXWhF3D50J+xWizLVO9hIkkR0UjI1pcU0VFUSGR8fsCKgN2Sxqe7o6YSfgUKj1WIMj8Dc0kRkQmInpymEcAcIfTihxhs6veFMvXiANwm7JbjrJ32B6rjb8Eyf5K1bw+ip00kdde5Am9UnRMTGUX7sSJfbyAtloeS4NRoNWi+RqcNqRWfovx+WTq8nKj6RxupKmqrdpWG9fYzW6vV+NUU5ejjhZyAxRUZhaWnGZm7ttMAnN+gEu+RU66FZEojjFi4XDpt1QCdX+YPquDsQkzzM76nsg4WI2DjMjY24XE
6lI7Ejwul2SKFWj+7Wrz4TcQvh6nWqoieERUdjaWnCZjYHrAjoDa1ej7PJ0WnhtSODpaLEE2N4OBqtBktzUyfH3V+9ArKzdgY4VOGMImDo5rdhiHRODnUiYuIQwoW5ixFdLqfcNRla93K5C07G4afUpr963N0hy7q6UybDkDQaPvzkE5555pke7xP8X6B0yBN+/LxR9Lcet0xRUZHyGUmjwRgRiaWlpdNkIXnBsDfX2eHDh5k2bRpGo5FFixZ53Uaj1aLR6XD4MUXJE8/hz3feeWc7rRRv3H777e26XWWKioo6VUn1JarjHgL40/bucjoDmuzeX2j1ekUY3+FwnIlAA0gddKXH3R2eetw6vZ6E9Aw2bd2m6Gn0FF1bxClHoL6wBzjhp7/1uH0RFhmNcLmwdtB8kW9Uml5E3PHx8SxZsoSHH364y+10HZ7W/MFukTXW3XX7Y8b0TFs/2I47tMIrlaAQ7o/jdrmUx9e/7PgLh2v7dqzoefHn8ciFj3S5jaced2JiIpMnT+ajDz9kwgVjyTtwkKuvuYaMYcn8ZdHfEJJEQkICK1asYNiwYb3S4y4qKmLu3LnMmDGDr7/+mvT09C71uPfl5zNp0iS++uorHnjgAQC/9bjfeOMN5f937ohsfvXIIz71uO12O7//w1P8b9du7E6nV13rUNDjDg8Pb6f/4XQ6eex3v2P95+uw2e3c/8DPFbt/+5vHiIuO4WhhoWK3JEkB6XEnJyeTnJzMmjVrurRPpzfw17/9jcTUNEUidt++fXz55ZesX7+eV199lbfeeot169bx+OOPY7VayUhN4R9LlgC0+16WL1/OX/7yF9LS0vjWt76F0WhUdFQ2bdrEc889R3l5Oc8++yzXX389jz76KIcOHWLChAncdtttfa7jHVrhlUpQ8DfiHghVQBlPPe7333+fXbt2Ae7H7sbGJj5bu4Zf/OIX5E6cwOdrPmbv3r18//vf59lnnwVQ9Lj37t3L/PnzlR+7J55a2Xl5eezevZtNmzYBcPToUe69914OHjxIbGwsK1eu5Prrryc3N5cVK1aQl5dHWFgYe/fuZfz48UiSxKJFi3jxxRfJy8tj8+bNhIWF+TzG7t27eeedd9r9/zrqRnfk5aX/Iioqiq1bNrNz505efvllRW527969LF68mIKCAk6cOKHocaelpbFhw4Z2OiUyr7zyCrt372bXrl0sWbKEmpqaXn1nd9xxB0uWLGHbtm3tXl++fDmxsbFs+vJL1q58j5eXLlXszt9/gD/+4cl2dtfW1rJq1SoOHjxIfn4+v/3tb4Ezetw7d+5k5cqVAavz6QwGpubmsnnzZsB9jTU3N2O329myZQszZ86kurqap59+mi+++IKdO3eQM3YsLy1f3m4/p0+f5qmnnmL79u18/vnnHD7cPqgpKytjy5YtrF69mkcffRSAP//5z8ycOZO8vLygDF9QI+4hQERM10ODra2t7onbbXnH7iLjYNBRj/t73/se4I5kr77qShx2d267pLiY//ez+6iqrsFmszFihHuuUV/ocU+Y4G6+mjx5ss+oU9bjBpg+fToPPfQQN998s9963OFtZXDz589H0mhxdJEq+Xzd5+Tn5/PZ+i9BkrzqWsPA63ED3HrrrXzyySfAGT3u//73vzjtNppbWhW7J47PIXN4JhqNRrE7UD3uqKgov2zU6g3kXDCW3bt309TUhNFoZNKkSezatYvNmzezZMkStm/fTkFBAdOnT3cPjW5tZfr06e32s2PHDi655BLi491t/DfccANHjpyp0rrmmmvQaDSMGTOGiorgz7YF1XEPCfQmE4awMJ96JfUV7gk5Wt3ANRx4pjY6EhUVjdNmx2Gz8dgTf+DBhx7ihoULlZSBjL963HfffXe714uKijrpW5vNZq/7WLduHStXrgTc47bmzZvH2rVrmTp1Kl988YXPYyxevLiTfRqtxq1/7kOP2+l08scnHufGH97W7rMbN27slR53eHg4l156aZ/pcX
t77/nnn2fOnDnUlBYDEgkZmWzYsAGDXq+k5GS7ZT3u9evX88477/DCCy/w5ZdfKnrc8s08UHQGA3q9nuGZmbz66qtcfPHF5OTksGHDBo4fP87555/P8ePH+e53v8vbb79Nc20NzXW1JI84p9P/pys8v4vutu0r1FTJEKGrocENsuMewFRJRz1uz/ylRqfDYbfjsFlpampieFYWAK+//rqyTV/pcXfElx43wPHjxxk3blzAetxNTU18/PHHaLRanA47WVlZXvW4L505gzfefkdxyh11rbuz15Ng63HL5x7a64ibIqM4fPgQDXV1uFzeewUC1eP2F41Wi6TRcPHUi1i0aBGzZs1i5syZvPTSS0yYMEGZWbl161aOHTuG3WrB5nRx7Nixdvu58MIL+eqrr6irq8PhcCg37q7w9T30FWrEPUQIj4mjpcFXxF0OcUkDWgroTY87JsatDeJuwrFht1h4+Of3c9MPfkB6ejpTp05Vcqd9pcfdkdtvv72THrfM4sWL2bBhQ8B63FlZWcycOdNdUy/gwZ8/wA9uvqWdHrdwubhpwXWcLi/3qWvtjYHS454z58woto563DGRkbz7ztu4HG2Ou0NpY6B63OXl5eTm5tLY2IhGo1Fy/dHR7btLJUlCZzBw4eTJLPr7YqZNm0ZERAQmk4mZM2cCkJSUxGuvvcZNN91ES1MTkkbDn/78Z0aPHq3sJz09nccee4yLLrqItLQ0xowZo1ybvsjJyUGn0zF+/Hhuv/32vs9z+6v/GsifUNTjHup89NyfxPKf3+31vc/+tUT8b/OmfraoMx31uHfv3u3+d5s2dmXRCVFTWjJg9sl63H3B448/Lv70xz+KsmNHhKWludP7NrNZlB07Isxt52QwU3O6VFSeLBStjQ1uvW6rtd+OXV9RLiqKTnS7XXca6/K1abfbxVVXXSXef//9XtnVWz1uNcataZUAABykSURBVFUyRIiIjaOlzrvQVENF2YApz3ly1113MWHCBCZNmsSCBQuYNGkS4CE25XD0i7CUL5YtW+YzUu0JmrbRXd4qS/pTSCvYhEVG4bTbsTS7UweaftCYkdEZ9Lgcjm61z7tTBHziiSeYMGECF1xwASNGjOCaa67pc1sDQU2VDBEiYuOwmVvbJEnbX5z1FeVdtl33F74aFrQeWhODSbOjK5544gmEEFQWHvfquB1WKxqtpl+EtLrDXz1uXxgjIpE0lVhbW9HotEG51mpqarj88ss7vb7mow/R4hYqM2h9L3J2pwjoq0NzoBj4q0KlXwhv655rbagnJvmMgL/TYaepujokIm5fyItMwuU6KyJQGUmS3FosXkoC7VYrOoMpJBTqAtHj9oZGo8EYHomluSloGiUJCQleFy8dNhvVJSdx2uxg6sJxDwJFQE8GPsxS6RciY901qB0rSxoqKxHChUYbuvdwSZLQtU3mCbaOc3+j1XeWdxVCtA0iOHtuUmFttdf9LWImT3TqqvVdVgQMdWEpT1THPUTw1fYulwJKIRxxAxjDIzBFRg2aiMhfZF1u4VH/KwtpnU1PF4awcHQGA4Yuot5goDzVdOG47TZrn2is9yehG2ap9Cln2t7bK8XJzTehnCoBiIzvWYdfqKPV6xFC4HI6lGhUWSgbRFKu3SFJEomZWQNybJ3BoIiTecNukTXPB4/jViPuIUJ4dAxIUqeIu76iHJ3RGBKLk0ORM/KuZzofHTYrkkYTUkMtBjOyNLDoIDErY7da+kRjvT9Rf61DBI1WS3h0TKe294bKcmKT/Zs2Ppjoaz1uT95+++1e63HLeNPltlt7vlAWCnrcwcYfPW5Puy6a4W62cXRYS5D1tr1VWoU6quMeQkTExHbqnqwvLyNmWOoAWRQY3elx+KKv9LhlPv30017rcctodXqQwOFw52CFEMpotp4QKnrcwcRfPW6Fthtgx2k4y5Yt49xzR7uHUQ+i/DaoOe4hRXgHvRIhBA2VFWSPn9Ruu/I//hHrob7V4zaefx4pjz
3W5Tbe9LhXr17NxRdfzNatW5k/fz6jR4/m6aefxmazDZged15eXq/1uJOSkpg8eTIPP/wwC26+laefeILL5lxBRdlppsy6lCOHCnA6nTz66KNs3LgRq9U6qPS4A7E7WHrcnvbc/8tfUfDNEc47/3zeeOMNRWjrmaee4py0FB785a/Ys3cvZrOZ66+/XvnOvNkVCqiOewgRERtH7elS5d8tdbU4bFZiQyDi9tTjdjgcTJo0icmTJwNQX1/PV199BUBdXR3bt29HkiSWLVvGs88+y9/+9jdFj/v3v/89a9asYenSpZ2O4amVLYRg/vz5bNq0ieHDh3P06FHefvttXn75ZW688UZWrlzJLbfcwgsvvKCI6QPs2bOnkx739OnTaW5uxmQy+TxGRESEosfd8f8nSZIy0stuc0eFOqOR5cuXExMTw86dO7FarUyfPp3Zs2cDbj3ugwcPkpaWxvTp0xU97ueee44NGzaQmJjY6f//yiuvEB8fj9lsZsqUKSxYsKDHsq7g1uN+/vnnueSSS/jlL3+pvB6o3WPGjGHVqlUcPnwYSZKU1I6sxz1jxgyKi4uZM2cOhw4dCtjOb775hkXPPM2MGdN56Ne/4R//+IcSrTttVpDgj3/6E4mJiTidTi6//HLy8/PJyMjwalco0K3jliQpE3gDSAFcwFIhxP8F2zCVviciNo7W+jpFklOuKIkdloKniGl3kXEw8KXHDbBw4ULl76WlpSxcuJCysrJBr8ctI0kaZeanUx7NpjcoutayYuBg0+P21+5g6XHLZGZmMv3ii3HY7Nxyyy3t0ix2uw2dwch7773H0qVLcTgclJWVUVBQwJgxY7zaFQr4k+N2AL8QQpwPTAXulSSpZ4PYVAaUiNg4nA6HMgewvqIcgJiUgY+4PVMbHYmIODMp/L777uNnP/sZ+/fv51//+lc7TWl/9bjz8vLIy8vj2LFjivKdv/rW69atU6LHRx99lGXLlmE2m5k6dSqHDx/u8hi+7NPp9Tjb9DSampqQJAlJkhRda3lfhYWFyrF7o8e9b98+Jk6cGHQ9bn/tlvW4FyxYwAcffKCsH8h63PJ+Tp06FbDTBlklUI+zbRiHp91Oq43T5RUsWrSI9evXk5+fz7x587BYLD7tCgW6ddxCiDIhxJ62vzcBh4D0YBum0vd0bMJpqChD0miITkweSLOArvW4PWloaCA93X35DXY9bpns7CzyDxzEYbfzwYcfKo7FU9caBqcetz92B0uPW6a4uJhde/MQQvDvf/9beTIRQuASLlqtViIiIoiJiaGiokJ5cvBlVygQUI5bkqRsYCLwv2AYoxJcImJkx11LQkYm9RXlRCcmhUT9ald63J488cQT3HDDDWeFHrfMQw/9goU33sj7q9cwbUoutAWEHXWtB6sed3d2B0uPW+b888/n3//5D1s3b+Hcc8/lJz/5CYBS1z15yhQmTpzI2LFjGTlypDK6zJddIYG/+q9AJLAbuM7H+3cBu4Bdw4cP741UrUqQqC4pFotunCcKNm8QQgjx1mMPinef+o0Qwrs+cH/jS487VOhrPe6//vWvQgghnE6nKDt2RFQVF4myY0eEzWLpk2OonMHpcIiyY0dEc12t8lpDZYUoP3FMuFyufrent3rcfoVakiTpgZXACiHE+z5uAEuBpQC5ubn9M3hNJSA6Tnuvryhn9EU9b07pa+666y4KCgqwWCzcdtttih53qLBs2bKg7Fejccu3Omw2JR+r0rdotFo0Wi0O25labnfjzeBRBPTEn6oSCVgOHBJCPBd8k1SChTEiAq1OR0t9HdbWFixNje0kXgcaX3rcZyOeQ47B3YjjdDjQGQxIUmj1xfVWj7s/8KXHvX79emVNQmcwKGJTLpcLh81GeIz/zUmhhD8R93TgVmC/JElydv4xIcTa4JmlEgwkSSK8rSSwvrytFDAEKkpU2lrfLeaQVATsrR53f+BLj9sTnd6ApaVJkc0Vg0wR0JNuHbcQYgvKconKYC
cyNp6WhnqlFDAUmm9UzmiWDDbNjMGE1mDA1ejC5XRit3Q9qizUCa1nMpWgI7e9ezbfqAw88siss2l4QqghD+Fw2m2DUhHQE9VxDzEiYmNpqa+joaKMsOgYDGHhA22SCu5BEfHpGYM2AhwMyIu+DpvdrcA4SNMkoDruIUdEbBzmpkZqT59So+0QQpKkfp8OM9TQaHVIGg02S6tbEXAQ3yRVxz3EiIiNAyGoOHHsrM5vDxY97r5mKOhxr1ixgpycHHJycrj44ovZt2+fX5+TZ5bKkg/dOW5Zr7srbr/9dkWTxZOioqKgVkmpjnuIIbe9O2zWQaPDLXM26nH3NUNBj3vEiBF89dVX5Ofn87vf/c6rPK0vdAb3qDgkuq3gWbZsGWPG9EyWKdiOe3Bm5lV6jNz2Dr4XJje/e4TqkuY+PW5iZiQzbxzd5TZDVY/70ksvVaRjq6uryc3NpaioSNXj9qHH7fkENXXqVEpLSzvZJvPss89iMv3/9u4/Oqr6zOP4+wuETEgokIKWgIgobokliRA1EggEFSu6lFXK0V22gFBdfxy3tniWta5ShR7lIFup6+4GAREttucEttYfrSIpoZaUSUiMhojhl4iEgIFKfhES8t0/ZjIdkkwykIS5M/m8zpmTyc3NzPPMvXly7/fe+1wXjzzyCI8++ii7CvL59bq17NjpJvvJn/Haa6/x3nvv8dRTT1FfX8+VV17JunXriIuLO2e5rFmzhueee46EhARGjx5NdHS0r49Kbm4uK1eu5OjRoyxfvpxZs2axePFiSktLSUlJYe7cuTz66KNBfabB0hZ3D9N89SQ461RA/37cmzZtIj8/3/ez5n7cP/nJT5g4cSJ5eXkUFhZy9913s3z5cgBfP+7CwkJmzJjh+2P3598ru6ioiIKCAnJzcwEoKyvjoYceoqSkhIEDB5Kdnc2sWbNITU3l9ddfp6ioiJiYGAoLC1v14y4qKmL79u3ExMQEfI+CggJfP+5Nmzbhdrs7/Ez8+1q73W5Wr17t681SWFjo69Gxf/9+Xz/uhIQEcnJyzulT0mzt2rUUFBSQn5/PqlWrqKysvKBl1Wz+/PmsWrWKHTt2dCruEydOsHnzZkpKSiguLuaJJ54A/taP2+12k52dzcKFC9v8jJrb7LYlIyOD7du3A551rKa2joaGBty7ipg0aRJfffUVS5cuZcuWLezatYvU1FRWrjz3OsMjR47wzDPPkJeXx/vvv8+nn557k5Hy8nL+9Kc/8dZbb7F48WIAnn32WSZNmkRRUVGXF23QFneP089vtznQxTcdbRl3h57cjzsQ9eNuvx93Tk4Oa9as8XUobMv48eMpKCigqqqK6OhoUlJS+OjjT8hzu5kzbx55eXns3r3b11jqzJkzvgZhzXbu3MnkyZOJj48H4Pvf/z6fffaZ7+czZ86kV69eJCYmUlFRcUGf5/lS4e5hovpGE90vlqazZx11ua//0EZLLftx//jHP2bGjBm+Xe9mwfbjvv/++8+ZfvDgwVZ9ouvq6lr+OuApStnZ2YCnH/ftt9/OO++8Q1paGlu2bAn4Hr/4xS8C9+Pu04cmb6c6/x7Z1tvX2r/zHnh6a3emH3fzbbu6ux93sHE3973+4IMPeOONN3jxxRfZunWrrx938z9zf8XFxSxcuJB333233X8+UVFRjBw5knXr1jFhwgSSkpIoKi3lwMGDjBkzhn379nHLLbewcePGdnNtj39OHc3bVTRU0gP1GziIAZd+y1HNdXp2P+6RFBQUAJxzhoL6cbfdj/vQoUPceeedbNiwgauv7njvMCMjgxUrVpCRkcGkSZPIWr2alJQUjDGkpaXx4YcfsnfvXgBqa2vP2ZoGuP7669m2bRsnT56ksbHR94+7PYGWQ1fRFncPdPUN6Y67Qq8n9+NetGgRs2fPZsOGDUydOtU3Xf242+7H/fTTT1NZWcmDDz4IePZY/I+JtDRp0iSWLVvGjTfeSGxsLC6Xy/f5DxkyhFdeeYV77r
mHeu9t45YuXXrOP4Rhw4bx+OOPc8MNN5CQkEBiYmKb66a/pKQk+vTpQ3JyMvPmzev6ce5g+7+ez2P8+PGdbVcrF5n6cXesu/pxi/M1r5sNDQ32jjvusJs2berU612UftwiF0NP7cctzrdkyRK2bNnC6dOnmTZtGjNnzgxpPMZ2w2B6amqqbW/XRZyntLSUMWPGhDoMcaBI6cftJG39vRljCqy1qcH8vra4RaRdkdKPO5LorBIRkTCjwi0iEmZUuEVEwowKt4hImFHhloikftwePb0fd3txBdNv26lUuCVsqB93x9SPO3id6bcdajodUFrJeSWLY5/v79LXvOTyUWTOa/8PTP241Y+7q/txg+cf/ty5cyksLOTqq6/m1Vdf9TXaav7cH3jgAdxuN3V1dcyaNcu3zNqKywlUuMUR/PtxNzY2Mm7cOMaPHw/8rR83wMmTJ8nLy8MYw8svv8zy5ct5/vnnff24n3zySd5++22ysrJavYd/r2xrLTNmzCA3N5cRI0ZQVlbGxo0bWb16NbNnzyY7O5s5c+bw4osv+v64AXbt2tWqH3d6ejrV1dW4XK6A7xEbG+vrx90yv0D8+1rX19eTnp7OtGnTAE9f65KSEhISEkhPT/f14165ciU5OTkMHjy41eutXbuW+Ph46urquO6667jrrrs6dXHK/Pnz+eUvf8nkyZN57LHHLjjuxMRENm/ezKeffooxxje009yPe+LEiRw6dIhbb72V0tLSVp9Re/24Afbs2cOaNWtIT0/n3nvv5aWXXmLRokXnzLNs2TLi4+M5e/YsN910E8XFxQwfPrzNuJxAhVta6WjLuDuoH3dr6sfd+X7cAJdddpmv3/acOXNYtWpVq8L9m9/8hqysLBobGykvL2f37t0kJia2GZcTqHCLI7TXekH9uNWPu6Vg+3FD6/Wi5fcHDhxgxYoVuN1uBg0axLx58zh9+nTAuJxAByfFEdSPW/24u6sf96FDh3y3V9u4cWOrPZNTp04RGxvLgAEDqKio8O05BIrLCbTFLY6gftzqx91d/bjHjBnD+vXruf/++xk9ejQPPPDAOT9PTk7m2muv5ZprrmHUqFG+YZVAcTmBugMK4IzugNXV1cTFxVFbW0tGRgZZWVmOau26cOFCFi5cGLDonY8lS5YQFxfXaqxVegZ1B5SIoX7cIsFR4RbH+NWvfhXqEC4a/4OqTqd+3M6jwi0+7Z0pID2X+nF3ra4YntZZJQKAy+WisrKyS1YqEWmbtZbKykpcLlenXkdb3ALA8OHDOXz4MMePHw91KCIRzeVy+S5CulAq3AJAVFSU7ypEEXE2DZWIiISZDgu3MWatMeaYMeaTixGQiIi0L5gt7lcAZzYfFhHpgTos3NbaXODERYhFRESC0GVj3MaY+4wx+caYfJ2ZICLSfbqscFtrs6y1qdba1CFDhnTVy4qISAs6q0REJMyocIuIhJlgTgfcCOwA/s4Yc9gY03ZzXhERuSg6vHLSWnvPxQgE4GxVFbaxkT5t3C9QREQ8HDNU0lRXR1nGZE6seyXUoYiIOJpjCnevmBhikpOpznHGzThFRJzKMYUboH/mFOrL9nLmiy9CHYqIiGM5qnDHZWYCUO13k1MRETmXowp33xEj6HvVlVSpcIuIBOSowg3QP3Mqte58zlZVhToUERFHclzhjsvMhMZGarZvD3UoIiKO5LjCHZOcRO9Bg6jaquESEZG2OK5wm969iZsyhercXGxDQ6jDERFxHMcVboC4zCk0nTpF7a7CUIciIuI4zizc6emYqCidFigi0gZHFu5esbH0S0ujKmcr1tpQhyMi4iiOLNwA/adm0vD5Ic4cOBDqUEREHMWxhTtuyhRAV1GKiLTkqMJ9dP/XfH28DoCooUOJThyj0wJFRFpwTOGur2vkty8UsfN3+33T+k/JpK6wkMaTJ0MYmYiIszimcEfH9GFs8lnK3BWcPFoDQNzUqdDURPW2bSGOTkTEORxTuKn7K9d++RC9TQ
Putw8C4LomkT6XXEJ1zh9DGpqIiJM4p3DHDCRm8gKSYn5HmbuCE+U1GGOIy8ykZvt2ms6cCXWEIiKO4JzCDZD2ICmX5tGn1xny3/acBhiXOYWm2lpqd7pDHJyIiDM4q3D37UfMLT/ybHXnV3DiSA2xaWkYl4vqrbqlmYgIOK1wAyTfQ8rle4jqVY/7rX30crmITU+n6o85uopSRAQnFu5evYmZvpixMW+xd9dxKo9U0z9zCo1HyqnfsyfU0YmIhJzzCjfAVTdzbeIJokw9+W9+5rmK0hhdRSkiglMLN+C6/ack9XuLvUUn+brehStpLFU6LVBExLmFm2+NJeX6PkSZOtz/t5v+mVM5XVxMw7FjoY5MRCSknFu4Add3/43kuN+z7+NqzoydCKCrKEWkx3NM4bbW8nz+8xQdK/rbxAHDSM4cSl9TQ3H+CaKGDaNaTadEpIdzTOH+uv5rcr7IYcEfFvD7g7/3TXdNfZikAVvZV9rA2fTp1OzYQVNdXQgjFREJLccU7oGugbx222t8Z/B3eGzbY7z88cue87ZdA0ienkhfU0OZHYU9fZqaHXmhDldEJGQcU7jBU7yzpmVx2xW38cKuF/jZjp/R0NSAa8Jckr75IZ8f/wY1Q0brtEAR6dEcVbgBontH8+ykZ/nh2B+SXZbNwx88TFVTPSl33UhfU8Ohb/+95yrKpqZQhyoiEhKOKtz/u20fefsrMRgeGfcIT094mp3lO/nBuz/gxFXXkTy0kPLeV/LXOhenS0pCHa6ISEg4pnBX1zfyP9v2cXdWHtP+M5f1fz7ITZfdwUs3v8TRmqP847v/RN87vk1fU8OBkdOpUtMpEemhgircxpjvGmP2GGP2GmMWd0cgcdF9+PPim1h+VxIxfXvz1JslpP38A97Mi+Op1JeI6hXFfaUrGDCimK8GJ/Plto+6IwwREcczHXXcM8b0Bj4DbgEOA27gHmvt7kC/k5qaavPz8zsVWPHhv/Ja3uf8tugI9Y1NJF1uqP/my1RWfcmCnU8QX7mXO1+YTdSwYZ16HxERJzDGFFhrU4OZN5gt7uuBvdba/dbaM8AbwPc6E2AwkoYPZPmsZHY+fjP/cUci1TX92FP4z5w5cwUlg//IV4OTOfjrN7s7DBERx+kTxDzDgC/8vj8M3NA94bQ2oF8UCyZewb3pI9mxr5JXdwznL4PWcE1FLVtLEtg27/WLFYqISLuMrWHB+vu6/X2CKdymjWmtxleMMfcB9wGMGDGik2G1EYQxTLhqMBOuGkzFqbFsXvoMfSriu/x9REQulDH1F+V9ginch4HL/L4fDhxpOZO1NgvIAs8Yd5dEF8Cl33DxL8uXdedbiIg4VjBj3G5gtDHmCmNMX+BuQIPLIiIh0uEWt7W20RjzMPAHoDew1lqrq19EREIkmKESrLXvAO90cywiIhIEx1w5KSIiwVHhFhEJMyrcIiJhRoVbRCTMqHCLiISZDptMXdCLGnMc+PwCf30w8FUXhuM0kZ4fRH6Oyi/8OTHHy621Q4KZsVsKd2cYY/KD7ZAVjiI9P4j8HJVf+Av3HDVUIiISZlS4RUTCjBMLd1aoA+hmkZ4fRH6Oyi/8hXWOjhvjFhGR9jlxi1tERNrhmMJ9MW5IfLEYYw4aYz42xhQZY/K90+KNMe8bY8q8Xwd5pxtjzCpv3sXGmHGhjb41Y8xaY8wxY8wnftPOOx9jzFzv/GXGmLmhyKUtAfJbYoz50rsMi4wx0/1+9u/e/PYYY271m+7IddgYc5kxJscYU2qMKTHG/Kt3eiQtw0A5RsxyPIe1NuQPPO1i9wGjgL7AR0BiqOPqRD4HgcEtpi0HFnufLwae8z6fDryL505DacBfQh1/G/lkAOOATy40HyAe2O/9Osj7fFCoc2snvyXAojbmTfSun9HAFd71treT12FgKDDO+7w/npt/J0bYMgyUY8QsR/+HU7a4Q3JD4ovse8B67/P1wEy/6a9ajzxgoDFmaCgCDMRamwucaDH5fP
O5FXjfWnvCWnsSeB/4bvdH37EA+QXyPeANa229tfYAsBfP+uvYddhaW26t3eV9XgWU4rmXbCQtw0A5BhJ2y9GfUwp3Wzckbu9DdzoLvGeMKfDeixPgUmttOXhWMuAS7/Rwzf188wnHPB/2DhWsbR5GIMzzM8aMBK4F/kKELsMWOUIELkenFO6gbkgcRtKtteOA24CHjDEZ7cwbabkHyifc8vxv4EogBSgHnvdOD9v8jDFxQDbwI2vtqfZmbWNauOYYccsRnFO4g7ohcbiw1h7xfj0GbMaz+1XRPATi/XrMO3u45n6++YRVntbaCmvtWWttE7AazzKEMM3PGBOFp6C9bq3d5J0cUcuwrRwjbTk2c0rhjpgbEhtjYo0x/ZufA9OAT/Dk03wUfi7wW+/zN4EfeI/kpwFfN+++Otz55vMHYJoxZpB3d3Wad5ojtTjO8A94liF48rvbGBNtjLkCGA3sxMHrsDHGAGuAUmvtSr8fRcwyDJRjJC3Hc4T66GjzA8+R7M/wHNH9aajj6UQeo/Acif4IKGnOBfgm8AFQ5v0a751ugP/y5v0xkBrqHNrIaSOe3cwGPFskCy4kH+BePAeB9gLzQ51XB/lt8MZfjOcPd6jf/D/15rcHuM3p6zAwEc/ufjFQ5H1Mj7BlGCjHiFmO/g9dOSkiEmacMlQiIiJBUuEWEQkzKtwiImFGhVtEJMyocIuIhBkVbhGRMKPCLSISZlS4RUTCzP8D+xhIVzwW0+gAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "plot_gradients(bad_trial)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `VanishingGradient` rule provided by Tornasole alerts for this automatically." + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAADJVJREFUeJzt3XuoXeWZx/Hvo7YS0iiRqo1pnNR6YQYD6RBk0EEzFkWHgvaPSkSGDBZTRGEKBUdEaEAGZOhlCkLhFEMTaG0LtRqhjBUpZBqGkESlps201RLt0ZC0KsZ4i5dn/jhLOKZnr73d95Pn+4Fw9l7Pujxs8jvv2metvd/ITCTVc9KkG5A0GYZfKsrwS0UZfqkowy8VZfilogy/VJThl4oy/FJRp4zzYBHh7YTSiGVm9LLeQCN/RFwTEb+LiGci4s5B9iVpvKLfe/sj4mTg98BVwCywG7gxM3/bso0jvzRi4xj5LwGeycw/ZuYx4EfAdQPsT9IYDRL+lcCf5j2fbZZ9SERsiog9EbFngGNJGrJB/uC30KnFX53WZ+YMMAOe9kvTZJCRfxZYNe/5p4EXB2tH0rgMEv7dwAUR8ZmI+DiwAdg+nLYkjVrfp/2Z+W5E3A48CpwMbMnM3wytM0kj1felvr4O5nt+aeTGcpOPpMXL8EtFGX6pKMMvFWX4paIMv1SU4ZeKMvxSUYZfKsrwS0UZfqkowy8VZfilogy/VJThl4oy/FJRhl8qyvBLRRl+qSjDLxVl+KWiDL9UlOGXijL8UlGGXyrK8EtFGX6pKMMvFWX4paL6nqIbICIOAK8B7wHvZua6YTQlafQGCn/jnzLzL0PYj6Qx8rRfKmrQ8Cfwi4jYGxGbhtGQpPEY9LT/ssx8MSLOAh6LiP/LzB3zV2h+KfiLQZoykZnD2VHEZuBoZn6jZZ3hHExSR5kZvazX92l/RCyNiGUfPAauBvb1uz9J4zXIaf/ZwM8i4oP9/DAz/3soXUkauaGd9vd0ME/7pZEb+Wm/pMXN8EtFGX6pKMMvFWX4paIMv1TUMD7Vpy5uuumm1vrNN9/cWr/nnnta6wcPHuxYe/PNN1u3ff7551vry5cvb62/8sorfW/fbVuNliO/VJThl4oy/FJRhl8qyvBLRRl+qSjDLxXlR3rHYOfOna31Sy+9dGTHPnLkSGt9165drfVzzz23td7tPoG27btt280tt9zSWn/uuecG2v9i5Ud6JbUy/FJRhl8qyvBLRRl+qSjDLxVl+KWi/Dz/GDzyyCOt9W7X+ffv399aX7lyZcdat+v877zzTmv9vPPOG6je5qKLLup7W4DbbruttX7HHXcMtP8TnSO/VJThl4oy/FJRhl8qyvBLRRl+qSjDLxXV9Tp/RGwBvgAczsyLm2VnAD8GVgMHgBsy0y9h7+DKK68caPtun1vv9n0B0+qNN95orS9ZsqS1ftpppw2znXJ6Gfm/D1xz3LI7gccz8wLg8ea5pEWka/gzcwfw8nGLrwO2No+3AtcPuS9JI9bve/6zM/MgQPPzrOG1JGkcRn5vf0RsAjaN+jiSPpp+R/5DEbECoPl5uNOKmTmTmesyc1
2fx5I0Av2GfzuwsXm8EXh4OO1IGpeu4Y+IB4D/BS6KiNmI+DJwL3BVRPwBuKp5LmkR6fqePzNv7FD6/JB7WbQuv/zy1voVV1zRWn/00Udb6/v27fvIPU2La6+9tmPt1FNPHWjfDz744EDbV+cdflJRhl8qyvBLRRl+qSjDLxVl+KWi/OruHp155pkda/fdd1/rthHtMybffffdrfVXX321tT7NzjnnnI61k04abOx54YUXBtq+Okd+qSjDLxVl+KWiDL9UlOGXijL8UlGGXyrK6/w9uvrqqzvW1qxZ07rt9u3bW+t79uzpq6fFoO3+iG6OHj3aWn/rrbf63rcc+aWyDL9UlOGXijL8UlGGXyrK8EtFGX6pKK/z9+ihhx7qWNu4cWPHGsDevXuH3c6isWHDhr633b17d2v92Wef7XvfcuSXyjL8UlGGXyrK8EtFGX6pKMMvFWX4paK6XuePiC3AF4DDmXlxs2wzcAvw52a1uzLz56Nqchq8/vrrHWvbtm0bYyd1vP3225Nu4YTWy8j/feCaBZZ/OzPXNv9O6OBLJ6Ku4c/MHcDLY+hF0hgN8p7/9oj4dURsiYjlQ+tI0lj0G/7vAp8F1gIHgW92WjEiNkXEnog4cb+oTlqE+gp/Zh7KzPcy833ge8AlLevOZOa6zFzXb5OShq+v8EfEinlPvwjsG047ksall0t9DwDrgU9GxCzwdWB9RKwFEjgAfGWEPUoaga7hz8wbF1h8/wh6kT5kZmZm0i2c0LzDTyrK8EtFGX6pKMMvFWX4paIMv1SU4ZeKMvxSUYZfKsrwS0UZfqkowy8VZfilogy/VJThl4oy/FJRhl8qyvBLRRl+qSjDLxVl+KWiDL9UVNev7pbanH766a31pUuXdqwdO3asdduXXnqpr57UG0d+qSjDLxVl+KWiDL9UlOGXijL8UlGGXyqq63X+iFgFbAM+BbwPzGTmdyLiDODHwGrgAHBDZr4yulY1jdasWdNaP//88zvWZmdnW7fdsWNHXz2pN72M/O8CX8vMvwX+AbgtIv4OuBN4PDMvAB5vnktaJLqGPzMPZuYTzePXgP3ASuA6YGuz2lbg+lE1KWn4PtJ7/ohYDXwO2AWcnZkHYe4XBHDWsJuTNDo939sfEZ8Afgp8NTOPRESv220CNvXXnqRR6Wnkj4iPMRf8H2Tmg83iQxGxoqmvAA4vtG1mzmTmusxcN4yGJQ1H1/DH3BB/P7A/M781r7Qd2Ng83gg8PPz2JI1KL6f9lwH/AjwdEU81y+4C7gV+EhFfBp4HvjSaFjXNbr311km3oD51DX9m/gro9Ab/88NtR9K4eIefVJThl4oy/FJRhl8qyvBLRRl+qSi/ulsDOeUU/wstVo78UlGGXyrK8EtFGX6pKMMvFWX4paIMv1SU4ZeKMvxSUYZfKsrwS0UZfqkowy8VZfilogy/VJQfxlarZcuWtdbXrl3b97537tzZ97YanCO/VJThl4oy/FJRhl8qyvBLRRl+qSjDLxXV9Tp/RKwCtgGfAt4HZjLzOxGxGbgF+HOz6l2Z+fNRNarJWLJkSWv9wgsv7HvfTz75ZN/banC93OTzLvC1zHwiIpYBeyPisab27cz8xujakzQqXcOfmQeBg83j1yJiP7By1I1JGq2P9J4/IlYDnwN2NYtuj4hfR8SWiFjeYZtNEbEnIvYM1Kmkoeo5/BHxCeCnwFcz8wjwXeCzwFrmzgy+udB2mTmTmesyc90Q+pU0JD2FPyI+xlzwf5CZDwJk5qHMfC8z3we+B1wyujYlDVvX8EdEAPcD+zPzW/OWr5i32heBfcNvT9KoRGa2rxDxj8D/AE8zd6kP4C7gRuZO+RM4AHyl+eNg277aD6ap020K7u3bt7fWV69e3bG2fv361m0PHz7cWtfCMjN6Wa+Xv/b/ClhoZ17TlxYx7/CTijL8UlGGXyrK8EtFGX6pKMMvFdX1Ov9QD+Z1fmnker3O78gvFWX4paIMv1SU4ZeKMvxSUY
ZfKsrwS0WNe4ruvwDPzXv+yWbZNJrW3qa1L7C3fg2zt7/pdcWx3uTzVweP2DOt3+03rb1Na19gb/2aVG+e9ktFGX6pqEmHf2bCx28zrb1Na19gb/2aSG8Tfc8vaXImPfJLmpCJhD8iromI30XEMxFx5yR66CQiDkTE0xHx1KSnGGumQTscEfvmLTsjIh6LiD80PxecJm1CvW2OiBea1+6piPjnCfW2KiJ+GRH7I+I3EfFvzfKJvnYtfU3kdRv7aX9EnAz8HrgKmAV2Azdm5m/H2kgHEXEAWJeZE78mHBGXA0eBbZl5cbPsP4GXM/Pe5hfn8sz89ynpbTNwdNIzNzcTyqyYP7M0cD3wr0zwtWvp6wYm8LpNYuS/BHgmM/+YmceAHwHXTaCPqZeZO4CXj1t8HbC1ebyVuf88Y9eht6mQmQcz84nm8WvABzNLT/S1a+lrIiYR/pXAn+Y9n2W6pvxO4BcRsTciNk26mQWc/cHMSM3Psybcz/G6ztw8TsfNLD01r10/M14P2yTCv9BXDE3TJYfLMvPvgWuB25rTW/Wmp5mbx2WBmaWnQr8zXg/bJMI/C6ya9/zTwIsT6GNBmfli8/Mw8DOmb/bhQx9Mktr8nJoJ7aZp5uaFZpZmCl67aZrxehLh3w1cEBGfiYiPAxuA9tkexyQiljZ/iCEilgJXM32zD28HNjaPNwIPT7CXD5mWmZs7zSzNhF+7aZvxeiI3+TSXMv4LOBnYkpn/MfYmFhAR5zE32sPcJx5/OMneIuIBYD1zn/o6BHwdeAj4CXAu8Dzwpcwc+x/eOvS2no84c/OIeus0s/QuJvjaDXPG66H04x1+Uk3e4ScVZfilogy/VJThl4oy/FJRhl8qyvBLRRl+qaj/ByIrn2iuQxcHAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "input_image = (bad_trial.tensor('sequential1_input_0').step(2700).value[83]*255).reshape(28,28)\n", + "plt.imshow(input_image, cmap=plt.get_cmap('gray'))\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYYAAAD8CAYAAABzTgP2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAFA9JREFUeJzt3W+MXXed3/H3xzZhO2y76xADqZ3xGK3bbrZqQ3vXbIugK/LPaFcxD4IaNIu8VdBopU27Ld12Q60qkqklKFVpH6Qts/xp2s5uCNlVsVBLGgJU+2RTjyEFnDSKCbEzJAsGBxZ1VqQm3z64x5s5s3fiGd87Pte+75d0de7vd35nzldX9nzmd/7ck6pCkqTztnRdgCRpvBgMkqQWg0GS1GIwSJJaDAZJUovBIElqMRgkSS0GgySpxWCQJLVs67qAi3HNNdfUzMxM12VI0mXl+PHj362qHRcad1kGw8zMDIuLi12XIUmXlSSn1jPOQ0mSpBaDQZLUYjBIkloMBklSi8EgSWoxGCbUwgLMzMCWLf3lwkLXFUkaF5fl5aoazsICzM3B8nK/fepUvw0wO9tdXZLGgzOGCXTo0MuhcN7ycr9fkgyGCXT69Mb6JU0Wg2ECTU9vrF/SZDEYJtCRIzA11e6bmur3S5LBMIFmZ2F+HnbvhqS/nJ/3xLOkPq9KmlCzswaBpMFGMmNIsj/Jk0lOJrl7wPr3JXk8yVeTPJJk94p1B5M81bwOjqIeSdLFGzoYkmwF7gXeAVwPvDvJ9auGfQXoVdVfAx4E/mWz7dXAPcCbgX3APUm2D1uTJOnijWLGsA84WVVPV9WLwP3AgZUDquqLVXX+yvk/BHY1728FHq6qs1X1AvAwsH8ENUmSLtIogmEn8OyK9lLTt5Y7gf9+kdtKkjbZKE4+Z0BfDRyY/ArQA/7ORWw7B8wBTHvBvSRtmlHMGJaA61a0dwHPrR6U5CbgEHBbVf1oI9sCVNV8VfWqqrdjxwUfWSpJukijCIZjwN4ke5JcBdwBHF05IMmbgI/SD4XvrFj1EHBLku3NSedbmj5JUkeGPpRUVeeS3EX/F/pW4BNVdSLJYWCxqo4CHwZ+Evh0EoDTVXVbVZ1N8gH64QJwuKrODluTJOnipWrgIf2x1uv1anFxsesyJOmykuR4VfUuNM6vxJAktRgMkqQWg0GS1GIwSJJaDAZJUovBIElqMRgkSS0GgySpxWCQJLUYDJKkFoNBktRiMEiSWgwGSVKLwSBJajEYJEktBoMkqWUkwZBkf5Ink5xMcveA9W9L8uUk55Lcvmrdj5M81ryOrt5WknRpDf1ozyRbgXuBm4El4FiSo1X1+Iphp4FfBX5zwI/4k6q6Ydg6JEmjMXQwAPuAk1X1NECS+4EDwJ8GQ1U906x7aQT7kyRtolEcStoJPLuivdT0rddPJFlM8odJ3jmCeiRJQxjFjCED+moD209X1XNJ3gh8IcnXquobf2YnyRwwBzA9PX1xlUqSLmgUM4Yl4LoV7V3Ac+vduKqea5ZPA18C3rTGuPmq6lVVb8eOHRdfrSTpFY0iGI4Be5PsSXIVcAewrquLkmxP8urm/TXAW1hxbk
KSdOkNHQxVdQ64C3gIeAJ4oKpOJDmc5DaAJD+fZAl4F/DRJCeazX8WWEzyv4EvAh9cdTWTJOkSS9VGTgeMh16vV4uLi12XIUmXlSTHq6p3oXHe+SxJajEYJOkVLCzAzAxs2dJfLix0XdHmG8XlqpJ0RVpYgLk5WF7ut0+d6rcBZme7q2uzOWOQpDUcOvRyKJy3vNzvv5IZDJK0htOnN9Z/pTAYJGkNa33JwpX+5QsGgySt4cgRmJpq901N9fuvZAaDJK1hdhbm52H3bkj6y/n5K/vEM3hVkiS9otnZKz8IVnPGIElqMRgkSS0GgySpxWCQJLUYDJKkFoNBktRiMEhqmcRvE1Wb9zFI+lOT+m2iahvJjCHJ/iRPJjmZ5O4B69+W5MtJziW5fdW6g0meal4HR1GPpIszqd8mqrahgyHJVuBe4B3A9cC7k1y/athp4FeB31m17dXAPcCbgX3APUm2D1uTpIszqd8mqrZRzBj2ASer6umqehG4HziwckBVPVNVXwVeWrXtrcDDVXW2ql4AHgb2j6AmSRdhUr9NVG2jCIadwLMr2ktN30i3TTKXZDHJ4pkzZy6qUEmvbFK/TVRtowiGDOirUW9bVfNV1auq3o4dO9ZdnKT1m9RvE1XbKK5KWgKuW9HeBTy3gW1/cdW2XxpBTZIu0iR+m6jaRjFjOAbsTbInyVXAHcDRdW77EHBLku3NSedbmj5JUkeGDoaqOgfcRf8X+hPAA1V1IsnhJLcBJPn5JEvAu4CPJjnRbHsW+AD9cDkGHG76JEkdSdV6TweMj16vV4uLi12XIUmXlSTHq6p3oXF+JYYkqcVgkCS1GAySpBaDQZLUYjBIkloMBklSi8EgSWoxGCRJLQaDJKnFYJAktRgMkqQWg0GS1GIwSJJaDAZJUovBIElqMRgkSS0jCYYk+5M8meRkkrsHrH91kk816x9NMtP0zyT5kySPNa//MIp6JEkXb9uwPyDJVuBe4GZgCTiW5GhVPb5i2J3AC1X1M0nuAD4E/N1m3Teq6oZh65AkjcYoZgz7gJNV9XRVvQjcDxxYNeYAcF/z/kHgxiQZwb4lSSM2imDYCTy7or3U9A0cU1XngB8Ar23W7UnylST/M8lbR1DPWFtYgJkZ2LKlv1xY6LoiSWob+lASMOgv/1rnmOeB6ar6XpK/CfzXJD9XVX/8Z3aSzAFzANPT00OW3I2FBZibg+XlfvvUqX4bYHa2u7okaaVRzBiWgOtWtHcBz601Jsk24KeAs1X1o6r6HkBVHQe+AfylQTupqvmq6lVVb8eOHSMo+9I7dOjlUDhvebnfL0njYhTBcAzYm2RPkquAO4Cjq8YcBQ42728HvlBVlWRHc/KaJG8E9gJPj6CmsXT69Mb6JakLQx9KqqpzSe4CHgK2Ap+oqhNJDgOLVXUU+Djwn5OcBM7SDw+AtwGHk5wDfgz8WlWdHbamcTU93T98NKhfksZFqlafDhh/vV6vFhcXuy5jw1afYwCYmoL5ec8xSNp8SY5XVe9C47zz+RKane2HwO7dkPSXhoKkcTOKq5K0AbOzBoGk8eaMQZLUYjBIkloMBklSi8EgSWoxGCRJLQaDJKnFYJAktRgMkqQWg0GS1GIwSJJaDAZJUovBIElqMRgkSS0GgySpZSTBkGR/kieTnExy94D1r07yqWb9o0lmVqx7f9P/ZJJbR1GPJOniDR0MzTOb7wXeAVwPvDvJ9auG3Qm8UFU/A3wE+FCz7fX0H/P5c8B+4N+dfwa0JKkbo5gx7ANOVtXTVfUicD9wYNWYA8B9zfsHgRuTpOm/v6p+VFXfBE42P08TYGEBZmZgy5b+cmGh64okwWiCYSfw7Ir2UtM3cExVnQN+ALx2ndvqCnT++denTkFVfzk3ZzhI42AUwZABfbXOMevZtv8Dkrkki0kWz5w5s8ESNW4OHYLl5Xbf8nK/X1K3RhEMS8B1K9q7gOfWGpNkG/BTwNl1bgtAVc1XVa+qejt27BhB2erS6d
Mb69fk8VBjd0YRDMeAvUn2JLmK/snko6vGHAUONu9vB75QVdX039FctbQH2Av8rxHUpDE3Pb2xfk0WDzV2a+hgaM4Z3AU8BDwBPFBVJ5IcTnJbM+zjwGuTnATeB9zdbHsCeAB4HPgc8OtV9eNha9L4O3IEpqbafVNT/X7JQ43dSv8P98tLr9erxcXFrsvQkBYW+v/RT5/uzxSOHIHZ2a6r0jjYsqU/U1gtgZdeuvT1XCmSHK+q3oXGbbsUxUiDzM4aBBpserp/+GhQvzafX4khaex4qLFbBoOksTM7C/PzsHt3//DR7t39tjPMS8NDSZLGkocau+OMQZLUYjBIkloMBklSi8EgSWoxGCRJLQaDJKnFYJAktRgMkqQWg0GS1GIwSJJaDAZJUovBIElqMRgkSS1DBUOSq5M8nOSpZrl9jXEHmzFPJTm4ov9LSZ5M8ljzet0w9UiShjfsjOFu4JGq2gs80rRbklwN3AO8GdgH3LMqQGar6obm9Z0h65EkDWnYYDgA3Ne8vw9454AxtwIPV9XZqnoBeBjYP+R+JUmbZNhgeH1VPQ/QLAcdCtoJPLuivdT0nffJ5jDSP0+StXaUZC7JYpLFM2fODFm2JGktF3yCW5LPA28YsOrQOvcx6Jd9NcvZqvpWkj8P/B7wHuA/DfohVTUPzAP0er0aNEaSNLwLBkNV3bTWuiTfTnJtVT2f5Fpg0DmCJeAXV7R3AV9qfva3muUPk/wO/XMQA4NBknRpDHso6Shw/iqjg8BnBox5CLglyfbmpPMtwENJtiW5BiDJq4BfBr4+ZD2SdMVZWICZGdiypb9cWNjc/V1wxnABHwQeSHIncBp4F0CSHvBrVfXeqjqb5APAsWabw03fa+gHxKuArcDngd8esh5JuqIsLMDcHCwv99unTvXbALOzm7PPVF1+h+t7vV4tLi52XYYkbbqZmX4YrLZ7NzzzzMZ+VpLjVdW70DjvfJakMXb69Mb6R8FgkKQxNj29sf5RMBgkaYwdOQJTU+2+qal+/2YxGCRpjM3Owvx8/5xC0l/Oz2/eiWcY/qokSdImm53d3CBYzRmDJKnFYJAktRgMkqQWg0GS1GIwSJJaDAZJUovBIElqMRgkSS0GgySpxWCQJLUYDJKklqGCIcnVSR5O8lSz3L7GuM8l+X6Sz67q35Pk0Wb7TyW5aph6JEnDG3bGcDfwSFXtBR5p2oN8GHjPgP4PAR9ptn8BuHPIeiRJQxo2GA4A9zXv7wPeOWhQVT0C/HBlX5IAbwcevND2kqRLZ9hgeH1VPQ/QLF+3gW1fC3y/qs417SVg55D1SJKGdMHnMST5PPCGAasODbnvDOirV6hjDpgDmN7MZ9pJ0oS7YDBU1U1rrUvy7STXVtXzSa4FvrOBfX8X+Okk25pZwy7guVeoYx6YB+j1emsGiCRpOMMeSjoKHGzeHwQ+s94Nq6qALwK3X8z2kqTNMWwwfBC4OclTwM1NmyS9JB87PyjJHwCfBm5MspTk1mbVbwHvS3KS/jmHjw9ZjyRpSEM987mqvgfcOKB/EXjvivZb19j+aWDfMDVIkkbLO5818RYWYGYGtmzpLxcWuq5I6tZQMwbpcrewAHNzsLzcb5861W8DzM52V5fUJWcMmmiHDr0cCuctL/f7pUllMGiinT69sX5pEhgMmmhr3SvpPZSaZAaDJtqRIzA11e6bmur3S5PKYNBEm52F+XnYvRuS/nJ+3hPPmmxelaSJNztrEEgrOWOQJLUYDJKkFoNBGhPega1x4TkGaQx4B7bGiTMGaQx4B7bGicEgjQHvwNY4MRikMeAd2BonBoM0BrwDW+NkqGBIcnWSh5M81Sy3rzHuc0m+n+Szq/r/Y5JvJnmsed0wTD3S5co7sDVOhp0x3A08UlV7gUea9iAfBt6zxrp/UlU3NK/HhqxHumzNzsIzz8BLL/WXhoK6MmwwHADua97fB7xz0KCqegT44ZD7kiRdAsMGw+ur6nmAZvm6i/gZR5J8Nc
lHkrx6yHokSUO64A1uST4PvGHAqlFcYf1+4I+Aq4B54LeAw2vUMQfMAUx7qYYkbZoLBkNV3bTWuiTfTnJtVT2f5FrgOxvZ+fnZBvCjJJ8EfvMVxs7TDw96vV5tZD+SpPUb9lDSUeBg8/4g8JmNbNyECUlC//zE14esR5I0pGGD4YPAzUmeAm5u2iTpJfnY+UFJ/gD4NHBjkqUktzarFpJ8DfgacA3wL4asR5I0pKG+RK+qvgfcOKB/EXjvivZb19j+7cPsX5I0et75LElqMRgkSS0GgySpxWCQJLVMTDD42ERJWp+JeLSnj02UpPWbiBmDj02UpPWbiGDwsYmStH4TEQw+NlGS1m8igsHHJkrS+k1EMPjYRElav4m4Kgn6IWAQSNKFTcSMQZK0fgaDJKnFYJAktRgMkqQWg0GS1JKq6rqGDUtyBjh1kZtfA3x3hOVc7vw8XuZn0ebn0XYlfB67q2rHhQZdlsEwjCSLVdXruo5x4efxMj+LNj+Ptkn6PDyUJElqMRgkSS2TGAzzXRcwZvw8XuZn0ebn0TYxn8fEnWOQJL2ySZwxSJJewUQFQ5L9SZ5McjLJ3V3X05Uk1yX5YpInkpxI8htd1zQOkmxN8pUkn+26lq4l+ekkDyb5P82/k7/VdU1dSfKPmv8nX0/yu0l+ouuaNtvEBEOSrcC9wDuA64F3J7m+26o6cw74x1X1s8AvAL8+wZ/FSr8BPNF1EWPi3wKfq6q/Avx1JvRzSbIT+AdAr6r+KrAVuKPbqjbfxAQDsA84WVVPV9WLwP3AgY5r6kRVPV9VX27e/5D+f/qd3VbVrSS7gF8CPtZ1LV1L8heAtwEfB6iqF6vq+91W1altwJ9Lsg2YAp7ruJ5NN0nBsBN4dkV7iQn/ZQiQZAZ4E/Bot5V07t8A/xR4qetCxsAbgTPAJ5tDax9L8pqui+pCVX0L+FfAaeB54AdV9T+6rWrzTVIwZEDfRF+SleQngd8D/mFV/XHX9XQlyS8D36mq413XMia2AX8D+PdV9Sbg/wITeU4uyXb6Rxb2AH8ReE2SX+m2qs03ScGwBFy3or2LCZgSriXJq+iHwkJV/X7X9XTsLcBtSZ6hf4jx7Un+S7cldWoJWKqq87PIB+kHxSS6CfhmVZ2pqv8H/D7wtzuuadNNUjAcA/Ym2ZPkKvonkI52XFMnkoT+8eMnqupfd11P16rq/VW1q6pm6P+7+EJVXfF/Fa6lqv4IeDbJX266bgQe77CkLp0GfiHJVPP/5kYm4ET8xDzzuarOJbkLeIj+lQWfqKoTHZfVlbcA7wG+luSxpu+fVdV/67AmjZe/Dyw0f0Q9Dfy9juvpRFU9muRB4Mv0r+b7ChNwB7R3PkuSWibpUJIkaR0MBklSi8EgSWoxGCRJLQaDJKnFYJAktRgMkqQWg0GS1PL/AciQpghOGPX4AAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The network predicted the value: 2\n" + ] + } + ], + "source": [ + "plt.plot(bad_trial.tensor('sequential1_output').step(2700).value[83], 'bo')\n", + "plt.show()\n", + "print( 'The network predicted the value: {}'.format(np.argmax(bad_trial.tensor('sequential1_output').step(2700).value[83])))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Data Analysis - Automatic\n", + "So far we have conducted a human analysis, but the real power of Tornasole comes from having automatic monitoring of training runs. To do so we will build a SageMaker-based system that monitors existing runs in real time. Data traces deposited in S3 are the exchange mechanism: \n", + "- the training system deposits data into s3://mybucket/myrun/\n", + "- the monitoring system watches and reads data from s3://mybucket/myrun/\n", + "\n", + "In this example we will simulate reading from that." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "from tornasole.trials import create_trial\n", + "from tornasole.rules.generic import VanishingGradient\n", + "from tornasole.rules.rule_invoker import invoke_rule" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tornasole:Vanishing Gradient rule created with threshold: 0.000100\n", + "INFO:tornasole:Started execution of rule VanishingGradient\n", + "INFO:tornasole:Step 0 had 0 vanishing gradients\n", + "INFO:tornasole:Step 100 had 0 vanishing gradients\n", + "INFO:tornasole:Step 200 had 0 vanishing gradients\n", + "INFO:tornasole:Step 300 had 0 vanishing gradients\n", + "INFO:tornasole:Step 400 had 0 vanishing gradients\n", + "INFO:tornasole:Step 500 had 0 vanishing gradients\n", + "INFO:tornasole:Step 600 had 0 vanishing gradients\n", + "INFO:tornasole:Step 700 had 0 vanishing gradients\n", + "INFO:tornasole:Step 800 had 0 vanishing gradients\n", + "INFO:tornasole:Step 900 had 0 vanishing gradients\n", + "INFO:tornasole:Step 1000 had 0 vanishing gradients\n", + "INFO:tornasole:Step 1100 had 0 vanishing gradients\n", + "INFO:tornasole:Step 1200 had 0 vanishing gradients\n", + "INFO:tornasole:Step 1300 had 0 vanishing gradients\n", + "INFO:tornasole:Step 1400 had 0 vanishing gradients\n", + "INFO:tornasole:Step 1500 had 0 vanishing gradients\n", + "INFO:tornasole:Step 1600 had 0 vanishing gradients\n", + "INFO:tornasole:Step 1700 had 0 vanishing gradients\n", + "INFO:tornasole:Step 1800 had 0 vanishing gradients\n", + "INFO:tornasole:Step 1900 had 0 vanishing gradients\n", + "INFO:tornasole:Step 2000 had 0 vanishing gradients\n", + "INFO:tornasole:Step 2100 had 0 vanishing gradients\n", + "INFO:tornasole:Step 2200 had 0 vanishing gradients\n", + "INFO:tornasole:Step 2300 had 0 vanishing gradients\n", + "INFO:tornasole:Step 2400 had 0 
vanishing gradients\n", + "INFO:tornasole:Step 2500 had 0 vanishing gradients\n", + "INFO:tornasole:Step 2600 had 0 vanishing gradients\n" + ] + } + ], + "source": [ + "vr = VanishingGradient(base_trial=good_trial, threshold=0.0001)\n", + "invoke_rule(vr, end_step=2700)" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tornasole:Vanishing Gradient rule created with threshold: 0.000100\n", + "INFO:tornasole:Started execution of rule VanishingGradient\n", + "INFO:tornasole:Step 0 had 0 vanishing gradients\n", + "INFO:tornasole:Step 100 had 5 vanishing gradients\n", + "INFO:tornasole:Step 200 had 5 vanishing gradients\n", + "INFO:tornasole:Step 300 had 5 vanishing gradients\n", + "INFO:tornasole:Step 400 had 5 vanishing gradients\n", + "INFO:tornasole:Step 500 had 5 vanishing gradients\n", + "INFO:tornasole:Step 600 had 5 vanishing gradients\n", + "INFO:tornasole:Step 700 had 5 vanishing gradients\n", + "INFO:tornasole:Step 800 had 5 vanishing gradients\n", + "INFO:tornasole:Step 900 had 5 vanishing gradients\n", + "INFO:tornasole:Step 1000 had 5 vanishing gradients\n", + "INFO:tornasole:Step 1100 had 5 vanishing gradients\n", + "INFO:tornasole:Step 1200 had 5 vanishing gradients\n", + "INFO:tornasole:Step 1300 had 5 vanishing gradients\n", + "INFO:tornasole:Step 1400 had 5 vanishing gradients\n", + "INFO:tornasole:Step 1500 had 5 vanishing gradients\n", + "INFO:tornasole:Step 1600 had 5 vanishing gradients\n", + "INFO:tornasole:Step 1700 had 5 vanishing gradients\n", + "INFO:tornasole:Step 1800 had 5 vanishing gradients\n", + "INFO:tornasole:Step 1900 had 5 vanishing gradients\n", + "INFO:tornasole:Step 2000 had 5 vanishing gradients\n", + "INFO:tornasole:Step 2100 had 5 vanishing gradients\n", + "INFO:tornasole:Step 2200 had 5 vanishing gradients\n", + "INFO:tornasole:Step 2300 had 5 vanishing gradients\n", + "INFO:tornasole:Step 2400 had 5 vanishing 
gradients\n", + "INFO:tornasole:Step 2500 had 5 vanishing gradients\n", + "INFO:tornasole:Step 2600 had 5 vanishing gradients\n" + ] + } + ], + "source": [ + "vr_bad = VanishingGradient(base_trial=bad_trial, threshold=0.0001)\n", + "invoke_rule(vr_bad, end_step=2700)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This concludes this notebook. For more information see the APIs as \n", + "- https://github.com/awslabs/tornasole_core: basic library\n", + "- https://github.com/awslabs/tornasole_mxnet: data generation for MXNet\n", + "- https://github.com/awslabs/tornasole_tf: data generation for TensorFlow\n", + "- https://github.com/awslabs/tornasole_rules: data analysis in Numpy" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + }, + "pycharm": { + "stem_cell": { + "cell_type": "raw", + "source": [], + "metadata": { + "collapsed": false + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/examples/mxnet/scripts/mnist_gluon_all_zero_demo.py b/examples/mxnet/scripts/mnist_gluon_all_zero_demo.py new file mode 100644 index 0000000000..d523a6a3b4 --- /dev/null +++ b/examples/mxnet/scripts/mnist_gluon_all_zero_demo.py @@ -0,0 +1,151 @@ +import argparse +from mxnet import gluon, init, autograd +from mxnet.gluon import nn +from mxnet.gluon.data.vision import datasets, transforms +import time +import mxnet as mx +import tornasole.mxnet as tm +from tornasole.mxnet import TornasoleHook, SaveConfig, modes +import random +import numpy as np + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a mxnet gluon model for FashonMNIST 
dataset') + parser.add_argument('--batch-size', type=int, default=256, + help='Batch size') + parser.add_argument('--tornasole_path', type=str, default='s3://tornasole-testing/all-zero-hook/trial-3', + help='S3 URI of the bucket where tensor data will be stored.') + parser.add_argument('--learning_rate', type=float, default=0.1) + parser.add_argument('--random_seed', type=bool, default=False) + parser.add_argument('--num_steps', type=int, + help='Reduce the number of training ' + 'and evaluation steps to the give number if desired.' + 'If this is not passed, trains for one epoch ' + 'of training and validation data') + opt = parser.parse_args() + return opt + +def acc(output, label): + return (output.argmax(axis=1) == + label.astype('float32')).mean().asscalar() + +def train_model(batch_size, net, train_data, valid_data, lr, hook, num_steps=None): + softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss() + trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr}) + # Start the training. + for epoch in range(1): + train_loss, train_acc, valid_acc = 0., 0., 0. 
+ tic = time.time() + hook.set_mode(modes.TRAIN) + for i, (data, label) in enumerate(train_data): + if num_steps is not None and num_steps < i: + break + data = data.as_in_context(mx.cpu(0)) + # forward + backward + with autograd.record(): + output = net(data) + loss = softmax_cross_entropy(output, label) + loss.backward() + # update parameters + trainer.step(batch_size) + # calculate training metrics + train_loss += loss.mean().asscalar() + train_acc += acc(output, label) + # calculate validation accuracy + hook.set_mode(modes.EVAL) + for i, (data, label) in enumerate(valid_data): + if num_steps is not None and num_steps < i: + break + data = data.as_in_context(mx.cpu(0)) + valid_acc += acc(net(data), label) + print("Epoch %d: loss %.3f, train acc %.3f, test acc %.3f, in %.1f sec" % ( + epoch, train_loss / len(train_data), train_acc / len(train_data), + valid_acc / len(valid_data), time.time() - tic)) + + +def prepare_data(batch_size): + mnist_train = datasets.FashionMNIST(train=True, + transform=lambda data, label: (data.astype(np.float32) * 0, label)) + X, y = mnist_train[0] + ('X shape: ', X.shape, 'X dtype', X.dtype, 'y:', y) + text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat', + 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot'] + X, y = mnist_train[0:10] + transformer = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(0.0, 0.1)]) + mnist_train = mnist_train.transform_first(transformer) + train_data = gluon.data.DataLoader( + mnist_train, batch_size=batch_size, shuffle=True, num_workers=4) + mnist_valid = gluon.data.vision.FashionMNIST(train=False) + valid_data = gluon.data.DataLoader( + mnist_valid.transform_first(transformer), + batch_size=batch_size, num_workers=4) + return train_data, valid_data + +# Create a model using gluon API. The tornasole hook is currently +# supports MXNet gluon models only. 
+def create_gluon_model(): + # Create Model in Gluon + net = nn.HybridSequential() + net.add(nn.Conv2D(channels=6, kernel_size=5, activation='relu'), + nn.MaxPool2D(pool_size=2, strides=2), + nn.Conv2D(channels=16, kernel_size=3, activation='relu'), + nn.MaxPool2D(pool_size=2, strides=2), + nn.Flatten(), + nn.Dense(120, activation='relu'), + nn.Dense(84, activation='relu'), + nn.Dense(10)) + net.initialize(init=init.Xavier(), ctx=mx.cpu()) + return net + + +# Create a tornasole hook. The initialization of hook determines which tensors +# are logged while training is in progress. +# Following function shows the default initialization that enables logging of +# weights, biases and gradients in the model. +def create_tornasole_hook(output_s3_uri): + # With the following SaveConfig, we will save tensors for steps 0, 1, 2 and 3 + # (indexing starts with 0). + save_config = SaveConfig(save_steps=[0, 1, 2, 3]) + tm.get_collection('ReluActivation').include(["relu*", "input_*"]) + + # Create a hook that logs weights, biases and gradients while training the model. + hook = TornasoleHook(out_dir=output_s3_uri, save_config=save_config, + include_collections=['ReluActivation','weights', 'bias','gradients']) + return hook + + +def main(): + opt = parse_args() + + # these random seeds are only intended for test purpose. + # for now, 128,12,2 could promise no assert failure with running tornasole_rules test_rules.py and config.yaml + # if you wish to change the number, notice that certain steps' tensor value may be capable of variation + if opt.random_seed: + mx.random.seed(128) + random.seed(12) + np.random.seed(2) + + # Create a Gluon Model. + net = create_gluon_model() + + # Create a tornasole hook for logging the desired tensors. + # The output_s3_uri is a the URI for the s3 bucket where the tensors will be saved. + # The trial_id is used to store the tensors from different trials separately. 
+ output_uri=opt.tornasole_path + hook = create_tornasole_hook(output_uri) + + # Register the hook to the top block. + hook.register_hook(net) + + # Start the training. + batch_size = opt.batch_size + train_data, valid_data = prepare_data(batch_size) + + train_model(batch_size, net, train_data, valid_data, + opt.learning_rate, hook, num_steps=opt.num_steps) + +if __name__ == '__main__': + main() diff --git a/examples/mxnet/scripts/mnist_gluon_basic_hook_demo.py b/examples/mxnet/scripts/mnist_gluon_basic_hook_demo.py new file mode 100644 index 0000000000..e9e61693b8 --- /dev/null +++ b/examples/mxnet/scripts/mnist_gluon_basic_hook_demo.py @@ -0,0 +1,146 @@ +import argparse +from mxnet import gluon, init, autograd +from mxnet.gluon import nn +from mxnet.gluon.data.vision import datasets, transforms +import time +import mxnet as mx +from tornasole.mxnet import TornasoleHook, SaveConfig, modes +import random +import numpy as np + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a mxnet gluon model for FashonMNIST dataset') + parser.add_argument('--batch-size', type=int, default=256, + help='Batch size') + parser.add_argument('--output-uri', type=str, default='s3://tornasole-testing/basic-mxnet-hook', + help='S3 URI of the bucket where tensor data will be stored.') + parser.add_argument('--learning_rate', type=float, default=0.1) + parser.add_argument('--random_seed', type=bool, default=False) + parser.add_argument('--num_steps', type=int, + help='Reduce the number of training ' + 'and evaluation steps to the give number if desired.' 
+ 'If this is not passed, trains for one epoch ' + 'of training and validation data') + opt = parser.parse_args() + return opt + +def acc(output, label): + return (output.argmax(axis=1) == + label.astype('float32')).mean().asscalar() + +def train_model(batch_size, net, train_data, valid_data, lr, hook, num_steps=None): + softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss() + trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr}) + # Start the training. + for epoch in range(1): + train_loss, train_acc, valid_acc = 0., 0., 0. + tic = time.time() + hook.set_mode(modes.TRAIN) + for i, (data, label) in enumerate(train_data): + if num_steps is not None and num_steps < i: + break + data = data.as_in_context(mx.cpu(0)) + # forward + backward + with autograd.record(): + output = net(data) + loss = softmax_cross_entropy(output, label) + loss.backward() + # update parameters + trainer.step(batch_size) + # calculate training metrics + train_loss += loss.mean().asscalar() + train_acc += acc(output, label) + # calculate validation accuracy + hook.set_mode(modes.EVAL) + for i, (data, label) in enumerate(valid_data): + if num_steps is not None and num_steps < i: + break + data = data.as_in_context(mx.cpu(0)) + valid_acc += acc(net(data), label) + print("Epoch %d: loss %.3f, train acc %.3f, test acc %.3f, in %.1f sec" % ( + epoch, train_loss / len(train_data), train_acc / len(train_data), + valid_acc / len(valid_data), time.time() - tic)) + + +def prepare_data(batch_size): + mnist_train = datasets.FashionMNIST(train=True) + X, y = mnist_train[0] + ('X shape: ', X.shape, 'X dtype', X.dtype, 'y:', y) + text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat', + 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot'] + X, y = mnist_train[0:10] + transformer = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(0.13, 0.31)]) + mnist_train = mnist_train.transform_first(transformer) + train_data = gluon.data.DataLoader( + mnist_train, 
batch_size=batch_size, shuffle=True, num_workers=4) + mnist_valid = gluon.data.vision.FashionMNIST(train=False) + valid_data = gluon.data.DataLoader( + mnist_valid.transform_first(transformer), + batch_size=batch_size, num_workers=4) + return train_data, valid_data + +# Create a model using gluon API. The tornasole hook is currently +# supports MXNet gluon models only. +def create_gluon_model(): + # Create Model in Gluon + net = nn.HybridSequential() + net.add(nn.Conv2D(channels=6, kernel_size=5, activation='relu'), + nn.MaxPool2D(pool_size=2, strides=2), + nn.Conv2D(channels=16, kernel_size=3, activation='relu'), + nn.MaxPool2D(pool_size=2, strides=2), + nn.Flatten(), + nn.Dense(120, activation='relu'), + nn.Dense(84, activation='relu'), + nn.Dense(10)) + net.initialize(init=init.Xavier(), ctx=mx.cpu()) + return net + + +# Create a tornasole hook. The initialization of hook determines which tensors +# are logged while training is in progress. +# Following function shows the default initialization that enables logging of +# weights, biases and gradients in the model. +def create_tornasole_hook(output_s3_uri): + # With the following SaveConfig, we will save tensors for steps 1, 2 and 3 + # (indexing starts with 0). + save_config = SaveConfig(save_steps=[1, 2, 3]) + + # Create a hook that logs weights, biases and gradients while training the model. + hook = TornasoleHook(out_dir=output_s3_uri, save_config=save_config) + return hook + + +def main(): + opt = parse_args() + + # these random seeds are only intended for test purpose. + # for now, 128,12,2 could promise no assert failure with running tornasole_rules test_rules.py and config.yaml + # if you wish to change the number, notice that certain steps' tensor value may be capable of variation + if opt.random_seed: + mx.random.seed(128) + random.seed(12) + np.random.seed(2) + + # Create a Gluon Model. + net = create_gluon_model() + + # Create a tornasole hook for logging the desired tensors. 
+ # The output_s3_uri is a the URI for the s3 bucket where the tensors will be saved. + # The trial_id is used to store the tensors from different trials separately. + output_uri=opt.output_uri + hook = create_tornasole_hook(output_uri) + + # Register the hook to the top block. + hook.register_hook(net) + + # Start the training. + batch_size = opt.batch_size + train_data, valid_data = prepare_data(batch_size) + + train_model(batch_size, net, train_data, valid_data, opt.learning_rate, hook, opt.num_steps) + +if __name__ == '__main__': + main() diff --git a/examples/mxnet/scripts/mnist_gluon_block_input_output_demo.py b/examples/mxnet/scripts/mnist_gluon_block_input_output_demo.py new file mode 100644 index 0000000000..e0b0ed76e7 --- /dev/null +++ b/examples/mxnet/scripts/mnist_gluon_block_input_output_demo.py @@ -0,0 +1,148 @@ +import argparse +from mxnet import gluon, init, autograd +from mxnet.gluon import nn +from mxnet.gluon.data.vision import datasets, transforms +import time +import mxnet as mx +import tornasole.mxnet as tm +from tornasole.mxnet import TornasoleHook, SaveConfig, modes + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a mxnet gluon model for FashionMNIST dataset') + parser.add_argument('--batch-size', type=int, default=256, + help='Batch size') + parser.add_argument('--output-s3-uri', type=str, default='s3://tornasole-testing/block-io-mxnet-hook', + help='S3 URI of the bucket where tensor data will be stored.') + opt = parser.parse_args() + return opt + +def acc(output, label): + return (output.argmax(axis=1) == + label.astype('float32')).mean().asscalar() + + +def train_model(batch_size, net, train_data, valid_data, hook): + softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss() + trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1}) + # Start the training. + for epoch in range(1): + train_loss, train_acc, valid_acc = 0., 0., 0. 
+ tic = time.time() + hook.set_mode(modes.TRAIN) + for data, label in train_data: + data = data.as_in_context(mx.cpu(0)) + # forward + backward + with autograd.record(): + output = net(data) + loss = softmax_cross_entropy(output, label) + loss.backward() + # update parameters + trainer.step(batch_size) + # calculate training metrics + train_loss += loss.mean().asscalar() + train_acc += acc(output, label) + # calculate validation accuracy + hook.set_mode(modes.EVAL) + for data, label in valid_data: + data = data.as_in_context(mx.cpu(0)) + valid_acc += acc(net(data), label) + print("Epoch %d: loss %.3f, train acc %.3f, test acc %.3f, in %.1f sec" % ( + epoch, train_loss / len(train_data), train_acc / len(train_data), + valid_acc / len(valid_data), time.time() - tic)) + + +def prepare_data(batch_size): + mnist_train = datasets.FashionMNIST(train=True) + X, y = mnist_train[0] + ('X shape: ', X.shape, 'X dtype', X.dtype, 'y:', y) + text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat', + 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot'] + X, y = mnist_train[0:10] + transformer = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(0.13, 0.31)]) + mnist_train = mnist_train.transform_first(transformer) + train_data = gluon.data.DataLoader( + mnist_train, batch_size=batch_size, shuffle=True, num_workers=4) + mnist_valid = gluon.data.vision.FashionMNIST(train=False) + valid_data = gluon.data.DataLoader( + mnist_valid.transform_first(transformer), + batch_size=batch_size, num_workers=4) + return train_data, valid_data + +# Create a model using gluon API. The tornasole hook is currently +# supports MXNet gluon models only. 
+def create_gluon_model(): + # Create Model in Gluon + child_blocks = [] + net = nn.HybridSequential() + conv2d_0 = nn.Conv2D(channels=6, kernel_size=5, activation='relu') + child_blocks.append(conv2d_0) + maxpool2d_0 = nn.MaxPool2D(pool_size=2, strides=2) + child_blocks.append(maxpool2d_0) + conv2d_1 = nn.Conv2D(channels=16, kernel_size=3, activation='relu') + child_blocks.append(conv2d_1) + maxpool2d_1 = nn.MaxPool2D(pool_size=2, strides=2) + child_blocks.append(maxpool2d_1) + flatten_0 = nn.Flatten() + child_blocks.append(flatten_0) + dense_0 = nn.Dense(120, activation="relu") + child_blocks.append(dense_0) + dense_1 = nn.Dense(84, activation="relu") + child_blocks.append(dense_1) + dense_2 = nn.Dense(10) + child_blocks.append(dense_2) + + net.add(conv2d_0, maxpool2d_0, conv2d_1, maxpool2d_1, flatten_0, dense_0, dense_1, dense_2) + net.initialize(init=init.Xavier(), ctx=mx.cpu()) + return net, child_blocks + + +# Create a tornasole hook. The initialization of hook determines which tensors +# are logged while training is in progress. +# Following function shows the hook initialization that enables logging of +# weights, biases and gradients in the model along with the inputs and output of the given +# child block. +def create_tornasole_hook(output_s3_uri, block): + # Create a SaveConfig that determines tensors from which steps are to be stored. + # With the following SaveConfig, we will save tensors for steps 1, 2 and 3. + save_config = SaveConfig(save_steps=[1, 2, 3]) + + # The names of input and output tensors of a block are in following format + # Inputs : _input_, and + # Output : _output + # In order to log the inputs and output of a model, we will create a collection as follows + tm.get_collection(block.name).add_block_tensors(block, inputs=True, outputs=True) + + # Create a hook that logs weights, biases, gradients and inputs outputs of model while training. 
+ hook = TornasoleHook(out_dir=output_s3_uri, save_config=save_config, include_collections=[ + 'weights', 'gradients', 'bias', block.name]) + return hook + + +def main(): + opt = parse_args() + # Create a Gluon Model. + net,child_blocks = create_gluon_model() + + # Create a tornasole hook for logging the desired tensors. + # The output_s3_uri is a the URI for the s3 bucket where the tensors will be saved. + output_s3_uri=opt.output_s3_uri + + # For creating a tornasole hook that can log inputs and output of the specific child block in the model, + # we will pass the desired block object to the create_tornasole_hook function. + # In the following case, we are attempting log inputs and output of the first Conv2D block. + hook = create_tornasole_hook(output_s3_uri, child_blocks[0]) + + # Register the hook to the top block. + hook.register_hook(net) + + # Start the training. + batch_size = opt.batch_size + train_data, valid_data = prepare_data(batch_size) + + train_model(batch_size, net, train_data, valid_data, hook) + +if __name__ == '__main__': + main() diff --git a/examples/mxnet/scripts/mnist_gluon_model_input_output_demo.py b/examples/mxnet/scripts/mnist_gluon_model_input_output_demo.py new file mode 100644 index 0000000000..ed220da9eb --- /dev/null +++ b/examples/mxnet/scripts/mnist_gluon_model_input_output_demo.py @@ -0,0 +1,135 @@ +import argparse +from mxnet import gluon, init, autograd +from mxnet.gluon import nn +from mxnet.gluon.data.vision import datasets, transforms +import time +import mxnet as mx +from tornasole.mxnet import TornasoleHook, SaveConfig, modes +import tornasole.mxnet as tm + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a mxnet gluon model for FashonMNIST dataset') + parser.add_argument('--batch-size', type=int, default=256, + help='Batch size') + parser.add_argument('--output-s3-uri', type=str, default='s3://tornasole-testing/model-io-mxnet-hook', + help='S3 URI of the bucket where tensor data will be 
stored.') + opt = parser.parse_args() + return opt + +def acc(output, label): + return (output.argmax(axis=1) == + label.astype('float32')).mean().asscalar() + + +def train_model(batch_size, net, train_data, valid_data, hook): + softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss() + trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1}) + # Start the training. + for epoch in range(1): + train_loss, train_acc, valid_acc = 0., 0., 0. + tic = time.time() + hook.set_mode(modes.TRAIN) + for data, label in train_data: + data = data.as_in_context(mx.cpu(0)) + # forward + backward + with autograd.record(): + output = net(data) + loss = softmax_cross_entropy(output, label) + loss.backward() + # update parameters + trainer.step(batch_size) + # calculate training metrics + train_loss += loss.mean().asscalar() + train_acc += acc(output, label) + # calculate validation accuracy + hook.set_mode(modes.EVAL) + for data, label in valid_data: + data = data.as_in_context(mx.cpu(0)) + valid_acc += acc(net(data), label) + print("Epoch %d: loss %.3f, train acc %.3f, test acc %.3f, in %.1f sec" % ( + epoch, train_loss / len(train_data), train_acc / len(train_data), + valid_acc / len(valid_data), time.time() - tic)) + + +def prepare_data(batch_size): + mnist_train = datasets.FashionMNIST(train=True) + X, y = mnist_train[0] + ('X shape: ', X.shape, 'X dtype', X.dtype, 'y:', y) + text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat', + 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot'] + X, y = mnist_train[0:10] + transformer = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(0.13, 0.31)]) + mnist_train = mnist_train.transform_first(transformer) + train_data = gluon.data.DataLoader( + mnist_train, batch_size=batch_size, shuffle=True, num_workers=4) + mnist_valid = gluon.data.vision.FashionMNIST(train=False) + valid_data = gluon.data.DataLoader( + mnist_valid.transform_first(transformer), + batch_size=batch_size, num_workers=4) + 
return train_data, valid_data + +# Create a model using gluon API. The tornasole hook is currently +# supports MXNet gluon models only. +def create_gluon_model(): + # Create Model in Gluon + net = nn.HybridSequential() + net.add(nn.Conv2D(channels=6, kernel_size=5, activation='relu'), + nn.MaxPool2D(pool_size=2, strides=2), + nn.Conv2D(channels=16, kernel_size=3, activation='relu'), + nn.MaxPool2D(pool_size=2, strides=2), + nn.Flatten(), + nn.Dense(120, activation='relu'), + nn.Dense(84, activation='relu'), + nn.Dense(10)) + net.initialize(init=init.Xavier(), ctx=mx.cpu()) + return net + + +# Create a tornasole hook. The initialization of hook determines which tensors +# are logged while training is in progress. +# Following function shows the hook initialization that enables logging of +# weights, biases and gradients in the model along with the inputs and outputs of the model. +def create_tornasole_hook(output_s3_uri, block): + # Create a SaveConfig that determines tensors from which steps are to be stored. + # With the following SaveConfig, we will save tensors for steps 1, 2 and 3. + save_config = SaveConfig(save_steps=[1, 2, 3]) + + # The names of input and output tensors of a block are in following format + # Inputs : _input_, and + # Output : _output + # In order to log the inputs and output of a model, we will create a collection as follows: + tm.get_collection('TopBlock').add_block_tensors(block, inputs=True, outputs=True) + + # Create a hook that logs weights, biases, gradients and inputs outputs of model while training. + hook = TornasoleHook(out_dir=output_s3_uri, save_config=save_config, include_collections=['weights', 'gradients', 'bias','TopBlock']) + return hook + + +def main(): + opt = parse_args() + # Create a Gluon Model. + net = create_gluon_model() + + # Create a tornasole hook for logging the desired tensors. + # The output_s3_uri is a the URI for the s3 bucket where the tensors will be saved. 
+ output_s3_uri=opt.output_s3_uri + + + # For creating a tornasole hook that can log inputs and output of the model, + # we will pass the top block object to the create_tornasole_hook function. + hook = create_tornasole_hook(output_s3_uri, net) + + # Register the hook to the top block. + hook.register_hook(net) + + # Start the training. + batch_size = opt.batch_size + train_data, valid_data = prepare_data(batch_size) + + train_model(batch_size, net, train_data, valid_data, hook) + +if __name__ == '__main__': + main() diff --git a/examples/mxnet/scripts/mnist_gluon_save_all_demo.py b/examples/mxnet/scripts/mnist_gluon_save_all_demo.py new file mode 100644 index 0000000000..fd0d83eb0c --- /dev/null +++ b/examples/mxnet/scripts/mnist_gluon_save_all_demo.py @@ -0,0 +1,123 @@ +import argparse +from mxnet import gluon, init, autograd +from mxnet.gluon import nn +from mxnet.gluon.data.vision import datasets, transforms +import time +import mxnet as mx +from tornasole.mxnet import TornasoleHook, SaveConfig, modes + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a mxnet gluon model for FashonMNIST dataset') + parser.add_argument('--batch-size', type=int, default=256, + help='Batch size') + parser.add_argument('--output-s3-uri', type=str, default='s3://tornasole-testing/saveall-mxnet-hook', + help='S3 URI of the bucket where tensor data will be stored.') + opt = parser.parse_args() + return opt + +def acc(output, label): + return (output.argmax(axis=1) == + label.astype('float32')).mean().asscalar() + + +def train_model(batch_size, net, train_data, valid_data, hook): + softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss() + trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1}) + # Start the training. + for epoch in range(1): + train_loss, train_acc, valid_acc = 0., 0., 0. 
+ tic = time.time() + hook.set_mode(modes.TRAIN) + for data, label in train_data: + data = data.as_in_context(mx.cpu(0)) + # forward + backward + with autograd.record(): + output = net(data) + loss = softmax_cross_entropy(output, label) + loss.backward() + # update parameters + trainer.step(batch_size) + # calculate training metrics + train_loss += loss.mean().asscalar() + train_acc += acc(output, label) + # calculate validation accuracy + hook.set_mode(modes.EVAL) + for data, label in valid_data: + data = data.as_in_context(mx.cpu(0)) + valid_acc += acc(net(data), label) + print("Epoch %d: loss %.3f, train acc %.3f, test acc %.3f, in %.1f sec" % ( + epoch, train_loss / len(train_data), train_acc / len(train_data), + valid_acc / len(valid_data), time.time() - tic)) + + +def prepare_data(batch_size): + mnist_train = datasets.FashionMNIST(train=True) + X, y = mnist_train[0] + ('X shape: ', X.shape, 'X dtype', X.dtype, 'y:', y) + text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat', + 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot'] + X, y = mnist_train[0:10] + transformer = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(0.13, 0.31)]) + mnist_train = mnist_train.transform_first(transformer) + train_data = gluon.data.DataLoader( + mnist_train, batch_size=batch_size, shuffle=True, num_workers=4) + mnist_valid = gluon.data.vision.FashionMNIST(train=False) + valid_data = gluon.data.DataLoader( + mnist_valid.transform_first(transformer), + batch_size=batch_size, num_workers=4) + return train_data, valid_data + +# Create a model using gluon API. The tornasole hook is currently +# supports MXNet gluon models only. 
+def create_gluon_model(): + # Create Model in Gluon + net = nn.HybridSequential() + net.add(nn.Conv2D(channels=6, kernel_size=5, activation='relu'), + nn.MaxPool2D(pool_size=2, strides=2), + nn.Conv2D(channels=16, kernel_size=3, activation='relu'), + nn.MaxPool2D(pool_size=2, strides=2), + nn.Flatten(), + nn.Dense(120, activation="relu"), + nn.Dense(84, activation="relu"), + nn.Dense(10)) + net.initialize(init=init.Xavier(), ctx=mx.cpu()) + return net + + +# Create a tornasole hook. The initialization of hook determines which tensors +# are logged while training is in progress. +# Following function shows the initialization of tornasole hook that enables logging of +# all the tensors in the model. +def create_tornasole_hook(output_s3_uri): + # Create a SaveConfig that determines tensors from which steps are to be stored. + # With the following SaveConfig, we will save tensors for steps 1, 2 and 3. + save_config = SaveConfig(save_steps=[1, 2, 3]) + + # Create a hook that logs all the tensors seen while training the model. + hook = TornasoleHook(out_dir=output_s3_uri, save_config=save_config, save_all=True) + return hook + + +def main(): + opt = parse_args() + # Create a Gluon Model. + net = create_gluon_model() + + # Create a tornasole hook for logging the desired tensors. + # The output_s3_uri is a the URI for the s3 bucket where the tensors will be saved. + output_s3_uri=opt.output_s3_uri + hook = create_tornasole_hook(output_s3_uri) + + # Register the hook to the top block. + hook.register_hook(net) + + # Start the training. 
+ batch_size = opt.batch_size + train_data, valid_data = prepare_data(batch_size) + + train_model(batch_size, net, train_data, valid_data, hook) + +if __name__ == '__main__': + main() diff --git a/examples/mxnet/scripts/mnist_gluon_vg_demo.py b/examples/mxnet/scripts/mnist_gluon_vg_demo.py new file mode 100644 index 0000000000..d3cb991d71 --- /dev/null +++ b/examples/mxnet/scripts/mnist_gluon_vg_demo.py @@ -0,0 +1,148 @@ +import argparse +import random +from mxnet import gluon, autograd +from mxnet.gluon import nn +import mxnet as mx +from tornasole.mxnet import TornasoleHook, SaveConfig, modes +import numpy as np + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a mxnet gluon model for FashionMNIST dataset') + parser.add_argument('--output-uri', type=str, default='s3://tornasole-testing/vg-demo', + help='S3 URI of the bucket where tensor data will be stored.') + parser.add_argument('--random_seed',type=bool, default=False) + parser.add_argument('--num_steps', type=int, + help='Reduce the number of training ' + 'and evaluation steps to the give number if desired.' + 'If this is not passed, trains for one epoch ' + 'of training and validation data') + parser.add_argument('--tornasole_frequency', type=int, default=100) + opt = parser.parse_args() + return opt + + +def test(ctx, net, val_data, num_steps=None): + metric = mx.metric.Accuracy() + for i, (data, label) in enumerate(val_data): + if num_steps is not None and num_steps < i: + break + data = data.as_in_context(ctx) + label = label.as_in_context(ctx) + output = net(data) + metric.update([label], [output]) + + return metric.get() + + +def train_model(net, epochs, ctx, learning_rate, momentum, + train_data, val_data, hook, num_steps=None): + # Collect all parameters from net and its children, then initialize them. + net.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx) + # Trainer is for updating parameters with gradient. 
+ trainer = gluon.Trainer(net.collect_params(), 'sgd', + {'learning_rate': learning_rate, 'momentum': momentum}) + metric = mx.metric.Accuracy() + loss = gluon.loss.SoftmaxCrossEntropyLoss() + + for epoch in range(epochs): + # reset data iterator and metric at begining of epoch. + metric.reset() + hook.set_mode(modes.TRAIN) + for i, (data, label) in enumerate(train_data): + if num_steps is not None and num_steps < i: + break + # Copy data to ctx if necessary + data = data.as_in_context(ctx) + label = label.as_in_context(ctx) + # Start recording computation graph with record() section. + # Recorded graphs can then be differentiated with backward. + with autograd.record(): + output = net(data) + L = loss(output, label) + L.backward() + # take a gradient step with batch_size equal to data.shape[0] + trainer.step(data.shape[0]) + # update metric at last. + metric.update([label], [output]) + + if i % 100 == 0 and i > 0: + name, acc = metric.get() + print('[Epoch %d Batch %d] Training: %s=%f' % (epoch, i, name, acc)) + + name, acc = metric.get() + print('[Epoch %d] Training: %s=%f' % (epoch, name, acc)) + + hook.set_mode(modes.EVAL) + name, val_acc = test(ctx, net, val_data, num_steps=num_steps) + print('[Epoch %d] Validation: %s=%f' % (epoch, name, val_acc)) + +def transformer(data, label): + data = data.reshape((-1,)).astype(np.float32)/255 + return data, label + + +def prepare_data(): + train_data = gluon.data.DataLoader( + gluon.data.vision.MNIST('./data', train=True, transform=transformer), + batch_size=100, shuffle=True, last_batch='discard') + + val_data = gluon.data.DataLoader( + gluon.data.vision.MNIST('./data', train=False, transform=transformer), + batch_size=100, shuffle=False) + return train_data, val_data + +# Create a model using gluon API. The tornasole hook is currently +# supports MXNet gluon models only. 
+def create_gluon_model(): + net = nn.Sequential() + with net.name_scope(): + net.add(nn.Dense(128, activation='relu')) + net.add(nn.Dense(64, activation='relu')) + net.add(nn.Dense(10)) + return net + +# Create a tornasole hook. The initialization of hook determines which tensors +# are logged while training is in progress. +# Following function shows the default initialization that enables logging of +# weights, biases and gradients in the model. +def create_tornasole_hook(output_uri, tornasole_frequency): + # With the following SaveConfig, we will save tensors with the save_interval 100. + save_config = SaveConfig(save_interval=tornasole_frequency) + + # Create a hook that logs weights, biases and gradients while training the model. + hook = TornasoleHook(out_dir=output_uri, save_config=save_config) + return hook + + +def main(): + opt = parse_args() + + # these random seeds are only intended for test purpose. + # for now, 128,12,2 could promise no assert failure with running tornasole_rules test_rules.py and config.yaml + # if you wish to change the number, notice that certain steps' tensor value may be capable of variation + if opt.random_seed: + mx.random.seed(128) + random.seed(12) + np.random.seed(2) + + # Create a Gluon Model. + net = create_gluon_model() + + # Create a tornasole hook for logging the desired tensors. + # The output_uri is a the URI where the tensors will be saved. It can be local or s3://bucket/prefix + output_uri=opt.output_uri + hook = create_tornasole_hook(output_uri, opt.tornasole_frequency) + + # Register the hook to the top block. + hook.register_hook(net) + + # Start the training. 
+ train_data, val_data = prepare_data() + + train_model(net=net, epochs=2, ctx=mx.cpu(), learning_rate=1, momentum=0.9, + train_data=train_data, val_data=val_data, + hook=hook, num_steps=opt.num_steps) + +if __name__ == '__main__': + main() diff --git a/examples/pytorch/notebooks/PyTorch-SimpleInteractiveAnalysis.ipynb b/examples/pytorch/notebooks/PyTorch-SimpleInteractiveAnalysis.ipynb new file mode 100644 index 0000000000..d58df8fcde --- /dev/null +++ b/examples/pytorch/notebooks/PyTorch-SimpleInteractiveAnalysis.ipynb @@ -0,0 +1,1567 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Simple Interactive Analysis in Tornasole\n", + "This notebook will demonstrate the simplest kind of interactive analysis that can be run in Tornasole. It will focus on the [vanishing/exploding gradient](https://medium.com/learn-love-ai/the-curious-case-of-the-vanishing-exploding-gradient-bf58ec6822eb) problems on a simple MNIST digit recognition." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First of all, we will import some basic libraries for deep learning and plotting." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import torch\n", + "import torch.utils.data\n", + "from torch import nn\n", + "import matplotlib.pyplot as plt\n", + "import torch.nn.functional as F\n", + "import torch.optim as optim\n", + "from torchvision import datasets, transforms\n", + "from torch.autograd import Variable" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's copy the Tornasole libraries to this instance, this step has to be executed only once. 
Please make sure that the AWS account you are using can access the tornasole-wheels-alpha bucket.\n", + "\n", + "To do so you'll need the appropriate AWS credentials. There are several ways of doing this:\n", + "\n", + "inject temporary credentials\n", + "if running on EC2, use EC2 roles that can access all S3 buckets\n", + "(preferred) run this notebook on a SageMaker notebook instance\n", + "The code below downloads the necessary .whl files and installs them in the current environment. Only run the first time!" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "#WARNING - uncomment this code only if you haven't done this before\n", + "#!aws s3 cp s3://tornasole-wheels-alpha/tornasole_core-0.1-py2.py3-none-any.whl\n", + "#!aws s3 cp s3://tornasole-wheels-alpha/tornasole_rules-0.1-py2.py3-none-any.whl\n", + "#!aws s3 cp s3://tornasole-wheels-alpha/tornasole_pytorch-0.1-py2.py3-none-any.whl\n", + "\n", + "#!pip install tornasole_core-0.1-py2.py3-none-any.whl tornasole_rules-0.1-py2.py3-none-any.whl tornasole_pytorch-0.1-py2.py3-none-any.whl\n", + "\n", + "# If you get a version conflict issue between botocore and boto3, you might need to run the following\n", + "# !pip install botocore==1.12.189\n", + "# !pip install boto3==1.9.189\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's import the Tornasole libraries, all we need is a `TornasoleHook` to use as a callback, as well as some ancillary data structures." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "from tornasole.pytorch.hook import *\n", + "from tornasole.core.save_config import SaveConfig\n", + "\n", + "import logging\n", + "logging.getLogger(\"tornasole\").setLevel(logging.ERROR)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can define a simple network - it doesn't really matter what it is.\n", + "Importantly - we **add the Tornasole Hook**. This hook will be run at every batch and will save selected tensors (in this case, all of them) to the desired directory (in this case, `'./ts_output/{run_id}'`.\n", + "\n", + "See the [documentation](https://github.com/awslabs/tornasole_mxnet/blob/alpha/README.md) for more details." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "class Net(nn.Module):\n", + " def __init__(self):\n", + " super(Net, self).__init__()\n", + "\n", + " # self.conv1 = nn.Conv2d(1, 20, 5, 1)\n", + " self.add_module('conv1', nn.Conv2d(1, 20, 5, 1))\n", + " self.add_module('conv2', nn.Conv2d(20, 50, 5, 1))\n", + " self.add_module('fc1', nn.Linear(4*4*50, 500))\n", + " self.add_module('fc2', nn.Linear(500, 10))\n", + "\n", + " def forward(self, x):\n", + " x = F.relu(self.conv1(x))\n", + " x = F.max_pool2d(x, 2, 2)\n", + " x = F.relu(self.conv2(x))\n", + " x = F.max_pool2d(x, 2, 2)\n", + " x = x.view(-1, 4*4*50)\n", + " x = F.relu(self.fc1(x))\n", + " x = self.fc2(x)\n", + " return F.log_softmax(x, dim=1)\n", + "\n", + "def create_net(tornasole_save_interval, base_loc, run_id):\n", + " model = Net()\n", + " # Create and add the hook. 
Arguments:\n", + " # - save data in './{base_loc}/{run_id} - Note: s3 is also supported\n", + " # - save every 100 batches\n", + " # - save every tensor: inputs/outputs to each layer, as well as gradients\n", + " hook = TornasoleHook(out_dir=base_loc + \"/\" + run_id, save_config=SaveConfig(save_interval=100), save_all=True)\n", + " hook.register_hook(model)\n", + " return model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And we create a simple training script. No Tornasole-specific code here, this is a slightly modified version of the [digit recognition](https://github.com/pytorch/examples/blob/master/mnist/main.py) example on the PyTorch github." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def transformer(data, label):\n", + " data = data.reshape((-1,)).astype(np.float32)/255\n", + " return data, label\n", + "\n", + "def test(model, device, test_loader):\n", + " model.eval()\n", + " test_loss = 0\n", + " correct = 0\n", + " with torch.no_grad():\n", + " for data, target in test_loader:\n", + " data, target = data.to(device), target.to(device)\n", + " output = model(data)\n", + " test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n", + " pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n", + " correct += pred.eq(target.view_as(pred)).sum().item()\n", + "\n", + " test_loss /= len(test_loader.dataset)\n", + "\n", + " print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n", + " test_loss, correct, len(test_loader.dataset),\n", + " 100. 
* correct / len(test_loader.dataset)))\n", + "\n", + "\n", + "\n", + "\n", + "def train(model, epochs, learning_rate, momentum, batch_size, device):\n", + " train_loader = torch.utils.data.DataLoader(\n", + " datasets.MNIST('./data', train=True, download=True,\n", + " transform=transforms.Compose([\n", + " transforms.ToTensor(),\n", + " transforms.Normalize((0.1307,), (0.3081,))\n", + " ])),\n", + " batch_size=batch_size, shuffle=True)\n", + "\n", + " val_data = torch.utils.data.DataLoader(\n", + " datasets.MNIST('./data', train=False, download=True,\n", + " transform=transforms.Compose([\n", + " transforms.ToTensor(),\n", + " transforms.Normalize((0.1307,), (0.3081,))\n", + " ])),\n", + " batch_size=batch_size, shuffle=False)\n", + " \n", + " # Collect all parameters from net and its children, then initialize them.\n", + " optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)\n", + " model = model.to(device)\n", + " total_step = len(train_loader)\n", + " for epoch in range(epochs):\n", + " model.train()\n", + " count = 0\n", + " for batch_idx, (data, target) in enumerate(train_loader):\n", + " data, target = data.to(device), target.to(device)\n", + " optimizer.zero_grad()\n", + " output = model(Variable(data, requires_grad = True))\n", + " loss = F.nll_loss(output, target)\n", + " loss.backward()\n", + " count += 1\n", + "\n", + " optimizer.step()\n", + " if batch_idx % 10 == 0:\n", + " print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n", + " epoch, batch_idx * len(data), len(train_loader.dataset),\n", + " 100. 
* batch_idx / len(train_loader), loss.item()))\n", + " \n", + "# torch.save(model.state_dict(),\"mnist_params.pt\")\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Clear up from previous runs, we remove old data" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "!rm -rf ./ts_output/" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "At this point we are ready to train. For the purposes of this example, we will name this run as `'good'` because we know it will converge to a good solution. \n", + "\n", + "If you have a GPU on your machine, you can change the device line appropriately -- e.g for an NVIDIA GPU, it would be `device = torch.device(\"cuda\")`." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Train Epoch: 0 [0/60000 (0%)]\tLoss: 2.295261\n", + "Train Epoch: 0 [640/60000 (1%)]\tLoss: 1.748892\n", + "Train Epoch: 0 [1280/60000 (2%)]\tLoss: 0.695785\n", + "Train Epoch: 0 [1920/60000 (3%)]\tLoss: 0.629404\n", + "Train Epoch: 0 [2560/60000 (4%)]\tLoss: 0.355879\n", + "Train Epoch: 0 [3200/60000 (5%)]\tLoss: 0.461338\n", + "Train Epoch: 0 [3840/60000 (6%)]\tLoss: 0.465871\n", + "Train Epoch: 0 [4480/60000 (7%)]\tLoss: 0.286747\n", + "Train Epoch: 0 [5120/60000 (9%)]\tLoss: 0.262511\n", + "Train Epoch: 0 [5760/60000 (10%)]\tLoss: 0.276488\n", + "Train Epoch: 0 [6400/60000 (11%)]\tLoss: 0.234418\n", + "Train Epoch: 0 [7040/60000 (12%)]\tLoss: 0.523879\n", + "Train Epoch: 0 [7680/60000 (13%)]\tLoss: 0.582521\n", + "Train Epoch: 0 [8320/60000 (14%)]\tLoss: 0.257838\n", + "Train Epoch: 0 [8960/60000 (15%)]\tLoss: 0.162327\n", + "Train Epoch: 0 [9600/60000 (16%)]\tLoss: 0.202325\n", + "Train Epoch: 0 [10240/60000 (17%)]\tLoss: 0.204970\n", + "Train Epoch: 0 [10880/60000 (18%)]\tLoss: 0.517194\n", + "Train Epoch: 0 [11520/60000 
(19%)]\tLoss: 0.138784\n", + "Train Epoch: 0 [12160/60000 (20%)]\tLoss: 0.174099\n", + "Train Epoch: 0 [12800/60000 (21%)]\tLoss: 0.358449\n", + "Train Epoch: 0 [13440/60000 (22%)]\tLoss: 0.234432\n", + "Train Epoch: 0 [14080/60000 (23%)]\tLoss: 0.239579\n", + "Train Epoch: 0 [14720/60000 (25%)]\tLoss: 0.143086\n", + "Train Epoch: 0 [15360/60000 (26%)]\tLoss: 0.464434\n", + "Train Epoch: 0 [16000/60000 (27%)]\tLoss: 0.213992\n", + "Train Epoch: 0 [16640/60000 (28%)]\tLoss: 0.198865\n", + "Train Epoch: 0 [17280/60000 (29%)]\tLoss: 0.426876\n", + "Train Epoch: 0 [17920/60000 (30%)]\tLoss: 0.236948\n", + "Train Epoch: 0 [18560/60000 (31%)]\tLoss: 0.019676\n", + "Train Epoch: 0 [19200/60000 (32%)]\tLoss: 0.365207\n", + "Train Epoch: 0 [19840/60000 (33%)]\tLoss: 0.177355\n", + "Train Epoch: 0 [20480/60000 (34%)]\tLoss: 0.101089\n", + "Train Epoch: 0 [21120/60000 (35%)]\tLoss: 0.217447\n", + "Train Epoch: 0 [21760/60000 (36%)]\tLoss: 0.122373\n", + "Train Epoch: 0 [22400/60000 (37%)]\tLoss: 0.083280\n", + "Train Epoch: 0 [23040/60000 (38%)]\tLoss: 0.044131\n", + "Train Epoch: 0 [23680/60000 (39%)]\tLoss: 0.164748\n", + "Train Epoch: 0 [24320/60000 (41%)]\tLoss: 0.217344\n", + "Train Epoch: 0 [24960/60000 (42%)]\tLoss: 0.168444\n", + "Train Epoch: 0 [25600/60000 (43%)]\tLoss: 0.094563\n", + "Train Epoch: 0 [26240/60000 (44%)]\tLoss: 0.160949\n", + "Train Epoch: 0 [26880/60000 (45%)]\tLoss: 0.190556\n", + "Train Epoch: 0 [27520/60000 (46%)]\tLoss: 0.049234\n", + "Train Epoch: 0 [28160/60000 (47%)]\tLoss: 0.106162\n", + "Train Epoch: 0 [28800/60000 (48%)]\tLoss: 0.104469\n", + "Train Epoch: 0 [29440/60000 (49%)]\tLoss: 0.053504\n", + "Train Epoch: 0 [30080/60000 (50%)]\tLoss: 0.110556\n", + "Train Epoch: 0 [30720/60000 (51%)]\tLoss: 0.322309\n", + "Train Epoch: 0 [31360/60000 (52%)]\tLoss: 0.086979\n", + "Train Epoch: 0 [32000/60000 (53%)]\tLoss: 0.134221\n", + "Train Epoch: 0 [32640/60000 (54%)]\tLoss: 0.201668\n", + "Train Epoch: 0 [33280/60000 (55%)]\tLoss: 0.062239\n", 
+ "Train Epoch: 0 [33920/60000 (57%)]\tLoss: 0.107864\n", + "Train Epoch: 0 [34560/60000 (58%)]\tLoss: 0.082699\n", + "Train Epoch: 0 [35200/60000 (59%)]\tLoss: 0.187130\n", + "Train Epoch: 0 [35840/60000 (60%)]\tLoss: 0.436204\n", + "Train Epoch: 0 [36480/60000 (61%)]\tLoss: 0.216804\n", + "Train Epoch: 0 [37120/60000 (62%)]\tLoss: 0.295533\n", + "Train Epoch: 0 [37760/60000 (63%)]\tLoss: 0.151546\n", + "Train Epoch: 0 [38400/60000 (64%)]\tLoss: 0.109190\n", + "Train Epoch: 0 [39040/60000 (65%)]\tLoss: 0.089795\n", + "Train Epoch: 0 [39680/60000 (66%)]\tLoss: 0.111167\n", + "Train Epoch: 0 [40320/60000 (67%)]\tLoss: 0.148806\n", + "Train Epoch: 0 [40960/60000 (68%)]\tLoss: 0.086549\n", + "Train Epoch: 0 [41600/60000 (69%)]\tLoss: 0.166614\n", + "Train Epoch: 0 [42240/60000 (70%)]\tLoss: 0.076532\n", + "Train Epoch: 0 [42880/60000 (71%)]\tLoss: 0.207414\n", + "Train Epoch: 0 [43520/60000 (72%)]\tLoss: 0.057692\n", + "Train Epoch: 0 [44160/60000 (74%)]\tLoss: 0.135699\n", + "Train Epoch: 0 [44800/60000 (75%)]\tLoss: 0.070328\n", + "Train Epoch: 0 [45440/60000 (76%)]\tLoss: 0.287908\n", + "Train Epoch: 0 [46080/60000 (77%)]\tLoss: 0.181923\n", + "Train Epoch: 0 [46720/60000 (78%)]\tLoss: 0.109931\n", + "Train Epoch: 0 [47360/60000 (79%)]\tLoss: 0.082871\n", + "Train Epoch: 0 [48000/60000 (80%)]\tLoss: 0.336507\n", + "Train Epoch: 0 [48640/60000 (81%)]\tLoss: 0.132857\n", + "Train Epoch: 0 [49280/60000 (82%)]\tLoss: 0.124299\n", + "Train Epoch: 0 [49920/60000 (83%)]\tLoss: 0.064722\n", + "Train Epoch: 0 [50560/60000 (84%)]\tLoss: 0.102338\n", + "Train Epoch: 0 [51200/60000 (85%)]\tLoss: 0.081316\n", + "Train Epoch: 0 [51840/60000 (86%)]\tLoss: 0.023367\n", + "Train Epoch: 0 [52480/60000 (87%)]\tLoss: 0.009987\n", + "Train Epoch: 0 [53120/60000 (88%)]\tLoss: 0.021232\n", + "Train Epoch: 0 [53760/60000 (90%)]\tLoss: 0.234437\n", + "Train Epoch: 0 [54400/60000 (91%)]\tLoss: 0.076132\n", + "Train Epoch: 0 [55040/60000 (92%)]\tLoss: 0.099620\n", + "Train Epoch: 0 
[55680/60000 (93%)]\tLoss: 0.153108\n", + "Train Epoch: 0 [56320/60000 (94%)]\tLoss: 0.008535\n", + "Train Epoch: 0 [56960/60000 (95%)]\tLoss: 0.069565\n", + "Train Epoch: 0 [57600/60000 (96%)]\tLoss: 0.345571\n", + "Train Epoch: 0 [58240/60000 (97%)]\tLoss: 0.502152\n", + "Train Epoch: 0 [58880/60000 (98%)]\tLoss: 0.153598\n", + "Train Epoch: 0 [59520/60000 (99%)]\tLoss: 0.009579\n", + "Train Epoch: 1 [0/60000 (0%)]\tLoss: 0.290723\n", + "Train Epoch: 1 [640/60000 (1%)]\tLoss: 0.145595\n", + "Train Epoch: 1 [1280/60000 (2%)]\tLoss: 0.069823\n", + "Train Epoch: 1 [1920/60000 (3%)]\tLoss: 0.064840\n", + "Train Epoch: 1 [2560/60000 (4%)]\tLoss: 0.006950\n", + "Train Epoch: 1 [3200/60000 (5%)]\tLoss: 0.000492\n", + "Train Epoch: 1 [3840/60000 (6%)]\tLoss: 0.194620\n", + "Train Epoch: 1 [4480/60000 (7%)]\tLoss: 0.240464\n", + "Train Epoch: 1 [5120/60000 (9%)]\tLoss: 0.073014\n", + "Train Epoch: 1 [5760/60000 (10%)]\tLoss: 0.177253\n", + "Train Epoch: 1 [6400/60000 (11%)]\tLoss: 0.048263\n", + "Train Epoch: 1 [7040/60000 (12%)]\tLoss: 0.036351\n", + "Train Epoch: 1 [7680/60000 (13%)]\tLoss: 0.011458\n", + "Train Epoch: 1 [8320/60000 (14%)]\tLoss: 0.341599\n", + "Train Epoch: 1 [8960/60000 (15%)]\tLoss: 0.373527\n", + "Train Epoch: 1 [9600/60000 (16%)]\tLoss: 0.047152\n", + "Train Epoch: 1 [10240/60000 (17%)]\tLoss: 0.124424\n", + "Train Epoch: 1 [10880/60000 (18%)]\tLoss: 0.138054\n", + "Train Epoch: 1 [11520/60000 (19%)]\tLoss: 0.314980\n", + "Train Epoch: 1 [12160/60000 (20%)]\tLoss: 0.244121\n", + "Train Epoch: 1 [12800/60000 (21%)]\tLoss: 0.194258\n", + "Train Epoch: 1 [13440/60000 (22%)]\tLoss: 0.092362\n", + "Train Epoch: 1 [14080/60000 (23%)]\tLoss: 0.205115\n", + "Train Epoch: 1 [14720/60000 (25%)]\tLoss: 0.202674\n", + "Train Epoch: 1 [15360/60000 (26%)]\tLoss: 0.189018\n", + "Train Epoch: 1 [16000/60000 (27%)]\tLoss: 0.168465\n", + "Train Epoch: 1 [16640/60000 (28%)]\tLoss: 0.075228\n", + "Train Epoch: 1 [17280/60000 (29%)]\tLoss: 0.024219\n", + "Train Epoch: 
1 [17920/60000 (30%)]\tLoss: 0.249284\n", + "Train Epoch: 1 [18560/60000 (31%)]\tLoss: 0.055043\n", + "Train Epoch: 1 [19200/60000 (32%)]\tLoss: 0.199740\n", + "Train Epoch: 1 [19840/60000 (33%)]\tLoss: 0.264624\n", + "Train Epoch: 1 [20480/60000 (34%)]\tLoss: 0.145213\n", + "Train Epoch: 1 [21120/60000 (35%)]\tLoss: 0.182477\n", + "Train Epoch: 1 [21760/60000 (36%)]\tLoss: 0.181954\n", + "Train Epoch: 1 [22400/60000 (37%)]\tLoss: 0.041947\n", + "Train Epoch: 1 [23040/60000 (38%)]\tLoss: 0.165648\n", + "Train Epoch: 1 [23680/60000 (39%)]\tLoss: 0.075048\n", + "Train Epoch: 1 [24320/60000 (41%)]\tLoss: 0.091085\n", + "Train Epoch: 1 [24960/60000 (42%)]\tLoss: 0.267341\n", + "Train Epoch: 1 [25600/60000 (43%)]\tLoss: 0.419169\n", + "Train Epoch: 1 [26240/60000 (44%)]\tLoss: 0.397417\n", + "Train Epoch: 1 [26880/60000 (45%)]\tLoss: 0.059258\n", + "Train Epoch: 1 [27520/60000 (46%)]\tLoss: 0.678994\n", + "Train Epoch: 1 [28160/60000 (47%)]\tLoss: 0.097712\n", + "Train Epoch: 1 [28800/60000 (48%)]\tLoss: 0.078830\n", + "Train Epoch: 1 [29440/60000 (49%)]\tLoss: 0.083803\n", + "Train Epoch: 1 [30080/60000 (50%)]\tLoss: 0.373137\n", + "Train Epoch: 1 [30720/60000 (51%)]\tLoss: 0.317618\n", + "Train Epoch: 1 [31360/60000 (52%)]\tLoss: 0.076827\n", + "Train Epoch: 1 [32000/60000 (53%)]\tLoss: 0.125064\n", + "Train Epoch: 1 [32640/60000 (54%)]\tLoss: 0.057970\n", + "Train Epoch: 1 [33280/60000 (55%)]\tLoss: 0.167010\n", + "Train Epoch: 1 [33920/60000 (57%)]\tLoss: 0.026072\n", + "Train Epoch: 1 [34560/60000 (58%)]\tLoss: 0.160082\n", + "Train Epoch: 1 [35200/60000 (59%)]\tLoss: 0.046618\n", + "Train Epoch: 1 [35840/60000 (60%)]\tLoss: 0.050997\n", + "Train Epoch: 1 [36480/60000 (61%)]\tLoss: 0.370405\n", + "Train Epoch: 1 [37120/60000 (62%)]\tLoss: 0.106518\n", + "Train Epoch: 1 [37760/60000 (63%)]\tLoss: 0.101690\n", + "Train Epoch: 1 [38400/60000 (64%)]\tLoss: 0.064859\n", + "Train Epoch: 1 [39040/60000 (65%)]\tLoss: 0.079881\n", + "Train Epoch: 1 [39680/60000 
(66%)]\tLoss: 0.110059\n", + "Train Epoch: 1 [40320/60000 (67%)]\tLoss: 0.067634\n", + "Train Epoch: 1 [40960/60000 (68%)]\tLoss: 0.208821\n", + "Train Epoch: 1 [41600/60000 (69%)]\tLoss: 0.088838\n", + "Train Epoch: 1 [42240/60000 (70%)]\tLoss: 0.079848\n", + "Train Epoch: 1 [42880/60000 (71%)]\tLoss: 0.193431\n", + "Train Epoch: 1 [43520/60000 (72%)]\tLoss: 0.171546\n", + "Train Epoch: 1 [44160/60000 (74%)]\tLoss: 0.178438\n", + "Train Epoch: 1 [44800/60000 (75%)]\tLoss: 0.169155\n", + "Train Epoch: 1 [45440/60000 (76%)]\tLoss: 0.055189\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Train Epoch: 1 [46080/60000 (77%)]\tLoss: 0.028144\n", + "Train Epoch: 1 [46720/60000 (78%)]\tLoss: 0.021224\n", + "Train Epoch: 1 [47360/60000 (79%)]\tLoss: 0.071242\n", + "Train Epoch: 1 [48000/60000 (80%)]\tLoss: 0.010806\n", + "Train Epoch: 1 [48640/60000 (81%)]\tLoss: 0.110852\n", + "Train Epoch: 1 [49280/60000 (82%)]\tLoss: 0.010513\n", + "Train Epoch: 1 [49920/60000 (83%)]\tLoss: 0.364887\n", + "Train Epoch: 1 [50560/60000 (84%)]\tLoss: 0.013409\n", + "Train Epoch: 1 [51200/60000 (85%)]\tLoss: 0.419877\n", + "Train Epoch: 1 [51840/60000 (86%)]\tLoss: 0.067204\n", + "Train Epoch: 1 [52480/60000 (87%)]\tLoss: 0.067301\n", + "Train Epoch: 1 [53120/60000 (88%)]\tLoss: 0.179633\n", + "Train Epoch: 1 [53760/60000 (90%)]\tLoss: 0.168005\n", + "Train Epoch: 1 [54400/60000 (91%)]\tLoss: 0.053020\n", + "Train Epoch: 1 [55040/60000 (92%)]\tLoss: 0.127693\n", + "Train Epoch: 1 [55680/60000 (93%)]\tLoss: 0.000790\n", + "Train Epoch: 1 [56320/60000 (94%)]\tLoss: 0.638172\n", + "Train Epoch: 1 [56960/60000 (95%)]\tLoss: 0.190752\n", + "Train Epoch: 1 [57600/60000 (96%)]\tLoss: 0.001666\n", + "Train Epoch: 1 [58240/60000 (97%)]\tLoss: 0.040600\n", + "Train Epoch: 1 [58880/60000 (98%)]\tLoss: 0.218874\n", + "Train Epoch: 1 [59520/60000 (99%)]\tLoss: 0.056282\n", + "Train Epoch: 2 [0/60000 (0%)]\tLoss: 0.076416\n", + "Train Epoch: 2 [640/60000 (1%)]\tLoss: 
0.112750\n", + "Train Epoch: 2 [1280/60000 (2%)]\tLoss: 0.171557\n", + "Train Epoch: 2 [1920/60000 (3%)]\tLoss: 0.014248\n", + "Train Epoch: 2 [2560/60000 (4%)]\tLoss: 0.062175\n", + "Train Epoch: 2 [3200/60000 (5%)]\tLoss: 0.182906\n", + "Train Epoch: 2 [3840/60000 (6%)]\tLoss: 0.135498\n", + "Train Epoch: 2 [4480/60000 (7%)]\tLoss: 0.123534\n", + "Train Epoch: 2 [5120/60000 (9%)]\tLoss: 0.122674\n", + "Train Epoch: 2 [5760/60000 (10%)]\tLoss: 0.111532\n", + "Train Epoch: 2 [6400/60000 (11%)]\tLoss: 0.099646\n", + "Train Epoch: 2 [7040/60000 (12%)]\tLoss: 0.051113\n", + "Train Epoch: 2 [7680/60000 (13%)]\tLoss: 0.227051\n", + "Train Epoch: 2 [8320/60000 (14%)]\tLoss: 0.138824\n", + "Train Epoch: 2 [8960/60000 (15%)]\tLoss: 0.088158\n", + "Train Epoch: 2 [9600/60000 (16%)]\tLoss: 0.103052\n", + "Train Epoch: 2 [10240/60000 (17%)]\tLoss: 0.369061\n", + "Train Epoch: 2 [10880/60000 (18%)]\tLoss: 0.165350\n", + "Train Epoch: 2 [11520/60000 (19%)]\tLoss: 0.142054\n", + "Train Epoch: 2 [12160/60000 (20%)]\tLoss: 0.034043\n", + "Train Epoch: 2 [12800/60000 (21%)]\tLoss: 0.093324\n", + "Train Epoch: 2 [13440/60000 (22%)]\tLoss: 0.129838\n", + "Train Epoch: 2 [14080/60000 (23%)]\tLoss: 0.023088\n", + "Train Epoch: 2 [14720/60000 (25%)]\tLoss: 0.110030\n", + "Train Epoch: 2 [15360/60000 (26%)]\tLoss: 0.355520\n", + "Train Epoch: 2 [16000/60000 (27%)]\tLoss: 0.072964\n", + "Train Epoch: 2 [16640/60000 (28%)]\tLoss: 0.002617\n", + "Train Epoch: 2 [17280/60000 (29%)]\tLoss: 0.415827\n", + "Train Epoch: 2 [17920/60000 (30%)]\tLoss: 0.061368\n", + "Train Epoch: 2 [18560/60000 (31%)]\tLoss: 0.059896\n", + "Train Epoch: 2 [19200/60000 (32%)]\tLoss: 0.006614\n", + "Train Epoch: 2 [19840/60000 (33%)]\tLoss: 0.072400\n", + "Train Epoch: 2 [20480/60000 (34%)]\tLoss: 0.084101\n", + "Train Epoch: 2 [21120/60000 (35%)]\tLoss: 0.060527\n", + "Train Epoch: 2 [21760/60000 (36%)]\tLoss: 0.245168\n", + "Train Epoch: 2 [22400/60000 (37%)]\tLoss: 0.104240\n", + "Train Epoch: 2 [23040/60000 
(38%)]\tLoss: 0.039879\n", + "Train Epoch: 2 [23680/60000 (39%)]\tLoss: 0.127886\n", + "Train Epoch: 2 [24320/60000 (41%)]\tLoss: 0.151734\n", + "Train Epoch: 2 [24960/60000 (42%)]\tLoss: 0.156671\n", + "Train Epoch: 2 [25600/60000 (43%)]\tLoss: 0.109427\n", + "Train Epoch: 2 [26240/60000 (44%)]\tLoss: 0.080561\n", + "Train Epoch: 2 [26880/60000 (45%)]\tLoss: 0.092277\n", + "Train Epoch: 2 [27520/60000 (46%)]\tLoss: 0.210802\n", + "Train Epoch: 2 [28160/60000 (47%)]\tLoss: 0.057986\n", + "Train Epoch: 2 [28800/60000 (48%)]\tLoss: 0.124804\n", + "Train Epoch: 2 [29440/60000 (49%)]\tLoss: 0.119243\n", + "Train Epoch: 2 [30080/60000 (50%)]\tLoss: 0.279113\n", + "Train Epoch: 2 [30720/60000 (51%)]\tLoss: 0.214091\n", + "Train Epoch: 2 [31360/60000 (52%)]\tLoss: 0.106701\n", + "Train Epoch: 2 [32000/60000 (53%)]\tLoss: 0.547162\n", + "Train Epoch: 2 [32640/60000 (54%)]\tLoss: 0.377766\n", + "Train Epoch: 2 [33280/60000 (55%)]\tLoss: 0.128663\n", + "Train Epoch: 2 [33920/60000 (57%)]\tLoss: 0.078428\n", + "Train Epoch: 2 [34560/60000 (58%)]\tLoss: 0.096952\n", + "Train Epoch: 2 [35200/60000 (59%)]\tLoss: 0.047050\n", + "Train Epoch: 2 [35840/60000 (60%)]\tLoss: 0.106311\n", + "Train Epoch: 2 [36480/60000 (61%)]\tLoss: 0.092369\n", + "Train Epoch: 2 [37120/60000 (62%)]\tLoss: 0.038745\n", + "Train Epoch: 2 [37760/60000 (63%)]\tLoss: 0.474230\n", + "Train Epoch: 2 [38400/60000 (64%)]\tLoss: 0.213040\n", + "Train Epoch: 2 [39040/60000 (65%)]\tLoss: 0.665591\n", + "Train Epoch: 2 [39680/60000 (66%)]\tLoss: 0.068594\n", + "Train Epoch: 2 [40320/60000 (67%)]\tLoss: 0.036250\n", + "Train Epoch: 2 [40960/60000 (68%)]\tLoss: 0.144957\n", + "Train Epoch: 2 [41600/60000 (69%)]\tLoss: 0.355639\n", + "Train Epoch: 2 [42240/60000 (70%)]\tLoss: 0.198450\n", + "Train Epoch: 2 [42880/60000 (71%)]\tLoss: 0.221584\n", + "Train Epoch: 2 [43520/60000 (72%)]\tLoss: 0.043087\n", + "Train Epoch: 2 [44160/60000 (74%)]\tLoss: 0.053449\n", + "Train Epoch: 2 [44800/60000 (75%)]\tLoss: 0.244004\n", 
+ "Train Epoch: 2 [45440/60000 (76%)]\tLoss: 0.051597\n", + "Train Epoch: 2 [46080/60000 (77%)]\tLoss: 0.018794\n", + "Train Epoch: 2 [46720/60000 (78%)]\tLoss: 0.047302\n", + "Train Epoch: 2 [47360/60000 (79%)]\tLoss: 0.233751\n", + "Train Epoch: 2 [48000/60000 (80%)]\tLoss: 0.523653\n", + "Train Epoch: 2 [48640/60000 (81%)]\tLoss: 0.011048\n", + "Train Epoch: 2 [49280/60000 (82%)]\tLoss: 0.185908\n", + "Train Epoch: 2 [49920/60000 (83%)]\tLoss: 0.085652\n", + "Train Epoch: 2 [50560/60000 (84%)]\tLoss: 0.065321\n", + "Train Epoch: 2 [51200/60000 (85%)]\tLoss: 0.174393\n", + "Train Epoch: 2 [51840/60000 (86%)]\tLoss: 0.031607\n", + "Train Epoch: 2 [52480/60000 (87%)]\tLoss: 0.174475\n", + "Train Epoch: 2 [53120/60000 (88%)]\tLoss: 0.217395\n", + "Train Epoch: 2 [53760/60000 (90%)]\tLoss: 0.061645\n", + "Train Epoch: 2 [54400/60000 (91%)]\tLoss: 0.141715\n", + "Train Epoch: 2 [55040/60000 (92%)]\tLoss: 0.198288\n", + "Train Epoch: 2 [55680/60000 (93%)]\tLoss: 0.254158\n", + "Train Epoch: 2 [56320/60000 (94%)]\tLoss: 0.110041\n", + "Train Epoch: 2 [56960/60000 (95%)]\tLoss: 0.270937\n", + "Train Epoch: 2 [57600/60000 (96%)]\tLoss: 0.070328\n", + "Train Epoch: 2 [58240/60000 (97%)]\tLoss: 0.024610\n", + "Train Epoch: 2 [58880/60000 (98%)]\tLoss: 0.236358\n", + "Train Epoch: 2 [59520/60000 (99%)]\tLoss: 0.117915\n", + "Train Epoch: 3 [0/60000 (0%)]\tLoss: 0.146749\n", + "Train Epoch: 3 [640/60000 (1%)]\tLoss: 0.039942\n", + "Train Epoch: 3 [1280/60000 (2%)]\tLoss: 0.005945\n", + "Train Epoch: 3 [1920/60000 (3%)]\tLoss: 0.118340\n", + "Train Epoch: 3 [2560/60000 (4%)]\tLoss: 0.212263\n", + "Train Epoch: 3 [3200/60000 (5%)]\tLoss: 0.108361\n", + "Train Epoch: 3 [3840/60000 (6%)]\tLoss: 0.123859\n", + "Train Epoch: 3 [4480/60000 (7%)]\tLoss: 0.151609\n", + "Train Epoch: 3 [5120/60000 (9%)]\tLoss: 0.190431\n", + "Train Epoch: 3 [5760/60000 (10%)]\tLoss: 0.044887\n", + "Train Epoch: 3 [6400/60000 (11%)]\tLoss: 0.118531\n", + "Train Epoch: 3 [7040/60000 (12%)]\tLoss: 
0.175035\n", + "Train Epoch: 3 [7680/60000 (13%)]\tLoss: 0.116727\n", + "Train Epoch: 3 [8320/60000 (14%)]\tLoss: 0.233833\n", + "Train Epoch: 3 [8960/60000 (15%)]\tLoss: 0.088643\n", + "Train Epoch: 3 [9600/60000 (16%)]\tLoss: 0.384525\n", + "Train Epoch: 3 [10240/60000 (17%)]\tLoss: 0.043991\n", + "Train Epoch: 3 [10880/60000 (18%)]\tLoss: 0.851141\n", + "Train Epoch: 3 [11520/60000 (19%)]\tLoss: 0.094810\n", + "Train Epoch: 3 [12160/60000 (20%)]\tLoss: 0.083756\n", + "Train Epoch: 3 [12800/60000 (21%)]\tLoss: 0.222417\n", + "Train Epoch: 3 [13440/60000 (22%)]\tLoss: 0.448306\n", + "Train Epoch: 3 [14080/60000 (23%)]\tLoss: 0.037722\n", + "Train Epoch: 3 [14720/60000 (25%)]\tLoss: 0.318071\n", + "Train Epoch: 3 [15360/60000 (26%)]\tLoss: 0.241556\n", + "Train Epoch: 3 [16000/60000 (27%)]\tLoss: 0.034820\n", + "Train Epoch: 3 [16640/60000 (28%)]\tLoss: 0.154392\n", + "Train Epoch: 3 [17280/60000 (29%)]\tLoss: 0.103088\n", + "Train Epoch: 3 [17920/60000 (30%)]\tLoss: 0.309260\n", + "Train Epoch: 3 [18560/60000 (31%)]\tLoss: 0.129015\n", + "Train Epoch: 3 [19200/60000 (32%)]\tLoss: 0.081739\n", + "Train Epoch: 3 [19840/60000 (33%)]\tLoss: 0.194573\n", + "Train Epoch: 3 [20480/60000 (34%)]\tLoss: 0.100797\n", + "Train Epoch: 3 [21120/60000 (35%)]\tLoss: 0.130121\n", + "Train Epoch: 3 [21760/60000 (36%)]\tLoss: 0.148123\n", + "Train Epoch: 3 [22400/60000 (37%)]\tLoss: 0.107668\n", + "Train Epoch: 3 [23040/60000 (38%)]\tLoss: 0.118747\n", + "Train Epoch: 3 [23680/60000 (39%)]\tLoss: 0.145568\n", + "Train Epoch: 3 [24320/60000 (41%)]\tLoss: 0.228613\n", + "Train Epoch: 3 [24960/60000 (42%)]\tLoss: 0.125414\n", + "Train Epoch: 3 [25600/60000 (43%)]\tLoss: 0.083142\n", + "Train Epoch: 3 [26240/60000 (44%)]\tLoss: 0.394818\n", + "Train Epoch: 3 [26880/60000 (45%)]\tLoss: 0.045244\n", + "Train Epoch: 3 [27520/60000 (46%)]\tLoss: 0.005072\n", + "Train Epoch: 3 [28160/60000 (47%)]\tLoss: 0.115797\n", + "Train Epoch: 3 [28800/60000 (48%)]\tLoss: 0.095257\n", + "Train Epoch: 3 
[29440/60000 (49%)]\tLoss: 0.005111\n", + "Train Epoch: 3 [30080/60000 (50%)]\tLoss: 0.110229\n", + "Train Epoch: 3 [30720/60000 (51%)]\tLoss: 0.082010\n", + "Train Epoch: 3 [31360/60000 (52%)]\tLoss: 0.055340\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Train Epoch: 3 [32000/60000 (53%)]\tLoss: 0.195543\n", + "Train Epoch: 3 [32640/60000 (54%)]\tLoss: 0.150267\n", + "Train Epoch: 3 [33280/60000 (55%)]\tLoss: 0.177324\n", + "Train Epoch: 3 [33920/60000 (57%)]\tLoss: 0.038098\n", + "Train Epoch: 3 [34560/60000 (58%)]\tLoss: 0.036462\n", + "Train Epoch: 3 [35200/60000 (59%)]\tLoss: 0.076419\n", + "Train Epoch: 3 [35840/60000 (60%)]\tLoss: 0.094155\n", + "Train Epoch: 3 [36480/60000 (61%)]\tLoss: 0.276453\n", + "Train Epoch: 3 [37120/60000 (62%)]\tLoss: 0.013989\n", + "Train Epoch: 3 [37760/60000 (63%)]\tLoss: 0.033511\n", + "Train Epoch: 3 [38400/60000 (64%)]\tLoss: 0.053062\n", + "Train Epoch: 3 [39040/60000 (65%)]\tLoss: 0.002972\n", + "Train Epoch: 3 [39680/60000 (66%)]\tLoss: 0.044364\n", + "Train Epoch: 3 [40320/60000 (67%)]\tLoss: 0.201063\n", + "Train Epoch: 3 [40960/60000 (68%)]\tLoss: 0.239112\n", + "Train Epoch: 3 [41600/60000 (69%)]\tLoss: 0.301890\n", + "Train Epoch: 3 [42240/60000 (70%)]\tLoss: 0.209023\n", + "Train Epoch: 3 [42880/60000 (71%)]\tLoss: 0.340197\n", + "Train Epoch: 3 [43520/60000 (72%)]\tLoss: 0.051514\n", + "Train Epoch: 3 [44160/60000 (74%)]\tLoss: 0.163148\n", + "Train Epoch: 3 [44800/60000 (75%)]\tLoss: 0.035544\n", + "Train Epoch: 3 [45440/60000 (76%)]\tLoss: 0.105758\n", + "Train Epoch: 3 [46080/60000 (77%)]\tLoss: 0.091835\n", + "Train Epoch: 3 [46720/60000 (78%)]\tLoss: 0.218505\n", + "Train Epoch: 3 [47360/60000 (79%)]\tLoss: 0.212545\n", + "Train Epoch: 3 [48000/60000 (80%)]\tLoss: 0.001972\n", + "Train Epoch: 3 [48640/60000 (81%)]\tLoss: 0.165325\n", + "Train Epoch: 3 [49280/60000 (82%)]\tLoss: 0.099900\n", + "Train Epoch: 3 [49920/60000 (83%)]\tLoss: 0.475469\n", + "Train Epoch: 3 [50560/60000 
(84%)]\tLoss: 0.102674\n", + "Train Epoch: 3 [51200/60000 (85%)]\tLoss: 0.067554\n", + "Train Epoch: 3 [51840/60000 (86%)]\tLoss: 0.376874\n", + "Train Epoch: 3 [52480/60000 (87%)]\tLoss: 0.133132\n", + "Train Epoch: 3 [53120/60000 (88%)]\tLoss: 0.042010\n", + "Train Epoch: 3 [53760/60000 (90%)]\tLoss: 0.008966\n", + "Train Epoch: 3 [54400/60000 (91%)]\tLoss: 0.073707\n", + "Train Epoch: 3 [55040/60000 (92%)]\tLoss: 0.128305\n", + "Train Epoch: 3 [55680/60000 (93%)]\tLoss: 0.039086\n", + "Train Epoch: 3 [56320/60000 (94%)]\tLoss: 0.176628\n", + "Train Epoch: 3 [56960/60000 (95%)]\tLoss: 0.025344\n", + "Train Epoch: 3 [57600/60000 (96%)]\tLoss: 0.137705\n", + "Train Epoch: 3 [58240/60000 (97%)]\tLoss: 0.035565\n", + "Train Epoch: 3 [58880/60000 (98%)]\tLoss: 0.165229\n", + "Train Epoch: 3 [59520/60000 (99%)]\tLoss: 0.070512\n" + ] + } + ], + "source": [ + "model = create_net(tornasole_save_interval=100, base_loc='./ts_output', run_id='good')\n", + "train(model=model, epochs=4, learning_rate=0.1, momentum=0.9, batch_size=64, device = torch.device(\"cpu\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Data Analysis\n", + "Now that we have trained the system we can analyze the data. Notice that this notebook focuses on after-the-fact analysis. Tornasole also provides a collection of tools to do automatic analysis as the training run is progressing, which will be covered in a different notebook." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We import a basic analysis library, which defines a concept of `Trial`. A `Trial` is a single training run, which is depositing values in a local directory (`LocalTrial`) or S3 (`S3Trial`). In this case we are using a `LocalTrial` - if you wish, you can change the output from `./ts_output` to `s3://mybucket/myprefix` and use `S3Trial` instead of `LocalTrial`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "from tornasole.trials import LocalTrial" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And we read the data" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "good_trial = LocalTrial('myrun', './ts_output/good')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can list all the tensors we know something about. Each one of these names is the name of a tensor - the name is a combination of the layer name (which, in these cases, is auto-assigned by PyTorch) and whether it's an input/output/weight/bias/gradient." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['Net_conv1.weight',\n", + " 'Net_conv1.bias',\n", + " 'Net_conv2.weight',\n", + " 'Net_conv2.bias',\n", + " 'Net_fc1.weight',\n", + " 'Net_fc1.bias',\n", + " 'Net_fc2.weight',\n", + " 'Net_fc2.bias',\n", + " 'conv1_input_0',\n", + " 'conv1_output0',\n", + " 'conv2_input_0',\n", + " 'conv2_output0',\n", + " 'fc1_input_0',\n", + " 'fc1_output0',\n", + " 'fc2_input_0',\n", + " 'fc2_output0',\n", + " 'Net_input_0',\n", + " 'Net_output0',\n", + " 'gradient/Net_fc2.bias',\n", + " 'gradient/Net_fc2.weight',\n", + " 'gradient/Net_fc1.bias',\n", + " 'gradient/Net_fc1.weight',\n", + " 'gradient/Net_conv2.weight',\n", + " 'gradient/Net_conv2.bias',\n", + " 'gradient/Net_conv1.weight',\n", + " 'gradient/Net_conv1.bias']" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "good_trial.tensors()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For each tensor we can ask for which steps we have data - in this case, every 100 steps" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + 
"data": { + "text/plain": [ + "[0,\n", + " 100,\n", + " 200,\n", + " 300,\n", + " 400,\n", + " 500,\n", + " 600,\n", + " 700,\n", + " 800,\n", + " 900,\n", + " 1000,\n", + " 1100,\n", + " 1200,\n", + " 1300,\n", + " 1400,\n", + " 1500,\n", + " 1600,\n", + " 1700,\n", + " 1800,\n", + " 1900,\n", + " 2000,\n", + " 2100,\n", + " 2200,\n", + " 2300,\n", + " 2400,\n", + " 2500,\n", + " 2600,\n", + " 2700,\n", + " 2800,\n", + " 2900,\n", + " 3000,\n", + " 3100,\n", + " 3200,\n", + " 3300,\n", + " 3400,\n", + " 3500,\n", + " 3600,\n", + " 3700]" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "good_trial.tensor('gradient/Net_fc1.weight').steps()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can obtain each tensor at each step as a `numpy` array" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "numpy.ndarray" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "type(good_trial.tensor('gradient/Net_fc1.weight').step(300).value)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Gradient Analysis" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also create a simple function that prints the `np.mean` of the `np.abs` of each gradient. We expect each gradient to get smaller over time, as the system converges to a good solution. Now, remember that this is an interactive analysis - we are showing these tensors to give an idea of the data. \n", + "\n", + "Later on in this notebook we will run an automated analysis." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "# Define a function that, for the given tensor name, walks through all \n", + "# the batches for which we have data and computes mean(abs(tensor)).\n", + "# Returns the set of steps and the values\n", + "\n", + "def get_data(trial, tname):\n", + " tensor = trial.tensor(tname)\n", + " steps = tensor.steps()\n", + " vals = []\n", + " for s in steps:\n", + " val = tensor.step(s).value\n", + " val = np.mean(np.abs(val))\n", + " vals.append(val)\n", + " return steps, vals" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "def plot_gradients( lt ):\n", + " for tname in lt.tensors():\n", + " if not 'gradient' in tname: continue\n", + " steps, data = get_data(lt, tname)\n", + " plt.plot( steps, data, label=tname)\n", + " plt.legend()\n", + " plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can plot these gradients. Notice how they are (mostly!) decreasing. We should investigate the spikes!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXoAAAD4CAYAAADiry33AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOydeXhURdq37+rurAQIYRXCKgFZDAHCpiTCIJvgAgM6qOMGoiOKjjrzynyXAg6oI7wziMurqIgjjrLpiAuyKAjIEhKJCkEIhEASAoTs6aTTy6nvj04fujudpLOnk3NfFxfdZ61zTudXdZ566ldCSomGhoaGRvNF19gF0NDQ0NCoXzSh19DQ0GjmaEKvoaGh0czRhF5DQ0OjmaMJvYaGhkYzx9DYBXCnQ4cOslevXo1dDA0NDQ2fIiEh4YqUsqOndU1O6Hv16kV8fHxjF0NDQ0PDpxBCnKtonRa60dDQ0GjmaEKvoaGh0czRhF5DQ0OjmdPkYvSesFgspKenYzKZGrsoGhqVEhgYSHh4OH5+fo1dFA0NFZ8Q+vT0dFq3bk2vXr0QQjR2cTQ0PCKlJDs7m/T0dHr37t3YxdHQUPGJ0I3JZKJ9+/aayGs0aYQQtG/fXnvz1Ghy+ITQA5rIa/gE2u9UoyniM0LfEJjNZsxmc2MXQ0NDQ6NO0YTeifz8fAoKChq7GBoaGhp1iib0TiiKgqIoDXKuXr16ceXKFQBuuOGGGh9n3bp1XLhwwWXZp59+yvLly1m3bh06nY5ffvlFXTd48GBSU1MrPeaqVasoLi6udJtNmzYxYMAAxo8fX+E299xzD/3792fw4ME89NBDWCyWctvs2bOH6dOne9z/lltuIS8vr9JyaGhoVI0m9E5IKWsl9FartUb7HThwoMbn9CT027ZtY8qUKQCEh4ezfPnyah3TG6F///33effdd9m9e3eF29xzzz389ttv/Prrr5SUlPDee+9VqxzffPMNoaGh1dpHQ0OjPD6RXunM0i+Pk3ShbsMrA7u24YXpA1WRl1J67FT7+9//zvr16+nYsSPdu3dn+PDhfPXVV0RFRbF//37mzJlDv379WLZsGWazmfbt2/Pxxx/TuXNnsrOzmTNnDhkZGYwZMwbnKRxDQkIoKioCYMWKFWzcuJHS0lJmzJjB0qVLSU1NZerUqYwdO5YDBw7QrVs3vvjiC77++mvi4+O55557CAoK4uDBgwQGBpKYmMiwYcP49ddfmT59Onv37uXkyZP079/f5Xp27NjB4sWLKS0t5dprr+WDDz5g7dq1XLhwgfHjx9OhQwePQv7iiy+yf/9+5s6dy2233cYrr7zC//zP//Dtt9+i0+l4+OGHeeKJJ7jlllvUfUaOHEl6errH+19QUMC0adM4ffo048eP56233kKn06m+Rx06dOCOO+4gLS0Nk8nEk08+yfz587HZbMydO5f4+HiEEDz00EP8+c9/rv4PQEOjmaO16MtwFl5PrfojR46wZcsWfv75Z7Zt2+ZivGY2m4mPj+eZZ55h7NixHDp0iKNHj/KHP/yBV199FYClS5cyduxYjh8/zowZMzh//ny5c+zYsYPk5GTi4uJITEwkISGBvXv3ApCcnMyCBQs4fvw4oaGhbNmyhVmzZhEdHc3HH39MYmIiQUFBHD16lCFDhqgVlU6n469//SsvvfSSy7muXLnCsmXL2LVrFz/99BPR0dH885//ZOHChXTt2pXdu3dX2Fp/4YUX1POuWLGCNWvWkJqaSmJiIr/88gv33HOPy/YWi4WPPvpIfctwJy4ujtdff52kpCTOnDnDZ599Vm6btWvXkpCQQHx8PKtXryY7O5vExEQyMjI4duwYv/76Kw
8++KDH42totHR8rkW/+NZB9XJc57CLoijo9XqX9T/++CO33347gYGBBAYGcuutt6rr7rrrLvVzeno6d911F5mZmZjNZnXgzN69e1UBmzZtGu3atStXhh07drBjxw6GDh0KQFFREcnJyfTo0YPevXsTFRUFwPDhwyuMs3/77bdMnTrVZdndd9/N8uXLOXv2rLrs0KFDJCUlceONNwL2ymrMmDGV36QK2LVrF48++igGg/3nFBYW5rL+scceIzY2lpiYGI/7jxw5kj59+gAwZ84c9u/fz6xZs1y2Wb16NZ9//jkAaWlpJCcn079/f1JSUnjiiSeYNm0akyZNqlH5NTSaOz4n9PVFVS36ymjVqpX6+YknnuDpp5/mtttuY8+ePSxZsqRaZVi0aBGPPPKIy/LU1FQCAgLU73q9npKSEo/H2LFjB1u2bHFZZjAYeOaZZ/jHP/7hcq6JEyfyySefeF2+mrB06VKysrJ45513KtzGPUzm/n3Pnj3s2rWLgwcPEhwczLhx4zCZTLRr146ff/6Z7du38/bbb7Nx40bWrl1bL9ehoeHLaKGbMpzF3Vn0Hdx44418+eWXmEwmioqK+OqrrzweJz8/n27dugHw4YcfqstjY2P5z3/+A9g7S3Nzc8vtO3nyZNauXavG6zMyMrh8+XKl5W7dujWFhYXqua1WK+3bty+33QMPPMCuXbvIysoCYPTo0fz444+cPn0aAKPRyKlTp8od0xsmTpzIO++8o74V5eTkAPDee++xfft2PvnkE3S6in9qcXFxnD17FkVR2LBhA2PHjnVZn5+fT7t27QgODua3337j0KFDgD38pCgKv//971m2bBk//fST12XW0GhJaEJfhrPQe2rRjxgxgttuu43IyEimTp3K9ddfT9u2bcttt2TJEmbPns3w4cPp0KGDunzx4sXs3buXQYMG8dlnn9GjR49y+06aNIm7776bMWPGcP311zNr1qwqBfeBBx7g0UcfJSoqiq1bt3LzzTd73M7f35+FCxeqFUfHjh1Zt24dc+bMITIykjFjxvDbb78BMH/+fKZMmVJp6qQz8+bNo0ePHkRGRjJkyBC1Qnv00Ue5dOkSY8aMISoqihdffBGA+Ph45s2bp+4/YsQIHn/8cQYMGEDv3r2ZMWOGy/GnTJmC1WplwIABPPfcc4wePRqwV4Tjxo0jKiqKe++9l5dfftmr8mpotDSEp9ZrYxIdHS3dZ5g6ceIEAwYMqNfzGo1G8vPzAWjTpg0hISHltikqKiIkJITi4mJiY2NZs2YNw4YNq9dyVYd58+Yxb948VQg1GoeG+L1qaLgjhEiQUkZ7WudVi14IMUUIcVIIcVoI8ZyH9QFCiA1l6w8LIXq5re8hhCgSQjxbkwtoCLyJ0c+fP5+oqCiGDRvG73//+yYl8mAPlWgir6Gh4U6VnbFCCD3wJjARSAeOCCG2SimTnDabC+RKKfsKIf4A/AO4y2n9P4FtdVfsusch7jqdrkKhd4QkWhKjRo2itLTUZdlHH33E9ddf30gl0tDQqC7eZN2MBE5LKVMAhBCfArcDzkJ/O7Ck7PNm4A0hhJBSSiHEHcBZwFhnpa4HFEVBp9Oh0+mw2WyNXZwmw+HDhxu7CBoaGrXEm9BNNyDN6Xt62TKP20gprUA+0F4IEQL8D7C0shMIIeYLIeKFEPGOrJCGRlEUhBDodDqPWTcaGhoavkp9Z90sAf4lpSyqbCMp5RopZbSUMrpjx471XKQKy6C26BvK2ExDQ0OjIfAmdJMBdHf6Hl62zNM26UIIA9AWyAZGAbOEEK8CoYAihDBJKd+odcnrGOfQjSb0GhoazQlvhP4IECGE6I1d0P8A3O22zVbgfuAgMAv4XtrjH+qYdyHEEqCoKYo8XLU9cAh9RcZmGhoaGr5GlaGbspj748B24ASwUUp5XAjxohDitrLN3scekz8NPA2US8Fs6ji36B3f6xPNj772eONXP27cONzHZQAkJibyzTff1Gl5NDSaKl
7F6KWU30gp+0kpr5VSLi9b9oKUcmvZZ5OUcraUsq+UcqQjQ8ftGEuklCvrtvh1g5TSJUbvWFZdND96V2rrR18VtfGr14ReoyXhe6Zm256Di7/W7TG7DIYhT1bZotf86O3UpR/9ggULmDx5MrfddhszZsygXbt2rF27lrVr13LmzBmWL1/O+vXrWb16NWazmVGjRvHWW2+h1+td/Oo9PZtnn7WPz9u0aROPPfYYeXl5vP/++4waNYoXXniBkpIS9u/fz6JFi1wcSDU0mhua1w1XW++VCb3mR3+VuvSjj4mJYd++fYDduyYpyT48Y9++fcTGxnLixAk2bNjAjz/+SGJiInq9no8//tjrZwP2N624uDhWrVrF0qVL8ff358UXX+Suu+4iMTFRE3mNZo/vteinvlLnh7SazXDlippHD+WFXvOjr5ja+NHHxMSwatUqkpKSGDhwILm5uWRmZnLw4EFWr17Nhx9+SEJCAiNGjACgpKSETp06uRyjsmcDMHPmTKDy+6ah0ZzxPaGvB7xp0VeG5kdfMVX50Xfr1o28vDy+/fZbYmNjycnJYePGjYSEhNC6dWuklNx///21cqZ03Du9Xl/jfhQNDV9GC93g6nPjCHm4C73mR18xtfWjHz16NKtWrVJb/StXrlRb/xMmTGDz5s3qfcjJyeHcuXMu+3v7bJyp7jVqaPgymtBzVdSFEGr4xl3oNT/6iqmtH31MTAxWq5W+ffsybNgwcnJyVKEfOHAgy5YtY9KkSURGRjJx4kQyMzNdzu/ts3Fm/PjxJCUlERUVxYYNG7y6Tg0Nn8WRWthU/g0fPly6k5SUVG5ZXVJQUCAzMjKkzWaTUkp56dIlmZ2dXW67wsJCKaWURqNRDh8+XCYkJNRruarL3Llz5cGDBxu7GI1CU3o29f171dDwBBAvK9BVLUbP1Ri9c6aKpxj9/PnzSUpKwmQycf/99zdJP/qWSlN/NhoajYkm9FwdFess9J467TQ/ejtN0Y++JT4bDQ1v0YSeq0LvQDM2u4rmR6+h4ftonbFc9aJ34GxspqGhoeHraELPVS96B7Xxu9HQ0NBoamhCj+fQjWO5hoaGhq+jCT2a0GtoaDRvWrzQO/JM3WP0UL9C3xL86N944w369u2LEEK9Vnf27NnD9OnTPa7zxm9eQ0Ojalq80DvbHzioqdBrfvSu3HjjjezatYuePXtW6/wOauM3r6GhcRWfS6/8R9w/+C3ntzo7npSS7oHdWTRqkbqsIqFvLn70paWlSCnZuXNnvfrRO1w4q6KgoIBp06Zx+vRpxo8fz1tvvYVOp3Pxm7/jjjtIS0vDZDLx5JNPMn/+fGw2G3PnziU+Ph4hBA899BB//vOfq/cD0NBoAbT4Fr0D5xa9J2Oz5uJHL6UkLy+P9PT0BvOjr4q4uDhef/11kpKSOHPmjGrn7MzatWtJSEggPj6e1atXk52dTWJiIhkZGRw7doxff/2VBx98sFrn1dBoKfhci/5/Rv5PnR7PZDKRk5PjEqP3ZGzW3PzoDx8+3GB+9FUxcuRI+vTpA8CcOXPYv38/s2bNctlm9erVfP755wCkpaWRnJxM//79SUlJ4YknnmDatGlMmjSpRuXX0Gju+JzQ1zWOEIq7jW51Rsf6oh+9oigN4kfvDc6VrKfve/bsYdeuXRw8eJDg4GDGjRuHyWSiXbt2/Pzzz2zfvp23336bjRs3snbt2oYsuoaGT9DiQzeeOmMd352Fvrn50UdHR9e7H723xMXFcfbsWRRFYcOGDYwdO9ZlfX5+Pu3atSM4OJjffvuNQ4cOAfbpEBVF4fe//z3Lli3jp59+qtZ5NTRaCprQO3nRO+Mu9M3Fj94h9GFhYfXuR7969WrCw8NJT08nMjJS9aB396MfMWIEjz/+OAMGDKB3797MmDHD5fhTpkzBarUyYMAAnnvuOUaPHg3YK8Jx48YRFRXFvffeW6tZqDQ0mjOiqQ3zj46Olu6TO584cYIBAwbUy/ny8/MxGo
107drVZXleXh4mk4kuXbqoy4qKiggJCaG4uJjY2FjWrFnTpOxw582bx7x581Qh9ERBQQFFRUUEBgZWO5au4R31+XvV0KgIIUSClDLa0zotRu/mc+PA2djM0dpv6p7n3vjRO95SmloFr6GhUX+0eKF3tz9w4Gxs5hD65uB57hB6bzuafcWPXkNDo2I0oa9C6Cta76s4WvLeCr3mR6+h4fs0HwWrIe5e9A6aq7GZFrrR0Gh5tHihryxGD81X6LWJVTQ0Wg4tXui9Cd00J5zFXRN6DY2WQYsWeodFcUsReimlS6hKE3oNjZZBixb6igZLOS+rL6FvDD96KSW/+93v1O0ruraG9KOvDd741Y8bNw73cRkAiYmJfPPNN3VeJg2Npogm9JS3PwDPxmZV0dT96B0teL1eD9RO6BvCj74qauNXrwm9RkvC59IrL770EqUn6saPXpESi8WCHDSI4BeeL7feXeh93Y/e4RDpEPqdO3eyfPnyRvWjX7BgAZMnT+a2225jxowZtGvXjrVr17J27VrOnDnD8uXLWb9+PatXr8ZsNjNq1Cjeeust9Hq9i1+9p2fz7LPPAva3j8cee4y8vDzef/99Ro0axQsvvEBJSQn79+9n0aJFLg6kGhrNjRbdoqdMbD2FbsBV6JuDH73jWvR6PTk5ObzyyiuN7kcfExPDvn37ALt3TVJSEgD79u0jNjaWEydOsGHDBn788UcSExPR6/V8/PHHLseo7NmA/U0rLi6OVatWsXTpUvz9/XnxxRe56667SExM1EReo9njcy36Ln/7W50dq7i4mLy8PDp27OhxvU6nU8MxzcmPXq/Xc+TIEU6cONHofvQxMTGsWrWKpKQkBg4cSG5uLpmZmRw8eJDVq1fz4YcfkpCQwIgRIwAoKSmhU6dOLseo7NkAzJw5E6j8vmloNGd8Tujrkoq86B14G6P3FT/6lStXAvbrklIyfvx4Nm/e7HX56oNu3bqRl5fHt99+S2xsLDk5OWzcuJGQkBBat26NlJL777+/Vs6Ujnun1+tr3I+ioeHLtOjQTWWdsY7ljoFFzcGPfvfu3WRnZ6PT6YiOjubw4cNNwo9+9OjRrFq1itjYWGJiYli5ciUxMTEATJgwgc2bN6v3IScnh3Pnzrns7+2zcaa616ih4ct4JfRCiClCiJNCiNNCiOc8rA8QQmwoW39YCNGrbPlIIURi2b+fhRAz3PdtTBw55ZXF6MHe0m4OfvSPPPIIV65cQafT0aFDB954440m4UcfExOD1Wqlb9++DBs2jJycHFXoBw4cyLJly5g0aRKRkZFMnDiRzMxMl/N7+2ycGT9+PElJSURFRbFhwwavrlNDw2dxDBqq6B+gB84AfQB/4GdgoNs2jwFvl33+A7Ch7HMwYCj7fA1w2fG9on/Dhw+X7iQlJZVbVhfk5OTIzMzMCtcbjUaZkZEhLRaLlFLKwsJCdfnw4cNlQkJCvZSrpsydO1cePHiwwvV5eXnywoULUkopL126JK9cudJQRat3mtKzqa/fq4ZGZQDxsgJd9SZGPxI4LaVMARBCfArcDiQ5bXM7sKTs82bgDSGEkFI6J2MHAk1qKGZVzpTuo2N93Y/e+XodcfrmQlN/NhoajYk3Qt8NSHP6ng6MqmgbKaVVCJEPtAeuCCFGAWuBnsAfpZTlesOEEPOB+YDHkEZ9ISuwP3DgLvS+7kfvbH8ghMBms1W5j6/40fv6s9HQqE/qPetGSnkYGCSEGAB8KITYJqU0uW2zBlgD9qkE67tMDhRFUVMCPdHc/G6cKzbn1NHK0PzoNTR8H286YzOA7k7fw8uWedxGCGEA2gLZzhtIKU8ARcDgmha2rqnIi95BcxN699BNc7kuDQ2NyvFG6I8AEUKI3kIIf+ydrVvdttkK3F/2eRbwvZRSlu1jABBC9ASuA1LrpOR1QFWhm/o2NmtoPMXom1OcXkNDwzNVhm7KYu6PA9uxZ+CslVIeF0K8iL2XdyvwPvCREOI0kIO9Mg
AYCzwnhLAACvCYlLLubQxrgEPkqhL65tTylU7z3zpXYg7vGw0NjeaJVzF6KeU3wDduy15w+mwCZnvY7yPgo1qWsV6oarCUg+Yi9O4Vm/MYAQ0NjeZNix0ZW5kXvTP1JfQN7UfvuIZRo0aRmppaaf+DNzbF9UliYiJjxoxh0KBBREZG1vmApvj4eBYuXFjpNqmpqQwe7Lk7ydM919Boyvic182+jae4klZU6+MoUsFqsWAwXKBTzzbE3NnP43beZqeA3SWxsiyeiqitH/3gwYPp2rWrumzbtm0sXLiQX3/9VfWjX79+vct+lfU/rFq1invvvZfg4OAal6s2BAcH8+9//5uIiAguXLjA8OHDmTx5co29592Jjo4mOjq6xvt7uucaGk2ZFtuidwzdqqJB79Ki//vf/07//v0ZO3Ysc+bMYeXKlYwbN46nnnqK6OhoXnvtNb788ktGjRrF0KFDufnmm7l06RIA2dnZTJo0iUGDBjFv3rxyfvQOVqxYwYgRI4iMjGTx4sWAvXU5YMAAHn74YQYNGsSkSZMoKSlh8+bNqh99VFQUJSUlSClVP3qA6dOnc/z4cdXewNm++IcffuCmm25i2LBhzJ49m6KiIlavXq360VdmgfDtt98ybNgwhgwZwoQJEwC7D80dd9xBZGQko0ePVme2WrJkCQ899BDjxo2jT58+rF69GoDnnnuON998Uz3mkiVLWLlyJf369SMiIgKArl270qlTJ7KyslzOb7PZ6N27N1JK8vLy0Ov1qqVzbGwsycnJGI1GHnroIUaOHMnQoUP54osvANizZw/Tp08HICsri4kTJ6rPpWfPnuqbls1m8+qea2g0eSoaMttY/xrKAsHd3qAi8vPzZUZGhjx8+LAcMmSILCkpkQUFBbJv375yxYoV8qabbpJ/+tOf1O1zcnKkoihSSinfffdd+fTTT0sppXziiSfk0qVLpZRSfvXVVxKQWVlZUkopW7VqJaWUcvv27fLhhx+WiqJIm80mp02bJn/44Qd59uxZqdfr5dGjR6WUUs6ePVt+9NFHUkopb7rpJnnkyBH1/AkJCfKPf/yjlFLKDz74QC5YsEB++OGH8p577pEZGRly4MCB8uzZszIzM1OOGjVKXrp0SUop5SuvvKKWr2fPnmrZPHH69GnZtWtXmZKSIqWUMjs7W0op5eOPPy6XLFkipZTyu+++k0OGDJFSSrl48WI5ZswYaTKZZFZWlgwLC5Nms1n+9NNPMjY2Vj3ugAED5Pnz513OdfjwYXnddddJm81WrhyTJ0+Wx44dk19++aWMjo6Wy5YtkyaTSfbq1UtKKeWiRYvU+5SbmysjIiJkUVGR3L17t5w2bZqUUsoFCxbIl156SUop5bZt29TnUp177o5mgaDRGFBLC4RmSXVi9OD7fvTLli1zmewkLi6OU6dOcfPNN6PT6arlR3/w4EFGjx6tXpvDf37//v2qRfLvfvc7srOzKSgoUK85ICCAgIAAOnXqxKVLlxg6dCiXL1/mwoULZGVl0a5dO7p3vzpkIzMzkz/+8Y98+OGHHjvNY2Ji2Lt3L2fPnmXRokW8++673HTTTap3/Y4dO9i6datqz2wymcpN+LJ//34+//xzAKZMmeLyXLy95xoaTZ0WK/SyCi96B94MmvIFP/onnniCN99806Vii42N5d///jdt2rTxuoyOcjr+VVVROnAvv6PfY/bs2WzevJmLFy+6VJgFBQVMmzaN5cuXM3r0aI/HjI2N5f/+7/+4cOECL774IitWrGDPnj2q86WUki1bttC/f3+X/RzhtOqWWQvTaPgqLTZGX5VFsQOH0I8ZM8an/ejvvvtu9u/fr8a6x4wZw5EjR6rtRy+lJCoqikOHDnHmzBngqv98TEyMOs3fnj176NChQ5WVyF133cWnn37K5s2bmT3bnqFrNpuZMWMG9913H7Nmzapw35EjR3LgwAF0Oh2BgYFERUXxzjvvEBsbC9jv5+uvv65W6kePHi13jBtvvJ
GNGzcC9krS03NxR/Oy1/A1WrTQV9Wah6tCP2zYMJ/2ozcYDDz00ENqxdGxY0dWr17N3Llzq+VHL6Wkffv2vPrqq8yaNYshQ4aoLfElS5aQkJBAZGQkzz33nEtFVxGDBg2isLCQbt26cc011wCwceNG9u7dy7p164iKiiIqKorExETAPl/t1q32gdkBAQF0795dbfHHxMRQWFioGq49//zzWCwWIiMjGTRoEM8/X34C+MWLF7Njxw4GDx7Mpk2b6NKlC61bt660zM73XGvla/gCwtHaaSpER0dL98mdT5w4wYABA+r0PNnZ2dhstnLzj7pjtVq5fPkyoaGhKIpCSEgIxcXFxMbGsmbNmiZlhztv3jzmzZvnMdSRl5eHyWSiS5cu6rKsrCx0Op3HN4CKcNwPgHbt2hEUFFT7gjcipaWl6PV6DAYDBw8e5E9/+pNaqdSU+vi9amhUhRAiQUrpMW+4xcboq9uiVxSlyXueV+ZH7+l6azIYzNna2Bub46bO+fPnufPOO1EUBX9/f959993GLpKGRp3TYoVeSumVx4vzwCJf9jz3JPRCiCqF3t2PXlEU/vWvfzFgwIBmIfQREREeY/caGs2JFiv03rbom4uxmSfzMm9mmXL3ozcajeTn5yOE8HrEsIaGRuOidcZ6QXMQeunBqdNxXdXpp3HcBz8/P5+/JxoaLYVmI/QWm0JWYSkWa9Xi4+1gKQfNQegrCt1A9RwsHWmpBoOhWYRuNDRaAs1G6K02SWZ+CUZz1eEEbwdLOfB1oa9ocFNNrIptNhs6nQ69Xo+iKD59XzQ0WgrNRugD/XTohKDYXHUr01svege+LvQVXW9Npkp0vBk44v1aq15Do+nTbIReCEGQn75aQu/ewrXmmLDmmMptX5NYdlU0pB/9zz//DNivY/DgwapnS0VWxZX50Ts6detT6L31o3/ggQfYvHlzueXe+M1raLQkfC7rZve6NVw+l+JxndmqYLFJEvz1UEn4XbEpWK1W/Pz8EDpBp559GP/AfJRSq2pf7IxziKOyuH5T9aN/+eWXee2117wO3VTmR98QLfra+tHX1m9e4yrp6els3LiRWbNmeRzNreEbNJsWPYBOCECieNvydtI9aVPAJkGR9s/Oxy0TRF/1o09KSuL06dPlQjffffcdt956K6NHj/bKj15KqQr9zp07mTx5MqNHj24UP3oHu3btIjo6mn79+qn+Q85+83FxcYwZM4ahQ4dyww03cPLkSZE2uGgAACAASURBVACOHz/OyJEjiYqKIjIykuTkZI/Hb+mcPHmSgoIC/vOf/1Tpu6TRhKnIv7ix/tXGj77UYpU/p+XKK4WmSrcrLCyUGRkZLh7nNpNFlqYVyNK0AmkrcfWoLykpkV9//bWMjIz0ST/6d999V86aNUuazWY5aNAgefbsWZmVlSXHjh0rk5OTZWFhoVd+9FarVWZkZMiUlBQZHh4uDx8+LHNzcxvNj/7++++XkydPljabTZ46dUp269ZNlpSUuPjN5+fnq3MO7Ny5U86cOVMt6/r166WUUpaWlsri4uJyx68pzcmP/oMPPpCvvfaaXLFihVy5cqXMzc1t7CJpVAAtxY/eT6/DoNNRUkWc3lOMXloU18+BV7fX6XQcOXKE6dOn+6Qf/ezZs3n55Zc5d+6cuuzQoUOcOHGC22+/XbUNrsqP3nHfEhISiI2NpXfv3litVtW8raH96AHuvPNOdDodERER9OnTRzVmc5Cfn8/9999PcnIyQggsFgtgd+9cvnw56enpzJw5U32D0LiKzWYjIyODYcOGMXToUD744APWr1/Pgw8+6GLNrdH0aVahGyEEQf56ii2VC70si7W7CL1Vsc8rqBcuog9VpyG6+9E//vjj/Prrr7zzzjuYTOU7dysr16JFi0hMTCQxMZHTp08zd+5coGI/d3d27NjBpEmTypX/kUceYcWKFS7nmjhxIrt27WLfvn0kJSXx/vvvV1o+9+wdvV7vdYy+Kj/6DRs2VN
uPHsp3qLt/f/755xk/fjzHjh1TbabBbtu8detWgoKCuOWWW/j++++9uo6WxKVLl7BYLHTv3p0uXbowZ84ccnNz+c9//oPZbG7s4mlUg2Yl9ADB/npKLTZsSsVxek+Dh6RFQfjpEH56pFtFodPpGDFiBN98841P+tFLKbnzzjv57rvv1Fj36NGj+fHHH0lNTUVRFK/86B2iPnr0aPbu3UtaWho2m43s7Gyg4f3oATZt2oSiKJw5c4aUlJRyk4w4P49169apy1NSUujTpw8LFy7k9ttvV/sTNK6SlpYGoL5l9erVi1mzZnHhwgU2btyopdb6EM1O6IP89UigpJJWvUehtyoIgw7hp0NaXFMphRBERUUxdepUn/SjVxSFwMBAFi5c6OJHv27dOh577DHGjh3rlR+9o0XfuXNn1qxZw3333cfNN9/caH70AD169GDkyJFMnTqVt99+m8DAQJfj//Wvf2XRokUMHTrU5S1o48aNDB48mKioKI4dO8Z9991XZVlbGmlpabRp08bldz5gwACmT5/O6dOn+eKLL3x6fElLotn50VttCkmZBVzTNpCOrQM9bpOVlYUQQhViaVOwZBrRtw0AvcCWY8LQKRid/1UTsIsXL2K1WgkPD/c5P/qcnBwsFgudO3cut48jl9+5UqqIgoICioqKuOaaaxBCUFJSQm5uLh06dMDf379uLqIZ0Fz86P/1r3/RrVs37rzzznLr9u7dy/fff8+YMWOYPHlyI5ROw50W5Udv0OvwN+gqHTilKAp+fn7qd2m1V3bCoANDmf+LRQEnodfpdDz11FOcPn3a5/zoKzNwE0J4/QruOI4jDq6Njm2+FBQUkJ+fX2H/SExMDEVFRRw8eJCQkBBuvPHGBi6hRnVodkIPEFzFCFnp5uQorfZthZ8O9AKE5w7Zt99+26uWb1PD/Xqd0el0ldoNO/vR22w2pJR88sknXH/99ZrQN2Pc4/PuCCGYMmUKRqORnTt3cs0119CnT5+GLKJGNWiWQh/kbyCvxILFpuCnd4vFOw36UZdZrmbcCCHK4vTlO2R91X/dkxe9g6p8fJz96N1DXo57qAl942EymfD39/fat8lb0tLSMBgMLlNPuqPT6bjjjjs4fvw4586d04S+CdPsOmPBnnkDeMynd/RJuAu9MFwNSQhD+Q5ZXzY2qyp04xhU4c1xnCsMIUS1Uiw16habzcbq1atx79OqC9LS0ujatWuVlh5+fn60bduWnJycOi+DRt3RLIU+0E+PwLOTpcfBUlZ7aqUD4a8DxW6H4KA+jM0aAoeIOwu9YraVq/C8FXr3CkMT+sajpKSE4uLiOrcmsFgsZGZmVhi2cScsLEwT+iZOsxR6vU4Q4KfzmGJZTuAUCTbpKvRln53j9DXxbm8KuF+vYrFhvVyMNNlcl1fxtuKo5DShbzo4xltUlYJbXS5cuICiKJrQNyOapdCDPXxTbLaWE2b30Z0OMRcGJ6E3VCz0vha+KfcGU5ZhJK2uy70Reijvae+YgMTXKsDmgNFoBOpe6KvqiHUnLCyMkpISSkpK6rQcGnVHsxZ6myIxu00t6C58quA5t+j1OtDrkE6hn7oW+obyo/f39ycpKUktf+TwIaSmnVOv29Obiic/esd1u3fq1lfmzZQpUwgNDVVdKOsSb/zqU1NTGTx4sMd1nu55Y1CfQh8WFua1n01YWBiA1qpvwvhc1k3el2cwXzBWuZ0iJa3NNrL97EZn6nLFhmK1kutnzyCRVgVDWCBhf3AdOu8YIevAG6Fvin703bp1Y/Xq1cTExLjsJ21uIR2n6/LkR19Zix7sQl+Ta6+Iv/zlLxQXF/POO+/U2TEd1Nav3tM9bwwcQl9UVFStye4rQ0pJWlpatUzenIXeYTeh0bRoti16nT0dHsXN88bRcFX7YqVE6DyYY/npkFbFHsPHLnD/+te/iIyM9Ck/+qlTp3Lq1CnVx0adWMWqsGPHDmJiYpg8eTJ33313pX707kL/7bffMmzYME
aOHMmdd96JzWarMz96gAkTJtC6desKn6/NZqN3795IKcnLy0Ov17N3717A7iuUnJyM0WjkoYceYuTIkQwdOpQvvvgCcPWrz8rKYuLEiepz6dmzp/qmZbPZvLrnjYVD6KWU6ufakpOTQ3FxsddhG0B1YtVa9E2YivyLG+tfbfzo3Tl9qVAmXyp0WZafny8zMjJUz/jSC0XSkl3ei9xqNNu96UutUkopDx06JAcOHCizsrJ8yo9+/vz5ctWqVfLee++VUko5sP8AefLArzLjl7MyJiZGFhQUyIyMDLl06dJK/egd29lsNnn58mUZHh4uU1JSpM1mk8eOHZMFBQV17kfv7CvvicmTJ8tjx47JL7/8UkZHR8tly5ZJk8kke/XqJaWUctGiRep9ys3NlREREbKoqMjluAsWLJAvvfSSlFLKbdu2qc+lOvfcnYbyo//vf/8rFy9eLBcvXiwzMjLq5JhHjx6VixcvlhcvXqzWfitXrpSfffZZnZTBnaSkJPnPf/5TlpSU1Mvxmwu0FD96d4L99VwxmlGkLJt9ynUYvz3jRkEY/MrtK/zsIQmHFcKBAweYPHky/v7+tG7d2qf86GfMmMFbb73F2bNn1WWHE+JISkoiJiYGi8WCzWardBi7oigIIdDpdBw6dEj1owf7q7vNZqtzP/qqiImJYe/evZw9e5ZFixbx7rvvctNNNzFixAj1fm7dulV9SzCZTJw/f97lGPv37+fzzz8H7P0Czs/F23veWBiNRnUcRF3F6dPS0ggICKBjx47V2q8+M2+OHTtGfn4+Z86cYdCgQfVyjuaOV6EbIcQUIcRJIcRpIcRzHtYHCCE2lK0/LIToVbZ8ohAiQQjxa9n/v6vb4ldOkL8eKSWlTmmWDsECp4wbv/K3QRgcVghl9ghl/vWeYvRN2Y9eSonBYODpp5/mlVdeAVk2IEzCzRNuJjExkV27dnHo0KFK/egriwF7k2JZXT96b4iNjWXfvn3ExcVxyy23kJeXx549e9T+CCklW7ZsUe/n+fPnq2U25u09byyMRqM6SrkuhT48PLza8f76EnpFUUhJsc8RrU33WHOqfJpCCD3wJjAVGAjMEUIMdNtsLpArpewL/Av4R9nyK8CtUsrrgfuBj+qq4N7gGCHrPHBKOuWCqxk3Bg9Cr1oh2Le58cYb2blzJ8XFxT7lRy/LJll58MEH+e6777iScwURoGfUsBEcOHBAnUu2sLCwUj96Z6F3+NE73hAKCgqw2Wx15kfvLSNHjuTAgQPodDoCAwOJiorinXfeITY2FrDfz9dff13tDzl69Gi5Y9x4441s3LgRsFeSnp6LOxX59Tc0RqORzp07I4Sok/KYTCYuX75crbcqB2FhYRiNRtUXqa64ePEiJSUlBAQEcPr0aZ9Lb24qeFNtjwROSylTpJRm4FPgdrdtbgccqrYZmCCEEFLKo1JKRx7acSBICBFAA+GYWtBZ6J0Fy+5xg+pY6Y6zN/2IESOYMmUKMTExPuVH76jY/P39eeKxBVy+koXw09GxfQfe/793mTNnDuPHj2fKlCmV+tHbbDb1vrVvE8bbb/wfM2fOZMiQITz88MPYbLY686MHe1hm9uzZfPfdd4SHh7N9+3bA1Y8+ICCA7t27qw6LMTExFBYWcv311wP22aUsFguRkZEMGjSI559/vtz5Fy9ezI4dOxg8eDCbNm2iS5culXYCu9/zxu6Mbd26Na1ataoToc/IyAC8z593pr5SLB2teYdbpiO5QaOaVBS8d/wDZgHvOX3/I/CG2zbHgHCn72eADh6Os6uCc8wH4oH4Hj16lOtkqE3n1tmsInnyYoH6/eLFi+pk1uasYmm+WFThvtbCUlmaViAVi31i6nPnzslLly5Jo9Eohw8fLhMSEmpcrvpg7ty58uDBgy7Lrly5Ii9fviyldOpgNltlaUahtOTaO7eysrLUbSoiMzNTnRjafMkoSzMK1U5nT5Ot+womk0mdPPzAgQNqB3JtaIjO2NLSUr
l48WK5b98++fbbb6sTndeG3bt3y8WLF9eo0/PChQty8eLF8tixY7UuhzMffvihfPPNN2VhYaFcvHix/OGHH+r0+E2J1NRUmZeXV+P9aezOWCHEIOzhnEme1ksp1wBrwD7xSF2eO8hfT0GBBZsi0etEuRa9LqDilxpnKwRh0PH000/z22+/YbVafcaP3rlPAkeoSq9D6IXqw1+VM6d0c/yUVgUUiSy1IQINLrn0de2iWN+cP3+eO++8E0VR8Pf35913323sInmFI52yVatWtG7dWu30rg1paWl07ty53Cxd3lAfLXqLxcK5c+cYMWIEISEhdO3aleTkZDU015yQUvLZZ5+pc/PWNd4IfQbg/C4XXrbM0zbpQggD0BbIBhBChAOfA/dJKc/UusTVxNnJslWAXg1lXM24qfgWXM28sUGQQY2fd+7cuULb36aG8yQr0iZBJxA6AXod2K7mxlcU+3T40VssFvR6PTqdjrUr3mbwgEEoJVZ0bkLvPKGLLxAREeExdt/UcRd6R9ilpiiKQnp6uhr2qi4BAQG0atWqToXeMSexw/44IiKCvXv3Ulxc7DKYryEoLS0lLy/P4yxtdcGlS5fIz8/npptuqpfjeyP0R4AIIURv7IL+B+But222Yu9sPYg9RPO9lFIKIUKBr4HnpJQ/1l2xvSeoTKyLLVaC/e2tTZ1O59H6wB2hE2C42iEbFBREUVERJSUlLoOcmjKOig3sQi/K/PmFQaCUXnW2rEjoDx8+jMViISsri9DQUAINAVgvF4NOoJRYkaFXj6+ZmzUc7kJvNBqx2Ww1boBkZWVRWlpao/i8g7rOvDlz5gw6nY6ePXsC0LdvX3744QfOnDlT4wqpJiiKwieffML58+d54oknPKZG15aTJ08CVGtEcnWo8j1bSmkFHge2AyeAjVLK40KIF4UQt5Vt9j7QXghxGngacKRgPg70BV4QQiSW/etU51dRCY6pBUvMNpfRnaq9gYeMG2eEk9D7+fnh5+eH0Wj0CRMv95ALNkXteBZ6nX2YsCKrNDZzuW9lFaQ+xE8N32gzTTU8jswsh9BD7VIsq2tk5om6FvqUlBS6d++uprl269aNoKCgBk+zjI+PJzU1FUVR2L9/f72c4+TJk3Tr1q3KRICa4lVAVUr5jZSyn5TyWinl8rJlL0gpt5Z9NkkpZ0sp+0opR0opU8qWL5NStpJSRjn9q1vzbC8I9jNQ7CT0Do8bhOfUSmeEv6sVQqtWrbDZbJjN5novd21xVEbq5CJWidA7JlcpG0tgc2rxV1B5ORuaqWZorfxA2Fv12gQkDY97ix5qL/StWrWqVWs1LCyMwsLCOvnbKC4uJjMz02XWKp1OR9++fRs0zTInJ4edO3dy7bXXEh0dzdGjR8nPz6/TcxQUFHDhwgX69+9f9cY1xLd6zmpIkL8ei03BYrvqwe4+q1RF6Ny86QMDAxFClHN3bIo4hFun09k9buTV0A36qx2rVRm2ufjcWBXQ6xB6HbpAvT18I6Um9A2M0WjE398fPz+/OhP67t27V/n3UBmODllvxiJUhWOMhvv0hBERERQXFzeIe6iiKHzxxRfodDpuu+02xo4dC8CPP9ZtFNrxhqIJfS1xdMiazOWFvipcOmTL9g0ODqakpKRWwtZQNsXh4eEcP35cbYkPGTWU1NTUq9duKx+6cbcptjlXkFapvg2IIINL+KYuhd4bm+IHHniAzZs3l1vujQ2xr2M0GtUR2bUV+qKiInJycmoVtoG6FfozZ84QEBBQziG0b9++QMOMko2Li+PcuXNMmTKFtm3bEhoaypAhQ0hISKjTAXMnT54kNDSUTp3qL6rdIoQ+qGxqQbPFnkIoEPaMm0o6YlX0ZVYITr72jh5/98EyNR0iX1ubYneh37ZtG1OmTEFRFK655hpeffVVsLnadgqdAJ1A2pRyoRt3oXexjXCadlEXaFDDNw6hr6u+i7/85S989FHNBlJHR0erDpnNFaPRqCYEBAcHq6Oba0
J6ejpQu/g81G2KZUpKCr169SrXuRwcHEx4eHi9C312dja7du0iIiJC9TsCGDt2LIqi1Opv1hmz2UxKSgr9+vWr1dtUVficqdm2bdu4ePFitfcrsdjQIRFSwd/PD2kuEyydoEuXLuWMwByoVghmhb///e+sX7+ejh070qlTJyIjI9mzZw9RUVHs37+fOXPm0K9fP5YtW4bZbKZ9+/Z8/PHHdO7cmezsbObMmUNGRgZjxowpZ1Ps6FxbsWIFGzdupLS0lBkzZrB06VJSU1OZOnUqY8eO5cCBA3Tr1o0vvviCr7/+WrXMDQoK4uDBgwQGBqo2xT/99BM333wz8fHx/HbiBH079bKPBC5j177veXHlckqtZrp168batWtZs2aNalPcoUMHdu/efXVS8LJ5dIVex7fffsvf/vY3rGYr7UPD+Gr71+Tm5vKnP/2Js2fPEhwczJo1a4iMjGTJkiWcP3+elJQUzp8/z1NPPcXChQt57rnn6N69OwsWLADso4pDQkJ49tlnmTBhAnv27Knyue7atYtXXnmFgoIC/vnPfzJ9+nT27NnDypUr+eqrr4iLi+PJJ5/EZDIRFBTEBx98QP/+/Tl+/DgPPvggZrMZRVHYsmVLvWU81AdGo1EVVp1OR0hISI2FPi0tDZ1O5zIy2YFislJ0KJNWwzujb+1f6XGCgoIICgqqtdDn5OSQl5dX4ZtuREQEu3fvpqioqF6y3xRF4b///S8Gg4Fbb73VRYDbt2/P9ddfT3x8PGPHjvV6cpaKSElJwWq11mvYBlpIix5AV9YhCVz1ZPeyBhV+Oo4cOcKWLVv4+eef2bZtG7/88guKoqAoCmazmfj4eJ555hnGjh3LoUOHOHr0KH/4wx/srWlg6dKljB07luPHjzNjxoxyLopg91pJTk4mLi6OxMREEhISVI/15ORkFixYwPHjxwkNDWXLli3MmjWL6OhoPv74YxITEwkKCuLo0aMMGTJENWDT6XQ8++yzvLziHy4if+XKFV5e9SrfbviS+Ph4hgwZwurVq1m4cCFdu3Zl9+7d7N69G7hqG+F4q7mSm83DDz/Mli1bOBqXwCf/9yE6Bf73f/+XIUOG8Msvv/DSSy9x3333qef77bff2L59O3FxcSxduhSLxcJdd92l+swAbNy4sdrGZqmpqcTFxfH111/z6KOPljORu+6669i3bx9Hjx7lxRdf5G9/+xsAb7/9Nk8++SSJiYnEx8cTHh5erfM2Ns6hG6id/05aWhpdu3b1OAbCGHeRgm9TufjPBIzxF6t8Y6uLzBuH7YF7fN6Bo0I+c6Z+huUcOnSItLQ0pk6d6tGryeH4eujQoVqf69SpUwQEBKgppPWFz7XoK2p5V0WO0UxeXh5BOoWOwe1Qisz4dQ3x6nVJ+Ok4cOQgt916G4GBgQQGBqo2xYqiNFmbYscf5d13380ry1/mbMbVyuXQoUOcOPUbN916M8JPR3FxseoZ445jIJRjJO3hhDjVplgqkrCw9ihme0zz2WefBRrGphjgzjvvRKfTERERQZ8+fVS/Hgf5+fncf//9JCcnI4TAYrEAMGbMGJYvX056ejozZ870qda8oigUFxeXE/rs7OxqH8tqtZKRkcHIkSM9rjedzkPfLgB92wByNydTfPQy7WZEYOgQ5HH7sLAwNVWzpqSkpNCmTRsXcz5nunTpQqtWrUhOTmbIkCG1Opc7WVlZfP/99/Tv35/IyEiP23Ts2JFBgwZx+PBhbrjhBoKCPN+LqlAUhZMnT9K3b986nZ3NEy2mRR/sr0cgy6yHvcu4ceDokHWMJAV7SMfPzw9FUVyGjDclm2KH0Pv7+/PUowv53zf/BcClAhMWq42bx0/gyPYfOZrwEz/88AOvv/66x2O6t+jRX71vQifQBeoRpfZ1FXXI1odNMXiYGczt+/PPP8/48eM5duwYX375pfo87r77brZu3UpQUBC33HIL33//fbXP3Vg4ZhJr1aoVhT9mYMkqrnGL/uLFi9hsNo
8VrLQolKbkEzSgPR3nRxI6oy/m9CIurvqJwh/S1OkonQkLC1NdVGuCw5a4T58+Ff59Oir206dP12kCgCNk4+fnx/Tp0yvVh5iYGMxmM4cPH67x+S5cuIDRaKz3sA20IKEPMOjQCXuYWVq9y7hxIPx03BA9mq++/hqTyaTaFPv722OWztasTcmm2DmP/o+z7+b7fbu5nJVFjtHMwCHDOXD4IKfPnkHaJCUlJWoHl/MxHaZIer0eyu7bmDFjXGyKc0sKEIrdLuHTTz8FGsamGGDTpk0oisKZM2dISUkp90fj/DzWrVunLneIycKFC7n99tvVaQ99AUcOfbBfIPlfpmA8fJHWrVtjMpnUNxZvcbS+PYWuSs/lg1UhoF87hE4QMuoaujw9nMB+7cjflsrlN45iTnetXMLCwtTpHWtCZmYmJpOpwrCNg759+2IymWpt/eDMgQMHyMjI4JZbbqly4FKXLl3o378/hw4dqlZjzpmTJ08ihFAzieqTFiP0Qgj8dGBRsAuWNxk3jn11ghHRI5g2+RYiIyNVm+KwsDCEEJhMJlVUm5JNsSzzopdS4q/3Y8EjC8gqqzhCQtuz9r33ue/xh4gaHsX06dPVYdjONsXlRsUadHTs2JE1a9aoNsV3P3AvCMFfnvkLR48ebTCbYoAePXowcuRIpk6dyttvv13OkOuvf/0rixYtYujQoS6tzI0bNzJ48GCioqI4duyYS39CU8ch9AGKPaZuzS6pcYrlxYsXCQkJ8Vghm5LzQC8I6H3VjlvfNoAO9w2k/b0DsBWZufxmInlfp6hve7XNvKkqPu/g2muvRQhRZ9k3ly9fZvfu3QwYMIDBgwd7tc9NN92EyWTiyJEjNTrnyZMn6dGjR4P49oimNpQ/OjpaxsfHuyw7ceJEtWYGqoiLFy9htgm6yCD0YYHog7034LJml1CQV0DYtZ0pLi4mNjaWNWvW0L9/f/Lz82nfvr1LeKIxmDdvHvPmzVNj7dnZ2dhsNjqGdcBy0Yi+XSCXLFauFJXSKsBAn7BgLJlGe/y11B5Ld66cwJ7+deXKFdq1a4c+x4aulQFDaHl3Q2t2CbmlheAnqj0NXXOjrn6vFXHs2DE2b97M3Mn3IL64iKFTMMbbQ/noo4948MEHq9Wx995772EwGHjggQfKrbv02k+IQAOdHvEcq1ZKrOR/cxbjkYuEzuxLyMhrMBqNrFixgilTplTY51MZH374IcXFxfzpT3+qctu1a9diNpt59NFHq30eZ2w2G++//z55eXk89thj1crkWb9+PRcuXOCpp55S3/C9ITc3l9dee41JkybVahyNM0KIBClltKd1LaZFDyClgp/D96UaoRuwh28ee+ZxoqKiGDZsGL///e8ZNmwYQUFBTWak7Hvvvefyx6U6ddoc9sSC0rKWl8liA13ZGAGbUuE0iWqLHmEfWVvBfRNBBvRSYGti0+01R9QWvcn+LKw5JYS0sotTdVv02dnZHjs9bUVmLJlGAvuFVrivLshA6My+iAA9lgtl4aTgYAICAmrUojebzZw/f77K1ryDiIgILl68WGuL5v3793PhwgWmTZtW7XTN2NhYiouLcW+cVoVjNreGiM+DD2bd1BRFsc8UFWTQIW2QZ7bS3t97pz/hp+ffb6zF0DEYXcDV/XQ6HUFBQRQXF9O2bdsm5ceuKAoGg+Fqp5leUGq1d17ZFIlVsY9ylVaJTu/Zkz4mJobi4mIMeoN9FK1B8NH69eXcA3WBBnToUNyN1HyY0tJS/P3963UgS01wTAruZwQzgFUSjP1tsjpCX1xcTElJiUehLz1tj7EHRlTufSOEwK9zMJZLxer3mqZYnj9/3sWW2IFUJEqRGX0b1zfmiIgIvvvuO06fPl3juSEyMzP54YcfGBAeQW9r9UemOjLiDhw4wIgRI7y26T558iQdOnSoMLOorvH9v0YvcfTOG9BjFXClyFytUZxXJyEp38vviLE1hVa9M6rglqVFohNYrAqt/O31u8liU3
3pK7Iq3r17Nzt37iThQFxZhs5RjxaxQifQ+9mP2xw8bywWC9nZ2WrruSlRVFREcHAwSv5V8zBDoX0S+OoIvSMd05PYmE7logs24Ne16hauoVMw1stX71NNhT4lJcXFltiB8VAmma8ewVbgapbWuXNnWrduXeM4vdVq5fPPPyc4OJjo9HByt5zCWMpYCgAAIABJREFUZqxeZzbYW/VFRUVez2tgMplITU2lX79+1T5XTfEZoa9tX4Lq12Kzi3ap1UahqRphBn2ZZYClvBg6zKWKi4ublH2x2hlrU0AnMCsSCbQJsrc6TBbF3qK3Xe20dS+/Kv5W7AOu9BW3bv0CyzoHTdX/Y2lqOBwYq5tR0RDP3zFYypprwtDRnsNtyzFVO8WyIqGXUmI6nUdA31C7VUYV+HVuhWK0Yiuy37OwsDDy8vKqXeE7bIndY93FP2eBVWI65Vp5CCGIiIggJSWlRo2L3bt3c/nyZSZdF0OAyT76vehA9c3SevXqRY8ePdi/f79XWU8O982GCtuAjwh9YGAg2dnZtfojcvwQ9DaBf4ABP72OK0Xez1ivWiF4EHqwt+qtVmu109vqC4do22P00iU+3ypAj0Gnw2Sx2d0sFYlOeLYqdkwPKK2KfQrCSsIYhrIKxNYMhN7xHM1ms9ciIqUkOzu7RlPxVQeH0NvySvHv2QYMAmt2zYReCFFu8J71cjFKgZnAvt5ZFvt1tr/ROsI3YWFhKIpSLTtfo9HIxYsXufbaa12W2wrMmM/bY/Cmk+VTkiMiIigtLa32IK3z589z4MABhg0bRuczfvh1aUXgwPYUHbiAUlq9fiYhBOPGjaOgoIBNmzZVOYbg1KlTBAUF1dpbqDr4RIw+PDyc9PR0srKyanyMkpISSktLyZaB6Fr5YVQU0kusFF0KwE/vXX2nFFtQzAr6K+XjtlJKCgoKyMzMbPBpzjyhKAoFBQUEBQVhKLW/jRQbIL/Eir4gkJwiM1lICgL8UIwWbEGCElMJubm5LvF1o9GIoihckQGgE+hzK45BSinJz8/HHwPBWSEulgu+RmFhoTpxS1ZWltcZFYGBgfVup2A0Ggnv2g2lyIKhXSCGsCA1xbI6PlDZ2dn2bCo34zBTsj0+HxBRcUesM4YyobdeLoZrQ11SLB2fq6IiW+KSpGyQ4N+jNaZTufbEAae/1969e6PT6UhOTqZXr15enctsNvPf//6Xtm3bMm7gDRQcOEHojL74dw3hclI2xkMXaX1T9Z5hnz59mDZtGl9//bU6JsTTbF82m41Tp07Rv3//Bu3H8gmh9/PzU60EasqmTZvISE1nVnY0nf88jKIQP8a88h23D+nGP2YN8uoY5owiLr9+lDYTe9JmQvk8+C+//JKff/6ZZ555psbDouuKrKwsNm3axMyZM2n3TRGthndmWWkRe0/lEPf/bmbpl8f5NC6No/Nv5MoniWT/LoDtB7bzyCOPuOSzr1mzhuCgYMad7EXIDd0IHVH5c1jxyqt0Kwpl5gOzvW4RNjXMZjMvv/wyMTExJCYmcs0119TLhM01xWg0EmSwd0zq2wVgaB+I9UoJrQdUL15dUcZNaXIuho5BGNp592aib+Nvz7xxatFD9XLpHbbE7sZqJcevYGgfSEhMODkfn8B8rpCAPlfz+gMDA+nZsyfJyclMnDjRq3Pt3LmTnJwcHnjgAayHriAC9ARHdUIXoCcgIpTCfemE3HDN1RHxXjJixAgURWHbtm1s3ryZWbNmlRP7tLQ0TCZTg4ZtwEdCN3VBXl4ebQytQCcwtA+iXSt/Zg4L5/PEDLK9DOH4dwshcEAYhfszUDzE94cNG4bVaiUpKamui19tHLHlAL2/3S8+NICUrCL6dLT7o/Tv3JoSi43LZQ5vfqX2n4K79bLRaCTIPxCsEkOHqv/wQ8NCMepNlPxypS4vp0HJzMxESkl4eDjXXXcdZ86caTIzilksFsxmM0FlWTaG0EAM7YOw5ZgICQnBbDa7jNSuCEVRPAq9tN
ptDwL6eteah/KZNyEhIfj5+Xkt9FJKUlJS6N27t4swKiVWSs/kEzioA4ERoaATmE6WP2bfvn25fPmyardcGWfOnOHIkSOMHj2a7h26UvzrFVoN76xm0rUe1x2lyIIx/pJXZXdn1KhRTJ48mRMnTvDZZ5+VC/udPHkSvV5fLkRV37QYoc/NzSVECcTQIVDNBX/oxl6YrQr/OVzeSbIi2kzogSyxUnSwfKdN165dad++Pb/++mudlbumOITe32K/Vn1oAClXjPTpaM+i6NfFPpLyZEExwl+HX7F02Q/sf4BGo5Eg7GELQ/uq31LatG1LcYCVkuNXXDz8fQmHYHTt2pXrrrsOq9Vab06J1cWRBRRYNipW3y4AQ4dApEUhxGB/Pt7E6QsLC13sMhyUnitAWpQq0yrdcc68qW6KZU5ODvn5+eXCNqbfckCRBA1ujy7QQECvNh6FfuDAgfj7+/Pee++xbt06kpKSPParmEwmvvjiCzp06MCECRMwHrkENkmr0VffIgL6tMW/ZxsKf0hXx59UlzFjxjBx4kSOHz/Of//7X5dstpMnT9KrV68GH1zZIoTebDZTXFxMiMkPv05X4+d9O7Xmpn4d+fehc5i9FCX/8NYEXhdG0b6Mcp02QggiIyNJTU2tsddHXeFomfuZ7Y/YGKgnr9hCnw72Fn1EJ7vgn7pchL5dIIay7DhnoTebzVitVgJtdlHx61i10Ldt25YiWzE2o8UeX/VBMjIyCA0NJSQkhJ49exIYGFjOFbOxUAdLmQ2gA32bALUCDlbsFbI3Qu/IuHEfCV2anAs6QcC1bT3tViGeMm+8FXqH7YF7K7fk2BV0rf3xD7c3SgL7h2G5WIw13/WNpV27djz55JNMmDCB3NxcNm7cyGuvvcbevXtV3yiwO7sWFhYyY8YMDHoDxsOZBFzb1kUThBC0Ht8dW14pxYn/n70zD4irPtf/58w+zMAAww4hkBAgbEnIvidmMWrilrpWrfXaXqtWbWu1vbfrbe9tba3WarXe1rbeVq0mcY9m3/eFEBKSQFYIhJ1hmX07vz8OMywzwAyBxKa/5x8NnHPmDOec97zf533e5x16TXD27NksWrSIY8eO+YN9c3Mzra2tV5y2gX+RQO8LujqrAkVC70LpQ3Myaep08GlZ6LKqqEXpeK1uzHvrAn7n05gfP378Ms748uEL2Mouaf/FLhXJ2K6MPlKjJDVaS0V9p1TQ6/D22g96ZI9OOYJKhmyQwRMgBXqXx407Rh707/PPgNraWr8RmlwuJzs7m8rKyi9Ef4D/mthlyKPUCHLBH+i1dumFHE6g75vR20+3oUqPRKYOr3wXTHljMplCGuJ96tQpDAZDr8Kt6PJgrzShzTf6JZ6aXGmVESyr1+l0zJ07lyeffJK7776buLg4tmzZwosvvsiaNWvYvXs3paWlzJ07l9TUVOynWvG0OdDPTAk4liYnBmWyjs5tFxG9Q1f6zZ07l4ULF1JWVsbHH3/sTxaupH7eh3+pQB/p1aKM7x3o542LIytBzxu7zg8o3zxe285X/3KA5S/vREjRocmJwbyjBq+j98MfGxtLWlraVXdD9Gf0FkAucMYiZUE+jh4gJymSyoZO5DFqhDYPgiD04uj92aNVCiahdIgaDFIm6MmPwHm+HVfDF6/haCCYzeZejpcgDS+x2WxBh8VcafiuicoiII/uKshGq0EuoLFKj3Oogb7nYHHosj24ZA6btoE+yhuk58Dj8QxqT9DU1MTZs2cpLi7udX/ZK9sQXV60+d0vIkVCBHKDGvup/mfSymQycnNzeeCBB3jssceYPHkyFRUVbNy4kaSkJObNmweAee8l5FEqNOMDi9G+rN7dZMNWfnm1pvnz5zN//nxKS0vZunUrSUlJREeHXv8YLvxLBHqfJbBe1PgbTHwQBIGvzs6g/FIHBy8E3kDnmsw8/nYJy1/exZ6zLRyv7WBbRRORXVm9ZV/gSqCoqIjGxsYhjTwcLtjtdmlYSIcbuU
HN2RYLKrmMtJjuF11OUiRnm8wIBhXYPWjUmqAZvapT6HfQRF/4Ar0zTQlyAfO+f66s3md72zPQ+wZDfBHoG/816cCvihFkAopYDbI2NyqVKuRAbzQaewVXx9k2EHvLKjvsLl7Zcpo391xg3fE6SqpNXGqz4erDXw9VebN//37kcjlTpvT24rKVNyNoFb0UNoIgoMmNwXGmLaT6T3x8PDfeeCPf+c53uPXWW7n77rtRKBS4mm04Trehm56M0E8DoLYgDkWcls6tFy+7CW7BggXMnTsXj8dzVWgb+CeRV14u2traUMjkaFEFDVi3T0rj1+sr+POu80zLlG7QunYbL206zarDNagVMh5fmMVDczJZ+uJ2Vh26yJIHpqDOjqFzRy26mSnIevjm5Ofns27dOsrKykhKSrpi37Mn7HY7Go0GT5sDRbSac00WRhsjkPfodMxJjMTlEWmRC6gBjUodNNAr20ExMbxA3+mwkFQUj7WkEcOyzF7+QF9k1NbWIghCL5mfSqXyT69atmzZVfW+sVgsKJVKZJ0ef0YPUqE8nKaplpaWACmj/XQbgkbh58QBPjpSy/MbKgP2FwQw6lQkRGpIidbyHzfmou+hvOkZ6PszKbNarZSWllJUVNRrWpbo8WI72Yo2N7aXZh4knt6yvx7HhQ40ISqD1Gp1rwHfln11IBPQTe3/2RRkApEL0jCtPi1RSDmh9QMEPZYgcN111zFmzJirNrLyXyKjb2trI1KhQx6pRqYJfLdpVXLunZbOhhP1HL3Yxs8/PcH8X2/j/ZJa7p8xmu3fXcjT1+cQ2yXJ3HKqkWazQ+LqLS7pxukBnU5HVlYWx44dC4mjHAnYbDZ/oO8rrfQhO1F6oC908fdquSo4R+9VhKS4AalDWC6X097ejm5GMqLDg7V04OEpXyTU1taSkJCAUqagbe053G0S5ZWbm0t7e/tVXaVBV1esNgJESXHjg8Ko8TdNDRbo3W43JpOpFz8viiKO0yY0WYZetgcl1W3ER6o59IPFrH1iDn95cCq/uL2QJxeNY0leEskGDVsrGllTUtNLeRMZGYlCoRgwoy8pKcHtdgfYGTvOtyPa3L1oGx/UY6NBHlxmGQq8Tg+WQw1oC4zIowauOUVMTEBuUNO55fKzekEQyMzMDNn0bLjxLxHoTSYTkWgHVI3cP3M0MkHglt/v5s+7z3PzhBS2PD2fn9ycT3xk9wN1x+Q03F6RD4/Uoh4dJTVY7KjB6+zN1RcVFdHZ2UlVVdWIfa+BYLfb0Wq0eDocCFEqqlutfmmlD2PidchlAietUjBTCcpegd5sNqNWqlAgD0lDDxJHajAY6OjoQJUeiTJZh2Vf3RfKA6g/iKLoL8TaTrRg3lmLpUtGm5OTgyAIV52+MZvNRKik+7jnXABFnBbR6UWv0Q0a6E0mE6Io9gr07iYbnnYn6j78/OEqE8Xp0cTp1eSnGFiYm8A909J5anE2v7i9kDcenEpBShQHz5t6KW9kMhkxMTH9BnqPx8OBAwfIzMwkMTGx1+9s5S0IShnq7MBagUwtR51pCGqHEApsR5sQ7W70MwKLsH0hKGREzk/DWdWB8/zlWSFfbfxLBPq2tjb0zuC0jQ/JBi2PLczi5gkprH9qHs/fMaEXn+3DuMRIJo6K5r1D0ls+alG61GCxv3dWn52djUqlumpFWZvNhlqpAhHaVTJcHtEvrfRBo5STYYygrMUsPViiIqAYG6GUgkmoHD1AVFQU7e3tCIKAbkYyrjoLzurw55leabS2tmK320lNTfVL62zHm/3zWUeNGnXVA73FYkEr7+6K9cG34tLJNX77hv4QTHFjPy0Fzp6F2KZOB9WtViaPHrg4OzUjltKaNuhKBnrSN/0F+pMnT9LR0RGQzYteEVt5C+rsmF50aE9ocmJxN1pxt4ZvOGfeewlFYgSqzIFHXPqgm5qITK+kY+vVL8RfDq75QG+327Hb7ehd6oBCbF98a0k2v7tnEuMSB54XeceUNCobzJ
TVtKPOMKDOiqZze++sXqVSMX78eE6cOHFVjM7sdjtqQVom1nml8+qb0UOX8qbRjDxGjdItD6BuNIIaQSNHpgt9yWkwGPyGVhETExDU8gB664sIXyE2OS4Je0UrskgV7hY77q7AlZubS0NDQ9B5v1cKUgObryu2N3UDECGqcbvdAR3OPeEL9D3ljI7TbSiMGhSx3auEkmrpexanDxLoM2Nxur1UeqS+kp7Km9bW1qD05b59+4iNjWXcuHG9fu6s6cTb4QxK2/jgl1lWhkffOC924rpkQT8zOeQ6i6CUo5+TiuN0W8B83H8mXPOB3vdQRooaFPHDYza2YkIKaoWMVYclxzx/Vn+gN39bVFSEw+HwT5O5krDb7ahFqR5x3unT0OsCtstJjKKq1YpgUKNyCQGBXutVoogLTVrpQ0xMDB0dHbzxxhscOV6KfEI01rKmIXl9X0nU1tZKksN6ATwiMbdlgSBl9SAFeuCqZfVerxer1YrGq0SmV/byYpFHa0AmoHUNrqVvaWkhIiLCb74n2R60BdA2JdUmlHKBgtSBm6emZkgvjH2NHQHKG7fb3atpCSS/l5qaGqZPnx5g7GUvb5G+R27/xU9FnDQKdCCZZTBY9tZJvjaTwhswop+RjKBR0L7uAq56y2Vp668WrvlA79fQi1qUYdAPAyFKo+SGgiQ+Kr2E3eVBnWlAPcZA5/aLvQaTZGZmotfrr7glgtfrxeFwoPRIgf6UxU6sTkV0RGDxKSdJjyhCp1qO0iHrZbVssVhQO+UhF2J9mDFjBosXL8Zms/HJJ5/wpxPvs012nJMbSq5acToU1NbWkpycjP1YCwqjBs34WFTpUdjKuzPghISEqxbo7XY7Xq8XjVPeS3ED0phIRawGrV0K/oMF+p60jbO6A9EZaHtwpKqN/BQDmkHMvWJ1KrIS9BysMvXyvOlPYrl///4AJQxI1IqtvAX1WAOyAeY5C4KAJicGx9m2fm3D+8JjdmItayKiOCHsZjCZRkHUdaNwnGmj4bclXPrZPpr/Wk7n9ouSZcQwWX1YjzbhvGQefMMh4F8m0OtlWuQhuvGFgjumjKLT7mZ9uZTFRy1Ox9vpwtwjq5fJZBQWFlJZWXlFp0/5fW7ccmQRCk61WAP4eR98ypsGweu3S/AFFKvVisYhD4ufB8lRcM6cOTz22GM8/PDDTJw4kWpFC6uPfs5LL73Eli1bhjSBaCThdrupq6sjJT4Zx9k2tBPiEQQBbYERV50Fd4tEheTm5lJdXX1VJk91z4qVB3WWVBg1aDqklVc4gd5+ug1k9LI9cLq9HK1pG5S28WFqRiyHL5iQ91DeBAv07e3tlJeXU1xcHOD34m604m62DUjb+KDJiUV0eXGcD83z3nJI8rXRz0gefOMgiJyXRtIzU4m5I5uIgjjcLTbaP79A02tHqf3JXhpfL6Njc3XIL56+cNZ00rqqgo6NIyPe+JcI9EpBgS42st/miKFg5hgjqdFaVh+WDLDUY6KlrH5bb66+qKgIr9d7RR0t/YHeISA3qDnXHCit9GG0UYdKIeO8y+Wneux2u//FpBFVQ14JCYJAWloay5cv5/EVD7HQWUBsRDQ7d+7klVdeCXtYxEiisbERj8dDnFMHolRbANDmS14wvqw+NzcXURSvCh3nb5ayCgEZPUgFWVWXxVJ/gd7hcGA2mwMKsapRUb2kxyfrOnC4vRSPDk2rPi0zhk6Hm1atzK+88c1Q7hnoDx48KG0/bVrAMWzHpb+xNm/wQK8eYwBFaDJLr9WFZW8d6jEGlInBn4PPjtWxtmzgOpIiVoNuciIxK8eR9J0pJP9gOsb7xqOfnoTo9NCxsYrWNZVhK8y8Vhctfz+JXKci5ksjY49wzQd6k8lEpKBF2U+gGypkMoEvTU5j15lmatukbC9q6Wi8Ziem1d0XOykpifj4+CuqvvEV4hQ2EKNUNJudQQuxAHKZwLgEPScsDlR0jRi02/1BRSsOrFYKFZFFiYyLSOMm9TSeeu
optFot27Ztu+zjDhd8hVhDrRxlis5vdKWI1aBM0fl5+uTkZKKioq4KfeO/Jm5lr0KsD3KjBrlDWlH1F+j7Km48FheuWrNkA9wDvkLsYIobH3w8/cmuepCrwRogsXQ6nRw6dIjc3NyAqVYgDRlRpUcGDAEPBplKjnpM9KAyS9Hlpfn/TuAxO4laOjroNm6Plx9+eJwff1yONwz+Xa5XoS2II3rFWBK/OYmo6zOwlTbRuTl0hY7oFWl9twJPpxPjfeORhyF6CAfXfKBva2tD7x5ccTMUfGlyGqIIa3xZfYYBw7IMbGXNdGySLrYgCBQWFlJdXX3F1Bq+jF5hgU6VtIrpj7oBqUP2sMnSK6P3N0uh8is6QoXXK9LZZ5ygoJChmyqpWXQeNTNnzuTs2bMheYgHg8fjGVZtfm1tLRHaCDSXPP5s3gdtfhzO6k6pJ0EQrppHvf+aiMqgNKSvljKQlr5voO+2PQjUzycbNCQbQntu0mIiSDFo2NMunWNf5Q1AWVkZdrs9QFIJ4G6146o1+1dQoUCbE4O72Ya7ObjCSPSKtL5XgfNCB7F35qDOCF5U3n++lRaLk2azgyMXh+46G7kgjYjiBDo2VWM9GlqTYOfWi9grTESvGINq1MBqv8vBNR3oRVGkzdQmmZkNUyG2J0bFRjBrrJHVh2v8mYB+XhoRkxPp3Nx9sX2OlleqKOvL6FUOGU2CdF79ZfQgedNXmO2o5Er//r6gotNoByyMBcNr288y65dbaOzorXPWTZdazi0H6pk6dSoajYadO3eGdWyQgvwfX3mdV196xZ+JXy5qa2tJ1BoRENAWxff6nbZACoo96Zur4VHf8+UblLrpusd1Cu2ggd7HnztOtyGo5b1sDwCOVIfOz/swNTOWzTVtAcobn8Ry3759JCcnk54eOJ3NZ2kdCj/vg6bLliAYfSOKIu2fnsN2rBnDTWOImBAfsI0Pn5bVEaGSo5QLbDgx9M5nQRCIuX0cqowoWldV4qgauMnKXmmiY1MVEZMS0E0fWu0gVFzTgd5qteJ0ObuklSMz2u+OKWlUt1rZf1662QRBIOa2rO6LXd1BTEwM6enplJWVXZEOUf90KVHBRbcbuUwgPbZ/aWlO1xASuU7r398XVPSxoTWW+OD1iry9v5pOu5vfbek91k4RrUGTG4vlUD0quZIZM2ZQUVERtq3Awf0HqDc10mFq509/+hObNm0adCDzQLDb7TQ1NWE0a1FlRgXQIoqECBTxWn+gv1oe9RaLBa1SgwwheDE2Wg0y0AnqAQO9wWCQDO9EEftpE+qx0b3qV/XtdmrbbBSHSNv4MDUjliazA69R0yvQO51OysrKaG5uZubMmQFS3Tf3XKBsywWEeG1YNKEiTtreXhm4UjbvrMW85xL6OalEzk0NsrcEt8fLuuN1LB6fyIwxRjaWD22ylA+CQobx/jzkBjUt/3ei36Yut8lO6z9OoUyMIPq2rBH3T7qmA31PaeVwaej7Yll+MpFqhV9TDz0udlTXxW5zUFRURHNz8xXxSvEHepSctjtJj41Apej/Uud0KW9sXQOwfYFeAPTx4Q2g2He+hdo2G5lxOv5x4CIXmnurU/QzU/CaXdiONzN9+nTUajU7duwI+fhms5mtW7eS6onlTucscrWj2bVrF6+//vqQaaC6OqkIZzRHBNA2QJf6Jg7HuTY8Fpffo76ioqKXR70oirS0tFBaWsonn3zCq6++yqpVq4bt5W6xWIiQqxHUcmTaQImgoJAhj9ag9agwm81Bpaw9FTeeVjueNke//Hxxenh2uj5DwEaVEKC82bRpE3q9nry8vF77uD1e/m/TadKsHt5rN/s/O1RocmKwn23vJYCwljbS/tl5tEVxGG4ceMbx3nMtmKwubipKZmleIueaLZxpvDyJo1ynJO7BfESPSPOb5QFjR0W3l5a3TiJ6RGLvy+u3A3g48S8R6KPUuhErcmhVcpZPSOHzY/W9eGm5TkncV/IQXV5a3ixnfFYuMp
nsihRlbTYbMkGGHBnHOuwD8vMAyQYNkWoFzTIBOTIp0HeaJcVNmC/INYdr0asVvPnVaSjlMp7fUNHr9+qsaORGDeZ9dWi1WqZNm8aJEydoagptms/mzZtxuVzMVowncUUus1vHsrL4BhwOB2+88QYbN24MuxPZR//EE4W2IDhHrM03ghfsJ6WVW25uLna7nYMHD7Jr1y7eeecdfv3rX/Pyyy/z4Ycfcvz4cWQyGeXl5cOm0LFYLFLNJKb/YqXCqEHrUPjlsT3hexH5Ar39jPR89J0PW1JlQqWQkZ8S3ks+K15PdISSCrfLr7zxBXqz2cy0adNQKHq/oHaebma8VUSOQIlG5K7X9/K3fVUhvxw1ObHQNedW+k4mWldVoh5jIPbOnF4GbcGwtqwOnUrO/Ox4FudJnjuXQ9/4oIyPwHjfeNxNNlrePoXo6f4+bZ+ew1VjJvaO7BGhlIMhpEAvCMIyQRAqBEE4IwjC94L8Xi0Iwrtdv98vCEJG18+NgiBsFQTBLAjCK8N76oPDF+ijjUO3GA0Fd0xJw+byBMizlIk6jPfm4qq3YPu4mnHjxl0RR0u73Y5GoUKQyyg1WfqVVvogCALZSZGcd7pRi0psFitmUyeaMBU3Foebz4/XcVNhMunGCP5tTiafltVxrKZb6yzIBPTTk3Fe6MBVb2HGjBkolcqQuPqamhqOHDlCgZhOcmEGupnJqLNjiD3o5mt3PcTEiRPZvXt32Nl9bU0tUUQQnZ3Yb0KgTNUjj1b7B1H4POrXrVvHpk2baG5uJjs7mxUrVvDoo4/y7LPPct/c24nRGdi0adOwTKeyWCxoPUqpC7YfKIxaNJbgA0gsFgsOh6O7EHumDblBHXCNS6pNFKUaBlwFBoNMJjBldCx7O6QXjKvBisFgQBAEFAoFkydPDtjngyO1LJOpkMWoefnJ2czJiuOHHx7n6VVl2F2D/83UmQYEpQx7RSvOS2Za/nYSRZwW4/15/tnQ/cHl8bKuvJ4leYlolHKSDVompBnYcJn0jQ+arGhibs3CUWmi7VOpnmMpacCyrw79vLR+k4qRwKBXUhAEOfB74AYgD7hHEIS8Ppv9G2ASRTELeBF4ruvnduCHwNPDdsZhwGQMXOoYAAAgAElEQVQyoUaJLiE8njlcTBoVzdh4HasOBwYXTU4shuVjsJ9oIcuThNls5siRIyPK1dtsNlSCElGvxOHxDliI9SE7MZJyix2VqMDWacXcaUYrKsPqil13vB6r08PKyZLn9tfnjyEmQslz63pz2RGTE0EuYDlQj06nY8qUKRw7dmzAJiqv18tnn32GThPBREcG2qI4aQj1ynEgl2H76AI3r7iZ++67D6fTyRtvvMHWrVtDOu+a6ovEeyKJmNh/wU4QBLT5RuynTXgd0oCP++67j3vuuYfvfve7fPOb3+TWW29l8uTJJCQk4Gmy0fZOJcVt6TQ1NVFaWhrSuQwEs9mM2qnoZWbWF4o4LRGO4DYIPefEil4R+5k21FnRvYePuD0cr+0Im5/3YVpmDPu6Ar270YpCoSAtLY0pU6b08pwHMDvcHC9vYKJXjn5KEtE6FW98ZSpPLR7H+0dquP3VPVS3DNxoKChlqMdGYy9vofkv5cg0cuIeKghKbfXF7jPNtFld3FTU7WS5JC+R0ottNHSEZ5jWH3TTktDPTcWyt462T87S9sEZVJkGDNdnDMvxQ0Uor+xpwBlRFM+JougE/gHc0mebW4A3u/5/NbBIEARBFEWLKIq7kAL+FYep1YTeO3weN/1BEATunDKKw1WmoPyeflYKuulJxB8XSDTE88knn/D222+PWHeoz+fGrpW4v8GoG4CcRD1nnS7UKLCZrVisln4HtfSHNSU1pMdGMDVDChJRGiWPLcxi15lmdp3uHskm1ynRFsZhKWnA6/Qwa9YsZDIZu3bt6vfYpaWlXLp0iVmGQjR6LepMiW6QG9TE3DIWZ3UnnTtqyMrK4tFHH6WwsJDt27dz9OjRAc+5o6
ODTquZBCE66Fi5ntAWxIFb9Gu3MzIyyMnJCQhgottL6zsVyDQKxurSSJLHsHXrVhwOR7DDhgS3243D4UDjVvSyJ+4LhVFDhCi9CPoL9EajEdclM6LNHcDPH6/twOnxhq248WFqRixNiHiUMn9B9qGHHuL6668P2PbzY3Vc71YgCpJLJEirgqcWZ/Pnr0ylxmRlxSu72FoxsFRRkxODp8OJ6PIS91ABCsPgOnyQaJtItYJ52d2Z9dJ8SRm26eTwZPUAhhsy0YyPxbz7EoJGgfHe3GFt3gwFoQT6VKBnC2NN18+CbiOKohtoB0LWSQmC8HVBEA4JgnAoVK42FLS1mIgUNQP60A8XbitORS4T/J2yPdFhd1NTHI8tUc8NjQXMmjibqqoqXn31VbZt2zbs7pZ2ux2VR4Gp62YKKaNPiqQOLypRIXXGOm1oldqQJ0PVttnYe66F24tTe2WI980YTWq0lufWnerVjKKfloRo92A71kxkZCTFxcWUlpb66baesNlsbNq0ibTUNEZfikRbGNfrQdFOjEdbGEfHxiqcl8xoNBpuueUW0tPTWbt2rT/ABT3vi9L1Ss0YNeh3VY2OQqZX+pun+kP7esn8KuZL2cTcPo6p1rGYzWb27t074H4Dwd8shWrgjN6oRYtUVA8W6OVyOQaDoZufH9s70B/xFWJD7Ijti4JUA1qlnGZNd6AXBCGoquSTIzWsEFRo84wBTVILcxP49JtzSYnW8tBfD/Krdac422QOuhLWFsShHhdN3Ffy+u187Qun28v68nqW5CeiVnRf93EJejKMEcNG34BEV8benYtuZjJxD+Qhjxx44MlI4AtRjBVF8X9FUZwiiuKU+Pj+l89hHpP2zvYuxc3IB/qESA0Lc+JZfbiG59dX8M13jnDLK7uY+F8bmPDTDax4bTd3NjTQKELyYS2Pfv0RcnJy2LZtG6+99hqnT58e/ENChM1mQ+WSUYeHSI2COP3gN1ZOYiQmRFQo6bRZcHnd6HShr4Q+KKlBFGFlce9RaRqlnG8tyeZYbTufHe+uYagyDSjitX4f/9mzZwOwe/fugGNv27YNq9XKonEzwSUS0UfnLggC0bdmIdMqML1Xiej2IpfLWblyJXK5nFWrVvUrv6w6fg5BFEifMi7o73t9jkxAm2fEfsrUr6eJ/YwJ885adDOS0ebGos2NJWNCFhneBHbv2h3SmL9g6NWp3ENaaXa4e71AFbEa5IIMrTKwO7alpYXY2FhkMhmOM20ok3QBQedwlYm0GC0JkUPzhVLKZUxKj+a0x+1X3gRDXbsN1dkOokSpZhMM6cYI3v/GLG6blMqr286y6DfbmfGLzXzr3VLeO3iRi63Si0QeqSL+3wpRZ4ZePN59ppkOu5vlRb0/WxAEluYnsedsc0DT3+VAppYTc0vWiDZFDfj5IWxTC4zq8e+0rp8F3UYQBAVgAPpPo64AzGYzbq9H0tCH6b44VHx5+miazQ5e236W0osmIjVKbixM5vs35PKH+4p594m5VEwxEuXyUvtRDXfccQf3338/giDw1ltv8e677/p93C8HdpsNlajggtPNmHh9SBpdo16NUa8GmRKrS2q40keFdlOKosiaklqmZcYyKohe/7ZJqeQkRvL8+gr/UGlBENBNS8JZ3Ymr3kJ0dDQTJ06kpKSkV4BqaGjgwIEDTJkyhchqAXmUCtXowJqLXKck5vZxuOot/q5kg8HALbfcQn19PRs3bgx67rVVFzEKkUTmhZZgaAviEJ0e7GcCZYBeqwvTe5Uo4rW9ZH3Ry8cwXZ6Nx+0esu1Dd1dsd7OU1elmznNb+MOO7sYtQSFDblCjkwUP9EajEdHlwXGhPUBtI4oiJdWmkG0P+sPUjFiOWO1+5U0wfFR6iVtQIRpUAefRE1qVnBfunMjWpxfwP7cVMjUjlh2VTTyzpoy5v9rKnOe28Mzqo3xy9BKeMOwLPi2rI1KjYE5W4HVfkpeIyyOyrWL42IWrjVAC/UFgnCAImY
IgqIC7gY/7bPMx8JWu//8SsEW8yrPj/NJKfdSg1ffhwsLcBA7+52JO/WwZO5+5jr8/PJ3/ua2Qf58/lmUFyeSlRHHnreNZqxUxnG6n41gTY8eO5Rvf+AbXXXcdp0+f5pVXXhmUVx4IXq8Xu8OBGiUnLXbGhsDP+5CTpMchdC9jI0NsliqpbuN8s4UvFQcffCyXCXz3+hwutFh592A3CxhR3F2UBZgzZw5er5c9e/YAUuD5/PPP0Wg0LJg1D3tFq0Tb9COZ0+YZpa7kLvtYkGSQ06dPZ//+/QENTh6Hm3pLC8mxiSHfI+oxBgSN3G/A5YMoipg+OIPH4iL27txe2mi5XsXom4vIdadSUlISspS0J/wZvVyNTC8VW/eda6HN6uLt/dW9s/o4LRFi76Ypr9dLa2srRqMRx4UOcIuo+/DztW02GjocQ+bnfZiWGcs5JMWMj77pi/0Ha5mEAsPMlEElkACZcTrunZ7OK/cWc+gHi9nwrXn8ZEUe+SlRrC9v4JvvHOGlzaGtih1uDxtO1HN9flJQZVFxegxGnYqNJ4aPvrnaGPTu7uLcHwfWAyeB90RRLBcE4b8EQbi5a7M3AKMgCGeAbwN+CaYgCBeAF4AHBUGoCaLYGRH4An3PKTpXAvGRapTy/v+saoWcvDtyqcRD06pKPJ1OFAoF8+bN47HHHiMlJYWPPvpoyM0/TqcTURRRiQpOWR2DSit7IjsxknZ397lHxoW2FF5TUoNGKeOGwqR+t1k0PoEpo2N4afNprE6JRulblI2NjaWwsJBDhw5hsVgoLy/nwoULXHfddQjnbeAR0Q7Qyg4QvWIMcoMa06pKfxPNkiVLSEpK4qOPPuq1Yrp08BwuwU16bkZI3xOkjFk73ojtRAu/21DJ7jMSX28tacR2rJmoJaNRpQbWRLQT45mZMQmFV86Gz9aH/Hk++DuVDZH+Fdr2royzxmTj4IXuwr7CqCHCqegV6Nvb2/F4PBiNRomflwsB3i8l1dIzc7mBflJ6NDVd1hs+z5ueOHGpg8JmJ14BdFMSA34/GARBIDsxkgdnZ/L6/VMo+eESbpuUyu+3nukl5e0POyub6bS7uakoOGUklwksHp/I1lONOIfJa/5qI6Q0RhTFz0RRzBZFcawoiv/d9bMfiaL4cdf/20VRvEMUxSxRFKeJoniux74ZoijGiqKoF0UxTRTFK+LXa2qVltYxiVc20IeCBXmJbB4TgeD0UPfuKX+BKSYmhrvuuovIyEhWrVo1JA/7nl2xDYQmrfQhJzGSNrH7lohKHvyBt7s8fHr0Esvyk4jUDDws4ns35NLU6eAvuy/4f96zKAswd+5cXC4XO3bsYMOGDSQlJTF58mRsZU3Io9WDcpwyjYKYO7JxN9swvVuB9VgzYquTL92+Eo/Hw5o1a/ya9qpSKQNML8oa9Hv2hDbfiGhzs23LOX6zoQJ3i422j86iyowicl7wVY0gCCSvzGeimMHp82c4f/58WJ9psViQI0PTY47x9sompmfGolPJeb+km01VGLVo3VJ3rO+7NjdLf1+j0YjjTBuq9MiA4nNJlQmtUk5u8uXxyBEqBQmpkdiE4Bn9x4cucgMqlONjkYdQPxoMcpnAT1bkE6dX8e33SgfV3689VodBq2T22P517EvyEul0uNl3bvgY6FD6AkYKX4hi7EigtakFjahEmzSyGvqh4pEvFfBHmRPOtGM92L1EjIiI4M4778RsNvPBBx+E3VzltyiWK7FBeBl9Up9Anzr4S3LzyUY67G6/dn4gTMmIZfH4RP6w7Swmi8Td9i3KxsfHk5eXx/79++no6OCGG25AtHmwn+4eBjIYNGOjibxuFLbyFlrfOknDC4ex/7aCObI8qqur2fDnj7EcauBSQx1KmYL4hPAEADXRSuyILJKpOFrdRv3bJ0EGsXcN3ImpiNEwa/FcdKKa9R99Hta1lWbFqlDGSPWmC80WLrRYuaEgiRsKk1l7rA5b1wpGYdQSIar8+0G3tDJaG4
XrkhlNVuBLvKTaRFGaYcAVaaiYlhnLOdGDs753QdbjFWktaSASgZjZ/XvQhAtDhJLnVhZxutHMixv770S2uzxsPNHA9fmJAzaEzRkXh1YpH1KXrN3l4XhtO6sP1/Dfa09w/xv7mfbfm8j94Toe+utB6tr7n+c7UrhmA31bc6ukuIkbWQ39UDEqNoLEhaM4hJuWj8/4JxgBpKamsmzZMk6fPj2gtjwYfBm9R6FCECDDGB5104yU5SmQo9YNXsReU1JDUpSGWQNkRz3xzLIcLE43r247AwQWZQHmzZsHSK6fo0ePluaIegPVNgPBsDSDlJ/OIuHxicTclUPkvDTyUrLJUaSxv+Yo5e/vo0noIDkxOWBu6UCwuzx8c/VRSuRelmu03I8Kai3E3Jo1oL7dh+g56UyPyqe+rZFjJaHXYiydZrTe7kLsjtMSbbMgJ4GVxWmYHW5/UFLEBWrpW1paUKvVKOpcXbbEvfl5u8vDiUtDb5Tqi6kZEk9v7xPo95xtZqFdwB6llIaHDCMW5CRwz7R0/nfnOQ5XBe9R2VHZhNnhZnmPJqlg0CglW4RNJxpD8qi/1GbjqX8cYdFvtpH/4/Usf3kXT686ypt7q2i1OJkzLo6H52Sy52wzS1/YwTsHqq+IwaEP126gb2+/Yhr6oeKRBVn8JVrE5vHS8m5FLz+MKVOmUFBQwNatWzl37twAR+kNX0ZvkSlIjdYOOu+zJ/RqBZ6u4K6VD9500tTpYHtlk7+HIBRkJ0Zye3Eab+6t8svjfEVZc1dWn5SUxCOPPMItt0h9edayJhRGaQBIOJB12e/qJiVgWJZB3AN5rHzmAYxxRrZHV9KiMDNqTKBl7kD45eenOFXfyZhZacitbh5Gw6EIIagZWjAIMoHp9y4kVtSzeX3orpvmTnOXD710XbZXNDHaGEFGnI7pmbGkRmtZ00XfKGK16AgM9EajEefZdsmWOLU3PVNW047bKzL5Mvl5H6ZmxHIeL3K7p5fyZufuagpRYJydGtLqLFz8503jSY3W8p33jvprQT2x9lgdMRFKZo4dvM1naX4i9R12jtUOzPu321w8+JcDbDjRwJh4PY8uGMsr905i07fnceKn17P2ibm8cOdEfrA8j/VPzaMg1cD33z/Gl/+0f9DO3+HCNRnovV4vHXYzkTIdsqgr35wQKjRKOY/fnM+vRRuu6k46d/RwwBQEVqxYgdFoZM2aNXR0DOxtDVKQLy8vB6DJIw+Ln/chpssuIkI9+Avyo9JaPF4xQDs/GL69JBuFTODHH5cjiqK/KGs90ugvoCYlJaFQKPCYndIM16LQaJvBoFKpuOOOO7A7pLm4qamh0wdbTjXw1z0X+OrsDIqvywC5gFUj4wfW9rCW4+rkSBYWzKLDZWHPp9tC2sdisUga+mgNDreHPWdbmJ8trXBkMoHbi1PZdbqJhg47glKGXi8F8r6B3n6mLcCWGCT9PEiF1OFAjE6F0yA9ez6e3up0E13ZjlsAw9T+C/eXA71awfN3TOBCi5XnPu+tsrK7PGw60cCygqSQ6KnrchOQywb2qHe6vXzj74c532zhTw9M4Y8PTOE7S3NYXpRCVkIkij6fM9qo4+2vSWq8spp2rv/tDt7YdT4saehQcE0G+s7OTryiF4M+asR9ni8Xi8cn4M6JYZvMTcfGapy13RYKarWaO++8E6fTyerVq/s1xhJFkbKyMl555RVOnDhBoTudKqcsJOuDvshMlnh5feLgD/zqwzVMGBVNVkJ4L5SUaC3fXpLNllONfH5ceoj6FmV9sB1rlma4DqK2CQdJSUnceOONqNVqRo0aNfgOQGOHnadXlTE+OYpnl+Ui00qt7Mq7sjEDnx0Lj8stuG0GoxTx7C7dj9M2sDWCKIpY7VZp4EiMmkMXTNhcHn+gB6lXwSvCh0ekrD7SaEBAehZcLhft7e3ERBjwtNrRBNGtl1SbyIzTSb0Uw4T4LpsKH0+/qbSORV4FzixD2MNswsGMMUa+OjuDN/
dW+VVRANsqmrA4PdxUODBt40N0hIppGbH9yixFUeR7a8rYc7aF51YWMSsrNPpSEATunZ7Ohm/NY8aYWH726Qnu+MMezjQOrZkuFFyTgd43si8mZniyk5GEIAj8eEU+L2KnUw6t755C7FGdT0hIYMWKFVRXV7Nly5aA/ZuamnjzzTd5//33MRgMfHXlfUx3j6PG42bsEObk5qTEIorAIBl9+aV2TtV38qXioRXUHpyVQUFqFD/5uJwOuyugKOuDtawJRYIWReLw1lomT57Ms88+S2Tk4AoTr1fkO6skKuB3d0/002Ha/Dgyx8eTmxTJZ8cGHizdF4JCxsw5s3Hg4tSuga2rHQ4HHq9Xsj+IUrO9sgmVXMaMMd30w5h4PcXp0awpqZHktfERaJG09D5PpSi7FMT78vOiKHKk2jRs2bwP+dlGzIg0XZCojwu7a9AhkH5d8Nmtw4lnrs9lTJyOZ1aX+TtcPy27RKxOxYwxoSvxluYnUtlg5nxzYJfvCxsref9ILU8vzeb2MFe1ICU8f35wKi/eNYFzzRZufGkXr28fmall12Sgb2uRbuyYhNDHkl1NZMbpuHt+Jj90mXE32mj+20lcDd03VlFREVOmTGH37t3+ph+n08nmzZt57bXXqK+vZ/ny5Tz88MPEKyWOtQFxSNRNTlIUVd4YxMiBOec1h2tRyWWsmBBadtQXCrmMX9xWRLPZwa/XVQQtyno6HDgvdBAxTLRNX4RahH1j13l2nm7mh8vzGJcY+GK4qTCZw1WmsNUU2bPyUaHgRFn5gNuZzdIqL0IbgSAX2FbRyNTMGHTq3g6NKyenUdlgpvxSh6Sl96roaO/wK250rTLkhkCjuupWK81m52Xr5/ti6hgj5/FgvdRJY4ed8Q0O2nRy1Bkjr4TTquQ8f+cE6tpt/PzTk9icHjafbGRZQVIAnTIQlnR51G/sQ9/840A1L285w91TR/HYwvDkuT0hCAK3TUpj47fmszgvAatzZCSY12Sgb6mTlvuxqcO33B9pPLYwi0sGJW9HijirOmj4bQmt71X4R5EtW7aMlJQUPvjgAw4fPsyrr77Kzp07KSws5PHHH2fS+AmYt9ZgWl2JV4BavGFJK30Ym6Bjp2ccb5zw8NNPyvn8WB3N5t7Ugsvj5aPSWhaNTyA6Yug1kMI0A1+ZlcHf91dxuMoUUJS1lknXcbAmqZHEsZp2frX+FMvyk7h3WvDC7Y1djTefh0nfKFRKxsSM4nxHLW5r/4PG/c1Sej2X2mxUNph70TY+LC9MQaWQsfpwTZfEUk1nW3eg19Z4UWfFBLw0fVOdLtf6oC9So7U0qWSo25xs23GB8cjRTU+6YnRqcXoMj8wfy7uHLvLjj49jc3lYXhjebNa0mAjykqN6mZxtq2jkPz88zvzseH52a8GwfJ/4SDWvfnkyTy4a3HNpKLgmA72pqZUI1GiSro6B0FAQoVLwg+V5vNrZyZb5CejnpmIta6b+N4cwfXgGwerhjjvuQBAEPvnkE5RKJQ8++CDLZy3FtaGOul8coGNjFcpkPZ/lReJQyUiKCpT7iV4vJ3ZsweUI7hytVsj52S0FpBi0vHOgmm+8VcKUn2/iuue38ezqMlYfrmHVoRpaLM6wi7DB8J2lOSRFafjPD47h1ch7FWVtZU0ok3VhT7kaLlgcbp74xxGMOjW/XFnY7wM9Nl4/JPoGIG9SAQ7BReWO/gfH+wN9dCQ7KiVZ5fzswBWXIULJkvGJfHz0EmKMWgr05k5aWlrQR+hQ2AjOz1e1oVcryA6yWrlcCHEaIjygPdSEQ4DR88JTOV0unlw8jtykSN47VEOcXuUfdxgOluYncrjaRLPZwfHadh57q4ScxEh+/+XiYek56AlZiOq1sI87Ike9ymhra5PMzK7QmK7hwg0FSSzIiednm09zviCG5GemoJuahOVAPXW/OoSwt417br+Lm266iQcX3YVui5mGF0uwlDSiK04g8VvFxD9UwB6XkzHxuqCB6dyRg3
z++xc4vPajfs/j3unpvPP1GZT9+HrWfGMW37shl8w4HZ8fr+PpVUf5jw+OYdSpmJ9z+Zm2Xq3gv24p4FR9J3/aed5flDXvrMVZ3Yk2DO38cEIURX7ycTkXWiy8eNfEQVcuNxYmc6jKRH17eKMXcmcUokDGyWP90zcWc/eg9u2VTSQbNGQnBqflbi9OpdXiZE+zGZ2oxua009jYSLRSCuLBDMQOV5mYMMoQskQ2HBi7bBYm2aEpLQKZZvCBIMMJtULO83dMQCETuKkwOSzaxoeleUmIIvzf3ioe+utBDFolf/nqVPTqK/tdLgfXZKBvt3YSpdCF7KX+RYEgCPz2rokkGtR84+8ltAgiMbdmkfSdyUQUxmHeWYPirTpG7RAw/fUkrjozUUtGk/y9qcTcPs7vxX2uycyYuOCB4OTObQCUbVqHd5DxdiqFjMmjpeXvGw9OpfRHS1n31Fx+dmsBL98zadiymSV5iVyfn8hLmytpiFKiiNfSsakKgIiiKzduzYfDVa2sfG0Pqw7X8NiCrJA01zd2UQKfHw8vq1epVGTEpnHWXIu7Pbj6prNVKmZqjVHsOt3M/Oz+axbzsuOJ06tYc+wSOo20EqqrqyPKrUGZFBFgS2xxuDlV3zFs+vm+GNfDFTRzccaIfMZgKEg1sO6puXx3We6Q9h+fHElqtJbfbT6NzeXhrw9NIzHIavmLjGsu0Hs8HswuCwbdPw9t0xPRESpev28KbTYnj71VgtPtRWHUEntXDolPFaPJjkEWoSTmjmySn51G1KL0Xn4hdpeH2jZbUH7eabNy9vABYpJT6Wxp4lzJwbDOTSYTyE2K4v4Zo0OWkoWKn95cgEIm4wcflxMxNQlEUKbpg1pMN3ba+c2GCtaFGVQHw7kmM4/87TArX9tLjcnGL24v5NtLskPaNyvhcuibQqyCg3M7g9tAmU2dqEUFFz1eOh3uoPy8D0q5jFsmprL5ZCMRXVp6URTRdyhRB7E9OFrThleEScPMz/swdkwMFkRq1QIJ2VfPdyorIXLIGbggCCyfkIxSLvD6fZNHhOIaaVxzgb69vR0RiDaMzI17JZCXEsVzK4s4eMHEz9d2P/zKRB3GL48n4RsT0E0Obq17ocWCKAafKnX6wF7cTgdLv/5N9MY4SjesHdHvEQ6SDBqeXprNjsomdqi9CFoFuim9m2outdn48UfHmfPcVl7ecoanV5UFFIqHgmazgx99dJylL+5g5+kmvr0km23fXcA909LD4kx99E2480bHTylAhsDJ48HpG0unGY2oYl9zJ3KZMOhL9vbiVJweL2ZZdwJg8GgDaBuPV/Tr7otHjczzIpPJUKzIZPSXx3/he1oGwtNLc9j5zHXDnuD0ROOFcziGYGQYCq65QN9aLzVI/LNIK/vDLRNT+drcTP5vbxXvHbo4+A5d8M1mDdYsdXLXNqLiE0kdn8+ERcuoKjuCqa7vDJmrh/tnZjBhVDQ/2ViB7lvF6KZLgb66xcr33y9j/q+38tb+am6dmMJfHpyKzeXht5v6N7AaDDanh1e2nGbBr7fx1v5q7p42im3fXcgTi8YRoQo/+7uxMBlRlGahhgOtVsuo2BTOWS/hDDKVyWyWDM3WX2ylOD0ag3bgZqP8FAO5SZGctHR3WxoEXa8JTLVtNu753328d6iGL09PxzCCDUxjZ48iKfuf+3lUymUkGUaOrnFYrXzwq/9i7UvPjcjxr71AXyOpEmJTrzy3O9x4dlkus7OM/ODD4xy9GDhLtSdazA4ef7uEn689SVGaIWB5aWkzUX3sKOPnzEcQBAoXXY9MLufoxs9G8iuEBblM4H9uK8BkdfHcpgrONln49nulLPzNNtYcruXuqels++4CfvWlCSzMTeDL09N558DFIXUUHq9tZ+Hz23h+QyUzxxpZ/9Q8fn5rIfGRQ+8MzUrQk5MYGXaXLEjqm3aZlZo9gcMzrA4rWrmakksdA9I2PbGyOI3DHU5kooAgChhHJfhrVp8dq+OG3+
6g/FI7L9w5gZ/fWtDvcUx1tex8502ctivjyfKvil3/eBNzawszv3TviBz/mt4fADwAACAASURBVAv0psYW6cZOD81k6osMhVzGy/cUE69X88jfDwelKURR5KPSWha/sJ315fV8e0k2qx+ZFWDBWrFnB6LoZfycBQDoomMYN20Wx7dt6ldqeTWQn2Lg3+Zk8s6Biyx5cTufHavjwVkZ7Hx2IT+7tYC0Hn7sTy4aR4RSzi8+OzXAEQNhc3p44p0jALz37zP54wNTwrZx6A83FiZzsKo1fPpmQj4AJ8tPBLgaWl12FF0mc8FklcFwy6QU6gSIQE2kqEE3zojV6eZ7a8p49K0SMuP1fPbkXG4vTuuXUmk4f5Z3fvQMBz5cxfrXXrqibov/SrhUeZLSDZ8xadlyksfljMhnXHuB3tSGDjVK4xfTnjhcxOpUvH7/ZFotUnHWN3MVoL7dzsNvHuLJf5SSbtSx9om5PLFoXFCf7ZO7txOfMQZjWreOeeLSm3BYLJzas+OKfJdQ8dTicSwen8Aj88ey69nr+OHyvKAqB6NezaMLs9h8qpE9Z5qDHCk4fvn5Sc41W/jNnROGpKseCDcVJQ2JvomKiiI5OpFzjjqcF7tXKB6PB4fXiUNUEqdXkZ8SWldpQqSGjKwYYr16EkQDtQYFy1/exbuHLvKNBWNZ/chMRg9gYV1z4jjv/fT7KFQqim+4mcr9uyn5rH9J7v/H0OBxu9jw+stExsYx5677R+xzrrlA32ZuJ0qhC2kO5T8LClIN/HJlIfvPt/I/n51EFEXeOVDNkhe2s/tsMz+4aTzvf2NWv2oAU10t9Wcq/dm8D6nj84kbNZrS9Wu/UNlahErBn74ylWeX5RI3iMnWV2dnkBqt5edrT4bkALi9sok391bx0OxMZo9AYS0rIZLsRP2Q6Jv8SQW0yDqp399tS+0fHOIUmDcuPqzi8PIpo5jkyme6Zzy3vl+KxeHmrX+bzrPLcgeUxp49fIA1//Mj9DGx3P3TX7HgK18ja+pMtv/9z9ScPB729/r/6B8HPlxNS001ix9+FJV25JLTay7QdzjNREX888mfBsNtk9J4aHYmf9l9gRte2sn33z9GfmoU656cx8NzxwzY7HJy13YQBHJnz+v1c0EQmLD0JhrPn6X+7NCLmlcTGqWcZ5blcKKugw+ODFxYNlmcfHfVUcYl6Hlm2cgskaGbvmkMl74pkMYpnzpxCrFr5dbZJGnomz3ygAY1j9vNR8//N2cP7w96vCV5iZyQwRavm3m5Cax7ct6gqpETO7fy0fM/xzhqNHf99Dmi4iTN/rJHnyI6MYlPf/scZlPwoR7/H+GhpeYi+z94l5xZ8xhTPHVEP+uaCvQuhxOr6CDG8MV3rRwK/uNGqThbY7LxP7cV8vbDM8gYxIpYFEVO7d7GqLxCImMDH/K8uQtQarSUrv/iSC3DxYqiFCakGXh+fYV/nF5fiKLIDz48jsnq5MW7JoY1kCVc3ORT3xwPL6s3Go3ERcVy3lMvDfAGOhul/7aICub0CdKndm/nzMG9bP7zH3A7A71yNEo5xntzib8zh/+9fzIxuoG7e0s+/5jPX/kNo/IKuPNH/01EVLdKRx2h4+Zv/wcOm5VPf/scnhAHpgwH3E7nNfdyEb1eNv7xZZRqDQu/8rUR/7xrKtC3VDcCEBP3xRsIPhxQyGX89avT2Pcfi7h3emga74azpzHVXQqgbXxQaSPIm3cdFXt3Yu0YeJLOFxUymcAPludR32HnjzuDT+P6qPQSa4/V8dTibApSh3eEXV+MS4xkXIKetUNpnppQQIOsjZaD1QB0tnR1xcZG9fKKF71eDny4ighDNJ3NTZSu/zTo8ZYVJA9YcAXpJbhn1Vts/ev/kjV1Jrc9+5OgNEJcegZLv/Y4tafK2fnOm2F/t3AgiiL1ZyrZ9KdX+cMj9/P6Iw+w462/4HG7RvRzg8Fm7uTY1g3sWfU2DefODAvNWbZ5PbWnTjD//n9DFz3yPT
//PGYNg0EUaTm4GYDY5KFxr067jZaaauxmM3aLGUfXf+3mTunfFjO66Bjm3fcQKs3V8dFRymVhWQ+c3LUNuULBuOmz+t1m4tIbObphLeXbNjH15pXDcZpXHFMzYlmWn8Qftp/l7qmjSOhRvK1ts/HDj4777RxCgdvppHLfLkYXTRrSg3hjYTK/23Kaxg57r3MZDOPzxrNj5w4qKitIcRbS1hXos7J7N4+dObiP1ks13PTEdynfvpn9H7xHwXVL0ejCUw+JXi9b/vq/lK7/lPwFi1n69W8ik/e/2hk/dyGXTp/i8KcfkDIuh+wZc8L6vMFgaTNxYudWyrdtoqWmGoVSRda0mciVSg5+vIaLJ45x0xPPEJ04MhOqfLCZOzlzcC+V+3ZTfazUbxeyd/XbxCSnkDNzLjmz5hE3Knxv/c7WZna89RfSC4rIX7B4uE89KK6dQH9uK6aTB0EeR1xS+Pa8DefO8OGvf4a5tSXgdwq1Go1Oj0an5+zhA9SdqeT27/3kiryJLwdej4dTe3YwpnjagAEgbtRo0vIKOLrxM6Ysvw0hjGHZ4cLjduOy20Eg7KA0GJ69IZdNJxt4cVMlv7i9CJCGhjz93lG8XpEX75wYknFXw7kzfP77F2ipqUYbGcXihx8NO6DdVJTMS5tPs668ngdmZoS8X1JSEgZ9FBfaG7GfaKGxpR2ZKDB7QrdTqCiK7P/wPaKTksmeOYfY1FH87dknOPjRaube+2BY57n973+mdP2nTF5+G/Pveyik7tUFDzxMw7kzrHvtJYyjRmNMDW1KVzB43G4cVgs1J49Tvm0T50sPI3q9JGfnsuRrj5Mzay7qCOl5HlM8lQ2v/46/PfsES772GLmz5w/5c4MhWHA3JCQyeflt5MyYQ1R8AqcP7KVizw72f7CKfe+/izEtndxZ88iZNZeY5NCG8Gz58+t43W4Wf+3xK9YtfO0E+rHXYYqqRGZuIOrTu+HL70J0aJaoFXt3se7VF9FGRbHiW99DF2OUArtej1qnR6Hs7ho8V3KQT377S9754dPc/v3/IjZlaBOWrgSqjx/F2t5G7pzBH4iJS2/i098+x/mjhxkzKbTCkMftxtJmwtzagrm1GXNrC52tLZhbW7B2tOOy2XDabbgcdpx2Oy6btZvbFQRmrrybmSvvGbYXS2acjvtnjubNPRd4cFYmOUmR/GXPBfaea+GXtxeSPojk1uN2se/999j/wbvoDNEs/fcnOLrxMz558Zfkzp7PdQ89glYfWqE/u4u+eXFjJYerTBSnx1CcHkNucuSAKzJBEBhfkMeBfQdoO1yL2WxBiYqJPSwKqsqO0HDuDEu+/k1kMjkJGWMYP2cBJZ99zMRly4PWYoLh3JGDHF77IROvvynkIA8gVyhZ8a3v87fvPcknL/yCe//7NwErXNHrpa2xnqaq8zRVncfc2orDErhCdtq6h7XoY2KZuuJ28hcsJjYl0AI7e/psksaMY+3vfs3a3/2aqmOlXPfgv6PU9L9iclitVJWVcL60BGtHG16PB6/bjdfjweNx43V78Hqkf5vqaruD+023kjNzLgmZY3v9XYoWXU/RouuxtJk4vX8Pp/bsYPeqt9j93t9JyBhL1tQZjJk8jYSMMUH/nqf37+HMwb3MvfdBYpKGNrRnKBC+SLI6gClTpoiHDh0a0r5/++UfaXa28i3l70GhhnvegbQp/W4viiJ7V7/D3tVvk5I9npu/8x8hZel1Zyr44Ln/QhRFbnvmR6RkD80Vb6Tx+e9f4Oyh/Tzy+t9QqAYuxHncLv742EMkjsnitmd/3O92zRer2LvmH9SePI6lvQ363D9yhQJ9rJEIQzQqbQQqjRalRoNKq0Wp0aJSa1BqtNSfreTU7u1kTZ3BDY99+7KlZV6vB3tnJ/UNTTzyx+3kRsv50rQMvrG1g5k5KfzxgSkDBrKm6gus+/2LNF44S97chSx88P+1d95xelT1/n+fmXn6s71nk93NpkF6I6QZ6U0hogiIBv
3RVEDxKlfhelXkol5AxQZyRVREkRLARLxUKRdCeu/Jbur28mx72jxTzu+PebLZTQ/skmWZ9+t19pw9M8/MZ74z850z55w558v4w2Es02TFomdY9uyTBDKzuOCmr51wD4k1+9r43Vu7WLOvjaYu52M3v0dh4tBsppRlM7Ush7Elmfg8Ch5FwaMpaIqgvraGx/70R842x7FFNBBTU9x+17e6t/vUD++gvaGe63/1++5CSEdTI3/8ty8zdt45XPDlrx9XW6y9jcf+/VbC2Tlc86OfH/f6OBL7Nq1n4T3fY/TMOUz7xKdo3rubpr27ad6zi+Z9ezCSjhMXQiGYnd1dePKHM/CHnEKUP+y8KecMGUrZ+IkoyvEbyW3L4t1nnmD5358mp6SUT972bQorKruXdzQ1UL16BdWrV1CzZRO2ZeIPhckoKERVVRRVQ9GcWFVVFE1DUVTnDWnmXIoqR55USbsr0sKOpUvYsewd6nZuAynJyCugctoMRkybwbBxE9E8HpKxKH/61s0EM7P4/I8fQNX6tpwthFgtpTyiwxtUjv7Bux7AHwpw/ZfOhr9+FqKNcPnDMO7yw9Y19CQv/faX7Fj6NmPnncP5N32tV8n9eLQ11PHcj39AtC3CJ277NiOnn/meNPcXhp7ktzctYMysuVz4ldtO6DdLnv4Ly557iht+9QhZhb3rQCN1Nbz7zBNsX/o2Xr+fUTNmk5FfSEZuHuEeIZBxYhOySylZ++Ji3nz8UXKHDOVT//49sotPbPafWHsbq154nsZdVcQ72ol3dpDo6jzsoQNgCZVh4yZy+pmzGDFtBhl5vUu7tmWxcvGzvPvME/jDYc678RZGnTHrsO007q7mpQd/Tsv+vYw/+wLOuvYGfMETezhJKanrSLJmbxtr9rWxZl87W+o6MKwj33sCyed86xhj5xIVSQj4ueXOmwGo3b6VJ7//75x17Y1M+8T8Xr9747FHWPviP/jiTx8kb+jRq1OkbfPsT35A7dbNfOG/f9HrI7qTZfnfn+GdHg2z3kCQgvLh3aGwopK8YWV4vH036fgB9m1az//+5mckuzqZdcU13aOzttY4Ddm5Q4Z2O9sho08/ZttDXxHvaGfX2lVUr1rOng1rMHUdjz9AxcQpWKbB7rWrueZHP6N4RN/PJPWRcPR20uS+n9zLqCGVfObLn4NYCzz5edi/DM75HnzsW5B2QF2RFhbdfw+Nu6uZd82XmH7pp99TXVm8o53n7/0hjbuqOff6rzLp/ItPehv9xbZ3/49//vI+Pvu9H1E2ftIJ/aartYVHbr2O6Zd+mnnput72hnqWPvs3tr79JprXy5SLL2X6Jy8nkNE3837u3bCOF37x3wB88ht3UD5x8lHXTUS7WLX4Wda89A8sw6Bk5BhC2TkEs7IIZGYTzMwkmJWNFsrg689X0R6JcFNFAnP3JtobnR4wRZUjGTHtTEZMPxPV4+Glhx6goWoHo8+cw7k33NyrS+GhmIbB0mf+ysrFzxHOy+Oir37jhG172LHoBivXb2N/awKZU4Rh2ZiWxLBtDFMS3bkM2bgHj1QpLy7j6q9+AYDn7/0hdTu3c9Nv/nBYlUW8s4NHv34DZeMnMf/2/zzqvlf/8++8+effc94NNzPp/Evek/4DSNtmy9tv4A0EKKyoJLOg6AMdpTLe2cFLDz3A7rWrUFSV0tPGMWLaDCqnzfhAq0aOhJlKsX/zBqpXL6d61XKibRGmffJyzlpwfb/s7yPh6Nu3N/CLvz3M7DHTOP+qTzj1vkYSFt8KG5+BSdfApb+gYc9e/v7Te0glElzytdvfd0ncSCZ54Zf3smvNSs68/CrmXPWFATEc6/P33U3TripufOiPJ/Q6fIDFP/sxNVs3cfXd97Fy8XNsfus1VFVj0oWfYMZlnyHYD98otDfU8/f7/4tIbQ0fX3AdUy+Z38uGejzOmv9dxKoXnieVTHDa7HnM/uw1x2z82lzXwbb6Lj4zbShSSiK1Nd033IHXawB/OI
Nzr/sKY2bPO+HzVrdjGy899ABt9bWUjBpDbukwcocM7Y6zCot6vZZLKelqbaGhegcN1TtpqNpB466q7oHCLvzKbYw/+/xe+9i5cyd//etfAZgxagqXfH4+TXt28fh3vs6cK7/AzM9cfURty557iiVPPc7Vd99P6ZjTD1veuLuaJ777LYZPmc782787IK7V94uUksZdVWQXl/R5A39fIaWkrb6W7OKSk7ofT4aPhKPfvGQ5z7z6Ihl1NSjRFkI5OYRzcp2Q2EuoYQnklLNst5dgdi6f+vb3KSirOPEdmCmoXw9GHCo+Bj0aEG3L4rVHH2Ljv17mtDkfZ9zHzyWnZAgZ+QX9dlKPRaKrk4e/vIApF192eOmhfR+8+n2YeQsMO7yuee/GdSy8xykNqprGxPMvZsb8zxLO6d9vE1KJOC8++ABVK5c6VWk33oqUNute+V9WLFpIsquTkWfMYvaVnz+583YE4h3t7FqzkraGOqZcdOl7OjZDT7Ji0UJqtm6ira6WWHtb9zJFVckuKiG3dCi2bdNQtYN4R3t6mUZB+XCKR4yieORoti15i30b1/PJb3y7V88e0zS5/7770VM65338HOaePY8Xfnkfu9eu5Mbf/BF/+MgOzUgmefS2G8kuLuGqu+7t5ciNZJLH7/wGRiLOgvt+fcy3F5cPH8dy9IOm1032sGKGFhQycsJYVD1OtC1CtC1Ca20N+9o60OPDoRmGBFqZX7SB4NtRKDsThs2E0qngOaRfvB6FmhWwdynsWwo1q8BM9xAoHAvzboexnwJFRVFVzr/xVjJy83l34RNsW/IW4DjK7OIh5JQMScel5JaUUlg5ol/74e9Y9g62ZR3+kVRnHTx2KbTtgR2vwOeegMre65SNn8SY2fPwh8KcefmVh9Vp9xfeQJDLvnknS599kqULn6B5727inR3E2iJUTJrKnKsW9Fm9ZjAr+7AS9Mni8fmZc+UXuv9PxqK01dUSqatxQq0TA1RMmtrt2AvKK3u1BY2ZOZeFP/oe//zVT/H4AwyfPA0ATdMYNXoUmzZtIpyTSVt9LTuWvsP0yz59VCcP4PH7mXXFNbz2+wfZtWYFI6YdfGN948+P0FZfyxXf/S/XyX/EGDQl+sbdnSy8dxXlE/KYfnEFxZW9L2QjmSReu42M9o0otStg33Jo2e4sVDxQMgnKZoK0HcdevwGkBUKB4glQNhvKZ4GRgLd/7vw2f7RT9z/+ClCdZ2a8o925yetraW+oo62+lrb6Otob6rq7FiqqStGIUZSNm8iwsRMZMuY0PL6+m9TgyR98h0RXJ1/62UMHS3TRZvjTJY6z//Tv4PV7oLUarnwMxgyctgWAnSve5aWHHqCgvJK5Vy9g6OlHHy99MJCMRXn67v+gra6Wz/zHD7uPd8uWLTz99NMsWLCA6tf+yda33+SG3zx63J5hlmny2O23oKgq197/axRFZcfyJfzj5z/hjPlXdLe/uAwuPhJVN3rcYOObtaz/136SMYOhp+Uw/eIKhozOPno9ZDwC+1c4Dbb7lkHtGqfBtnS649TLZsHQM8B/SMOjbcPWRfB/P4XGTZAzHD72TZh4NWhH7qZm2xZdLc201u6ndutm9m/ZSEP1TqRto6gaJaNGM2zsBIaNm0h2cQma14fm8aB5fUftLSClxEgm0BNxUvEEqUScaKSVxT//MXOuWsDMT1918Dgfuwxaq9g57SmWLfEwYkIm0yK342teCZf/D0y44qRt3p9Yptnn3c8GMvGOdp686w5ibRGu/P6PKaociW3b7Nixg5K8XP5w201MPO9Czr3uqye0vQOO/cKv3EbZhMk8/u2vkV1cwtV33/+RsutHiY+Eo9/ZtpNvvvlNwiKT8v2TKKkaj6b7MQo6MKc04KswCXlDTMyfyNSiqSjiCB+tmOmBoY7irA/DtmHHi/DWfVC/DrKGwZzb4PRLIeP4n2inEnFqt29l/+YN7N+ykcbqKqS0D1tPKIrj+L1eNI+3+7d6In7ELo
WKqnLdL/7H6SKZ7IQ/z8ds2M47BY+zeb0gM99PZ0sSf0hjRsFLjEs8jHLZAzDtiyd02JZpY5k2Xr/rMPqSzpZmnvzBtzF1navuure7i+Qbjz3Cupdf4PpfPkJmwYlNPCKl5In//BbRtghZBUU07a5mwX2/OuU9UU4aKZ3CWPNWOP0yCA7Ocaz6go+Eo9/buZdfr/01cSNO3IyTTOrk7R1Bxa5pBPUsmkP7WF36Kp3+FkpFOdMzZjLGN45gKot4R4pYu06sXcc0bFRNoGoKiqagqgLVo6CoCqomCGZ4KRufR8WEfIKZ6QeClFD1muPwa1Y4eQWnw4izofJsKJ8NvuP3BtDjceq2byHaFsFM6ZiGgZnSsdKxmTK6Ryn0BgP4giG8gSC+QBBvIIA3GMQXCBHOzSOrsAhSMfjLFbTtruFl+QCtLQpTLijjzPmVtNZEWbKwirqd7eQEIsz2P0j5pVcgZt9yRG2WabN/a4Tq1U3sWt9MKmHhC6iE8wJk5Pq7QzjXR0aen8y8AIEMz8G3qVQMOuuhq86JU10w9nIIfbjnEu1r2uprefIH30FRFK6++z48/gCP3HodY2bO5aKb/+2w9dsb4wQyvfgChz9092/ZyNM/vBOAi27+N8Z9/NyTFySl80bozwS1/+aVPYy2vbDhKYy1C9nfkEW7NYSywBbyps9GzLoZCvpumOlU0mTnykbMlE3ZuFyyi4Ifyt5IHwlHfzQs02b78gZWv7SXzubEYct1TxxvpqAgP4eC/Bw8XhXLkliGRUyP05WIEtcTxFNJdF3HEw3hTThjbxRUhBkxuZCKifnkloQQAA0bofp12PWmU9dvJp02gGEzHKc/4mwonnjibw3vFSMJf7uKbZsFb8W+hub38vFrR7ExsJRX977KhIIJXFp5KalqH+8+u5OO5iTDvOuYc45G3vyvgRBYlk3ttjaqVjWwa20jehK8apJK3zKylf1E7Xy67CKidiFdZj4pu3c7g19LkOtrIFfsIlepIlfbT662j4CSnkHJlwlzvg4zbwbvyY9PNFhp3reHp++6A184TPn4yWx4/WW+9NOHen0E1dmSYMmTm9i1qQufTzLxvOFMOncYvkMm+X7t9w+iah7O+uKNJ+y8pGUS37acyJqlRKr2Eu0ShNVWskJxMrMlmbk+tKx8yCiBcBFkDoHSaSf0FntM9C7Ysoj4ykXsqbLZrZ/B/tRULHnwIZat1THCt4SRoyzyzr0SMfLc7u9jTpaO5gQb36ph65J6UomDwy5nFgSoGJ9H+YQ8SkfloHqOP0SHoVskYwbhbN8pm/ToI+3oD2BbNns3tWKZklC2Dzugs6zjHV7a/yIrGlZgS5tROaMYGh7Kvs597O/aT8o+OMa3X/VTllmGikpzbRdlkbFUtE2gMOp8VejNhhGTixg5oZhAphevX8WjGHiaV6PtexOx+02neyaA6nV67pRMcsKQyVA4Djx91CBrpkj97f/x9urhbEueQ8GIEC2zNvBU7V+IJCMUBYtojDcCMKN4BpcNn8+Q3aez/h/VpEyV08obEXmV7NocJalreESC4b7ljPIvYViZhTpyLmSXQ7IDku2QaIdkO3o0QVenIBrT6EjmEJEjiBilROK5pMyDDigQUsgrVCmxl1Pa9RzFOe2oZ30Lpl77wZQakx1Qtw4i1aD6wBvCUkMkjQBxw0cy5See1NB1lUBeFlkFAbIKAoc50f6kbsc2Ft7znxh6klFnzuayb/4H4DiU1X/fwLq3WhDSYHJwMa1mGbv1mXi9konnVTDp3DL8oRPTmkqYNO/rIlLTTmT7Tlr3txHpCKDbB99AFcXGtns6O0lYayNLqSNTrSdLbSBLa3DsNHos3jHzoGLO8R/eRgJadiKbttG2bjm7N3eyOzGVRmMUoJCRrVExpZjhk/LJKQqxZ2ML1Strqa3qQkpBllrHyNxtjJh7GvlnfxrhPf6XylJKara3seH1GvZsbEERMGJIIxPlHwladez1Xcre5FRqWn
KxLIHmUxl2Wg7l4/MoKMsg2qbTFUk6oTVJV3OUrtYE6dEe8PksiktVSkZmUzJ2GAWVuXi8H0wX64+Eo7eiMdoXPkNwyhT8p5+OOImxO1oSLbyy5xVe3vMyHXoHZZlllGeWO3FGOeWZ5RQGC7tLRNFUlLVNa1nduJr1ezeTqNIoi4yltGM0mjzCDSYkilegecGrpgioMYKyhVCqhqBsJqB0EFCj+HOzCZaU4sktQQllooRyUELZKKFc1IxclFAewtejW6aUYBlOt08j2R23vPAwr6yeSptVSnTiXp4J/xZdJvlY6cdYMHYBM0tmUh+rZ3H1YhZVLaImWkNQC3Jh8cVMWlFA095RKMJguG8lIwt2UTa+EG3kXPSyM+nSPERTUeJmnISZIGkmSZiJXiFpJrGkRcgTIsObQUgL40uGEG0+zFYVvRk661O01sRAgioMij1bGZpVS+m8uRSe9UnUHhODJGMGHU0J2pvitDfF6WhK0NEUR4+byLQdpAQkSOcPUjoFPSFAIYViJVGsBIoZRVhxFCwMITCtDJJ2Fro8ftWaT0uSFU6SmWmTleclszCI8ARIpRR0XaCnFFI66EnQkzZ60hHhD3vxBz34wh78QQ/+kAdfSMMf8hDK8pE3NIR2hIlQ9m1az5t//j0X3/ot8oeVs+NfG1j6j/3E9CCjA28za2YX4bNvgP3LaH7lSVY3zKVan43HK5l4TgWTzhtGINz7Poh3pqivaqduZzv12xtpqU8hpXNde0WMXG8dufmQW1lK7oTJ5FUUEsjwkIwadDQnukNns3MOOppjJKK9J3sJKO1kqY1kZZlkleSQWTkCoSgkGhuIt7aR6IiTiJokdC8JO5O4nY0pnUJOQbHC8OllDJ9cQF5pGCEEUkpM28STLgQkulLsWt1A1TtbqK1Rkahkas1kZ+kEczMJFhUTLBlCMMtHKMtLMNOHL6hRvbaZjW/WEKmL4fdbjMtZxnjzD4Q9nTD6IqeNrWYl1K/HCeVadgAAD45JREFUsAS1+gT2yLPYm5xCVO/90NIUgwy1mQzRQFhtJlNtwqvEaTZG0JA6jTbLGZRNwSI/1EhJXhdFpQI1mImhZJESGRiESNl+UqaGoVsYSYui4VlMueC9DUnxvh29EOIi4JeACvxeSvnfhyz3AX8GpgGtwFVSyj3pZXcC1wMW8HUp5cvH2td7dfTxlSvZu+BaR4/Xi3/8eAKTJxOYPInA5Ml4Ck+sEeu9EDfibGjZwKr9q9ld1UA8pqMnDFK6iTBUPJYPr+XHY/nwWD78ZpiAEcZvhvEbIZSTmP9FYKEICwUnCCyEsBHYCGEBFnE7j5QnxYsj/0Qkdz+XjbiMz4/9PJVZlYdtT0rJmqY1LKpaxMt7XiZuxhkuhlDoD9DpFURtnWgqStSIYth9O+nDUE8Z41JnUNo5ikBdBla742w1JUXRUC+WCNDerJOM92yglmSEDLLDMfweHaTt2EDaIC2EtAAbIS2sVIKoHiMmVGLCQ1z1kVR86CikbOdhkPRESXnj4EshfCk8fgOv3yToMwhrBhkJL8HOAJ6oHyUawoxnoKfyiFsFyEM+Q/GKGF4Rw6fE8Ik4XsWZ71WXmSRlJkmZgW4Gsent1BUhySuQFJUFKBxRQNGYIeQUH5z3uGn1Wt5+ejsNHYXke6opmFrFngnFbOzax9bIVvID+UzMG8/EWAcVK1ezp34u1ck5aB6YeHY5uUNC1FW1U7etmfYWp4pCFSmKPdsp9mwlkN1MalQJkYpKGkM5NCSbaYg10BhrpCHWQESPEFADBD1Bwp4wIU+oOx30BAmTSVaygFAiB19XANGYwGoxSHZ6SBi9uzkrmPi1OAG/STCsEsgKEMjLwT8kB88Ikxa1ntquWupiddR21VIbq6W2q5a4GSfLl0VhsJCiYFF3KBBFeDal0LclMLtAT4VI2tnYR/lEKD+jnYmepxjl+RdqQQVdE6+iffR5RBSBLW3C3jCZwku4bR/B+g0otauQ+1YSiS
i0W0MIqy0EMqCrMJ/m7BIaw7k0+YI0qYJ2aZEpNPJsyIra+BsC2E1B4q25tEWLsOThhU+BhVck8KgpPKrJ8OEpZn3j2hO+h3pt6/04eiGECuwAzgdqgJXA56SUW3qsczMwUUr5FSHE1cDlUsqrhBBjgb8BM4AhwGvAaCnlked74/1V3RiNTSTWrSOxdi2JdetIbt6MNBzn5CktJTB5MlpBAcKjITwe0JxYaB4n9niQlolMJLDjCexEAjse6/W/tEzUcBglnIESDqNmhFFCYZSMsJMfCoGqIRSBVBRStkGXHaPLiNFpdNFlxUjYOgmZIm4nSdgp4ikbPQkpXcHQBXE9RSwVx7AsBCpCqghUFKnix4tPatgIbFsgbQFSQZEKAgXFVjCVBJ35y/lE4XTm+Mbia49hNjc7oakZs70NNRRGzc9Hy81Fzc9Dy83Dyg6z3tzD69HVJGydMH4yhJ8M6SOEl6D0ErQ9BKSGT3jwKh68mg+v4sWjefGq3u60QEG3dXQrRdLWSVhJktaBOEnUilNnRdhnNFJt1NGu6tgiSI4+kvLOUZR2VmCrSeK+JqL+Jjr9zbT7m2jzR0iqFikhsJAIQLVBSBBSoEpQ0sEADlzdUkDIE2JoeCilmUMZFh5Kvj+feLKLWKKDaKKDWKKDWLKLeLKTeLKLZDJGghSmCqaCE6eDJRSyZCGaomEocUwlgcAGGxTpvFUIKdFQCCgKQUUlKCAoJWFLI5TyENK9BBIhRKIcmajE0CuRtlP9oCoJMsKNaJpOS2QUltbF9uH/YmXG2/gMi4AO5WohY/xltJmdbE3uplM1SHghHAwxqSODypqzsbrOBBQUJY7q304itJOW7P3sz++gXrNpTcXAtPAb4DXAb0CG5aVEzaFQySJfZJIp/Ji2SVKmSNqp9PWrd6fjVpJO4qQ0gaFBKh0MDWzFS64sQVUVkp44uqpjY2LbFtK2sW0L27bQLIlfh2AKArok2/QxRMmhWGaQb4cI2hpdaoqISNBKjGY6aSFK0itIeiDpAUsFSwFN8ZAlQ2TrQbIMH2Ejg6CZScK3m3bfXhJaBrqpYsYTeHSTQAoCzuCiJL2Q8ELCJ9C9AhEKooUy0IJ+pGLTHuskHmvHlwKfSTqWhE2NLBEiaieIixSGBqYqMA5cL4pKLsMRQsHQkuhKgpSWwBApLMXGQmIpkrMyh3PPlYvek/97v45+FnCXlPLC9P93Akgpf9JjnZfT6ywVQmhAA1AA3NFz3Z7rHW1/fVlHb6dSJDdvJrFuvfMA2LABu6MDaZrOA+B4x+73owQCKMEgSjCACAQRioIdi2FFo9jpcLztDAREMIhWkI9WUICWk4sdi2K2tGJGIliRiNNVdIBgeVR0j0QKUGzHaQspUKRE2I4DFbZEDHyzHxMbsNMPEFsBQxHEg4V0hSuIh8uJhypIefPJa3mX8n0vE04kUE7wmG3hOK2OUB6mx0coWo9mSzQbNFugWqDaH3IDDkK65k1ixu+efE+/fb9DIJQC+3v8XwMcOhJY9zpSSlMI0QHkpfOXHfLbw0aiEkLcBNwEUFb23odMPRTF6yU4ZQrBKVMOWyalBMvqdvrSNJEpA6EIlGAQEQic0IQY0rad0n60y3H8sRjSsiFdYiEdpC2dPMty/jct53/TQlqmoyWtB1s6v5MHfnd4GqEcrIQWzoQVTu8DgfBoaPlpp54OSujoDWPSsrA6OjBbWrAiEczWVrAlwutF+LwoXm867XNijxehqQcfcFIenEez23ekK8qldOwgD8mzbGQijhWLIeO9YzsWw4473wgIRQVVdc6FqjrVGYoKquIsU4SzrGdaKAhVOdgbo8eD+NCCjdA8CE1DeDRn+4f8j20jU4ZzjRwhAI4moYCi9EqjiHQ7io20rfQ5tsEyneM3TQwjiWmkMI0klpEiy9ApNAwsowbL3AWWTXhUPplnXIESDnW/NSqhdBwMIi2z22Z2PN6dTnS0IiJ12JZJwH86wUAmAV8off607u
MUXi8iEEDxB1CCAZRAAHEg7fcjfL6D59G2nWv4wP1z4FpPpbD1FFJPInUdO6kjU3p3uuc1220jIZxzJgR4PKgZGQeP68CxpoPwepHJ5MFjjMexY+k4EUfG4+l72XTuJ9NM31sW0jTANBE+f9p26e2HQs7203kI4djuKEEahmOXQAAl4O9tI38A4fM6+zUMZCqFNAzsVKo7LVNGbx9gWT2uDSceWnl49WpfMCC+eJFS/g74HTgl+g9in0IIp+pG0+AYM9QcdzuKghoOoYY/vN0Dhaqi5eai5bofoww2Tn5G04GLCDhOljz324uT5URaAWuBnrMYDE3nHXGddNVNFk6j7In81sXFxcWlHzkRR78SGCWEGC6E8AJXA4sPWWcxcOD7+SuA16XzjrwYuFoI4RNCDAdGASv6RrqLi4uLy4lw3KqbdJ37rcDLON0r/yCl3CyEuBtYJaVcDDwKPC6EqAIiOA8D0us9DWwBTOCWY/W4cXFxcXHpewbNB1MuLi4uH2WO1evmxL/UcXFxcXH5UOI6ehcXF5dBjuvoXVxcXAY5rqN3cXFxGeQMuMZYIUQzsPd9bCIfaOkjOf2Fq7FvcDX2Da7GvuFUayyXUhYcacGAc/TvFyHEqqO1PA8UXI19g6uxb3A19g0DWaNbdePi4uIyyHEdvYuLi8sgZzA6+t+dagEngKuxb3A19g2uxr5hwGocdHX0Li4uLi69GYwlehcXFxeXHriO3sXFxWWQM2gcvRDiIiHEdiFElRDijlOsZY8QYqMQYp0QYlU6L1cI8aoQYmc6zknnCyHEr9K6NwghpvaTpj8IIZqEEJt65J20JiHEF9Pr7xRCfPFI++pjjXcJIWrTtlwnhLikx7I70xq3CyEu7JHfb9eCEGKYEOINIcQWIcRmIcRt6fwBY8tjaBxotvQLIVYIIdandf4wnT9cCLE8vc+n0sOjkx7u/Kl0/nIhRMXx9Pejxj8JIXb3sOXkdP4puXeOi0xPA/dhDjjDJ1cDlYAXWA+MPYV69gD5h+TdB9yRTt8B3JtOXwK8CAhgJrC8nzTNA6YCm96rJiAX2JWOc9LpnH7WeBdw+xHWHZs+zz5gePr8q/19LQAlwNR0OgPYkdYyYGx5DI0DzZYCCKfTHmB52kZPA1en8x8GvppO3ww8nE5fDTx1LP39rPFPwBVHWP+U3DvHC4OlRD8DqJJS7pJSpoAngfmnWNOhzAceS6cfAz7VI//P0mEZkC2EKOnrnUsp/w9nroD3o+lC4FUpZURK2Qa8ClzUzxqPxnzgSSmlLqXcDVThXAf9ei1IKeullGvS6S5gK848yAPGlsfQeDROlS2llDKa/teTDhI4B1iYzj/UlgdsvBA4VwghjqG/PzUejVNy7xyPweLojzSB+bEu7P5GAq8IIVYLZ+JzgCIpZX063QAUpdOnUvvJajpVWm9Nvwb/4UCVyEDQmK46mIJTyhuQtjxEIwwwWwohVCHEOqAJx/lVA+1SSvMI++zWk17eAeT1t85DNUopD9jyR2lbPiCE8B2q8RAtp9RHDRZHP9CYK6WcClwM3CKEmNdzoXTe5QZUv9aBqCnNb4ERwGSgHvjZqZXjIIQIA88C35BSdvZcNlBseQSNA86WUkpLSjkZZz7pGcBpp1jSYRyqUQgxHrgTR+sZONUx3zmFEo/LYHH0A2oScillbTpuAp7HuYAbD1TJpOOm9OqnUvvJavrAtUopG9M3mg08wsFX8lOmUQjhwXGgf5VSPpfOHlC2PJLGgWjLA0gp24E3gFk41R0Hpjntuc9uPenlWUDrB6Wzh8aL0tVjUkqpA39kANnySAwWR38iE5h/IAghQkKIjANp4AJgE70nUP8isCidXgxcm26tnwl09KgC6G9OVtPLwAVCiJz0a/8F6bx+45D2istxbHlA45Emnu/XayFdJ/wosFVK+fMeiwaMLY+mcQDaskAIkZ1OB4DzcdoT3gCuSK92qC0P2PgK4PX029PR9PeXxm09HuoCpw
2hpy0HxL3Tiw+q1be/A05r9w6cOr7vnkIdlTg9ANYDmw9owalL/BewE3gNyJUHW/UfTOveCEzvJ11/w3ldN3DqB69/L5qA63Aau6qA//cBaHw8rWEDzk1U0mP976Y1bgcu/iCuBWAuTrXMBmBdOlwykGx5DI0DzZYTgbVpPZuA7/e4h1ak7fIM4Evn+9P/V6WXVx5Pfz9qfD1ty03AXzjYM+eU3DvHC+4QCC4uLi6DnMFSdePi4uLichRcR+/i4uIyyHEdvYuLi8sgx3X0Li4uLoMc19G7uLi4DHJcR+/i4uIyyHEdvYuLi8sg5/8DQVJW+80Ub8cAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_gradients(good_trial)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also print inputs and outputs from the model. For instance, let's print the 42nd sample of the 2700th batch, as seen by the network. \n", + "\n", + "Notice that we have " + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD4CAYAAAAq5pAIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAALkklEQVR4nO3dX4hc9RnG8eeJ1RsjmlS6xBir1dxItbEsoVCpFlHSgEYRxCAlpdL1QkEhFw32wkApSKnWXgkrBpNilYCKQaWaBmnam5JVkpg/1aQaMcsmqQQ1ubK6by/mpKxx58xmzjlzJn2/H1hm5vfOzHk5+uT8m5mfI0IA/v/Na7sBAINB2IEkCDuQBGEHkiDsQBLfGOTCbHPqH2hYRHi28UpbdtsrbL9r+6DtdVXeC0Cz3O91dtvnSHpP0s2SDkvaIWl1ROwreQ1bdqBhTWzZl0s6GBHvR8Tnkp6XtKrC+wFoUJWwL5b00YzHh4uxr7A9ZnvC9kSFZQGoqPETdBExLmlcYjceaFOVLfukpCUzHl9ajAEYQlXCvkPSUttX2D5P0t2SttTTFoC69b0bHxFf2H5A0uuSzpG0ISL21tYZgFr1femtr4VxzA40rpEP1QA4exB2IAnCDiRB2IEkCDuQBGEHkiDsQBKEHUiCsANJEHYgCcIOJEHYgSQIO5AEYQeSIOxAEoQdSIKwA0kQdiAJwg4kQdiBJAg7kARhB5Ig7EAShB1IgrADSRB2IAnCDiRB2IEkCDuQRN9TNuPscMkll5TWX3311dL6tddeW1p/4oknSutr164trWNwKoXd9iFJJyR9KemLiBitoykA9atjy/7jiPi4hvcB0CCO2YEkqoY9JL1h+y3bY7M9wfaY7QnbExWXBaCCqrvx10fEpO1vSdpq+58RsX3mEyJiXNK4JNmOissD0KdKW/aImCxuj0l6SdLyOpoCUL++w277fNsXnLov6RZJe+pqDEC9quzGj0h6yfap9/lTRPy5lq5Qm8suu6y0fs0115TWI8qPvHrVMTz6DntEvC/pezX2AqBBXHoDkiDsQBKEHUiCsANJEHYgCcIOJEHYgSQIO5AEYQeSIOxAEoQdSIKwA0kQdiAJwg4kQdiBJAg7kARhB5Ig7EAShB1IgrADSRB2IAmmbEYlN9xwQ2n9wgsv7Fr79NNP624HJdiyA0kQdiAJwg4kQdiBJAg7kARhB5Ig7EASXGdPrphyu2+jo6Ol9fnz53etcZ19sHpu2W1vsH3M9p4ZYwttb7V9oLhd0GybAKqay278M5JWnDa2TtK2iFgqaVvxGMAQ6xn2iNgu6fhpw6skbSzub5R0e819AahZv8fsIxExVdw/Immk2xNtj0ka63M5AGpS+QRdRITtKKmPSxqXpLLnAWhWv5fejtpeJEnF7bH6WgLQhH7DvkXSmuL+Gkkv19MOgKb03I23/ZykGyVdbPuwp
EckPSpps+17JX0o6a4mm0RzIqodWU1PT9fUCZrWM+wRsbpL6aaaewHQID4uCyRB2IEkCDuQBGEHkiDsQBKEHUiCsANJEHYgCcIOJEHYgSQIO5AEYQeSIOxAEoQdSIKwA0kQdiAJwg4kQdiBJAg7kARhB5Ig7EAShB1IgrADSRB2IAnCDiRB2IEkCDuQBGEHkiDsQBI9Z3HF2e3IkSOl9YMHD5bWly5dWlqfN4/txdmi538p2xtsH7O9Z8bYetuTtncWfyubbRNAVXP5Z/kZSStmGf99RCwr/l6rty0AdesZ9ojYLun4AHoB0KAqB1wP2N5d7OYv6PYk22O2J2xPVFgWgIr6DfuTkq6UtEzSlKTHuj0xIsYjYjQiRvtcFoAa9BX2iDgaEV9GxLSkpyQtr7ctAHXrK+y2F814eIekPd2eC2A49LzObvs5STdKutj2YUmPSLrR9jJJIemQpPsa7BEVHDp0qLS+a9eu0vpVV11VWp+enj7TltCSnmGPiNWzDD/dQC8AGsTHn4AkCDuQBGEHkiDsQBKEHUiCsANJEHYgCcIOJEHYgSQIO5AEYQeSIOxAEoQdSIKfkkajLrrooq61ycnJAXYCtuxAEoQdSIKwA0kQdiAJwg4kQdiBJAg7kIQjYnALswe3MMzJPffcU1rfuHFjad12aX3z5s1da6tXz/bDxagqImb9j8KWHUiCsANJEHYgCcIOJEHYgSQIO5AEYQeS4PvsyfW6Tt6rPm9e+fai1+sxOD237LaX2H7T9j7be20/WIwvtL3V9oHidkHz7QLo11x247+QtDYirpb0A0n3275a0jpJ2yJiqaRtxWMAQ6pn2CNiKiLeLu6fkLRf0mJJqySd+izlRkm3N9UkgOrO6Jjd9uWSrpP0D0kjETFVlI5IGunymjFJY/23CKAOcz4bb3u+pBckPRQRn82sRefbNLN+ySUixiNiNCJGK3UKoJI5hd32ueoE/dmIeLEYPmp7UVFfJOlYMy0CqEPP3Xh3rp08LWl/RDw+o7RF0hpJjxa3LzfSIRp16623ltZ7fQV6enq60usxOHM5Zv+hpJ9Kesf2zmLsYXVCvtn2vZI+lHRXMy0CqEPPsEfE3yV1+2TETfW2A6ApfFwWSIKwA0kQdiAJwg4kQdiBJAg7kARhB5Ig7EAShB1IgrADSRB2IAnCDiRB2IEk+Cnp5DZt2lRav/POOyu9/yeffFLp9agPW3YgCcIOJEHYgSQIO5AEYQeSIOxAEoQdSMKD/F1v2/yI+JBZsmRJaf2DDz4orW/fvr20ftttt3WtnTx5svS16E9EzPpr0GzZgSQIO5AEYQeSIOxAEoQdSIKwA0kQdiCJntfZbS+RtEnSiKSQNB4Rf7C9XtIvJP27eOrDEfFaj/fiOjvQsG7X2ecS9kWSFkXE27YvkPSWpNvVmY/9ZET8bq5NEHaged3CPpf52ackTRX3T9jeL2lxve0BaNoZHbPbvlzSdZL+UQw9YHu37Q22F3R5zZjtCdsTlToFUMmcPxtve76kv0r6TUS8aHtE0sfqHMf/Wp1d/Z/3eA9244GG9X3MLkm2z5X0iqTXI+LxWeqXS3olIr7b430IO9Cwvr8IY9uSnpa0f2bQixN3p9whaU/VJgE0Zy5n46+X9DdJ70iaLoYflrRa0jJ1duMPSbqvOJlX9l5s2YGGVdqNrwthB5rH99mB5Ag7kARhB5Ig7EAShB1IgrADSRB2IAnCDiRB2IEkCDuQBGEHkiDsQBKEHUiCsANJ9PzByZp9LOnDGY8vLsaG0bD2Nqx9SfTWrzp7+3a3wkC/z/61hdsTETHaWgMlhrW3Ye1Lord+Dao3duOBJAg7kETbYR9vefllhrW3Ye1Lord+DaS3Vo/ZAQxO21t2AANC2IEkWgm77RW237V90Pa6NnroxvYh2+/Y3tn2/HTFHHrHbO+ZMbbQ9lbbB4rbWefYa6m39bYni3W30/bKlnpbYvtN2/ts77X9YDHe6ror6Wsg623gx
+y2z5H0nqSbJR2WtEPS6ojYN9BGurB9SNJoRLT+AQzbP5J0UtKmU1Nr2f6tpOMR8WjxD+WCiPjlkPS2Xmc4jXdDvXWbZvxnanHd1Tn9eT/a2LIvl3QwIt6PiM8lPS9pVQt9DL2I2C7p+GnDqyRtLO5vVOd/loHr0ttQiIipiHi7uH9C0qlpxltddyV9DUQbYV8s6aMZjw9ruOZ7D0lv2H7L9ljbzcxiZMY0W0ckjbTZzCx6TuM9SKdNMz40666f6c+r4gTd110fEd+X9BNJ9xe7q0MpOsdgw3Tt9ElJV6ozB+CUpMfabKaYZvwFSQ9FxGcza22uu1n6Gsh6ayPsk5KWzHh8aTE2FCJisrg9JukldQ47hsnRUzPoFrfHWu7nfyLiaER8GRHTkp5Si+uumGb8BUnPRsSLxXDr6262vga13toI+w5JS21fYfs8SXdL2tJCH19j+/zixIlsny/pFg3fVNRbJK0p7q+R9HKLvXzFsEzj3W2acbW87lqf/jwiBv4naaU6Z+T/JelXbfTQpa/vSNpV/O1tuzdJz6mzW/cfdc5t3Cvpm5K2STog6S+SFg5Rb39UZ2rv3eoEa1FLvV2vzi76bkk7i7+Vba+7kr4Gst74uCyQBCfogCQIO5AEYQeSIOxAEoQdSIKwA0kQdiCJ/wIrTKNeLgYTHwAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# The raw tensor\n", + "raw_t = good_trial.tensor('Net_input_0').step(2700).value[42]\n", + "# We have to undo the transformations in 'transformer' above. First of all, multiply by 255\n", + "raw_t = raw_t * 255\n", + "# Then reshape from a 784-long vector to a 28x28 square.\n", + "input_image = raw_t.reshape(28,28)\n", + "plt.imshow(input_image, cmap=plt.get_cmap('gray'))\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also plot the relative values emitted by the network. Notice that the last layer is of type `nn.Linear(500, 10)`: it will emit 10 separate confidences, one for each 0-9 digit. The one with the highest output is the predicted value.\n", + "\n", + "We can capture and plot the network output for the same sample." + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXkAAAD4CAYAAAAJmJb0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAP8UlEQVR4nO3df4xlZX3H8feHpWKXxmLCJgjLMGu6tAFiUScoNZq20EqpcaPWhGaqsf4xpQW1PxIj3aQ2mk1MNW1tsNXxR5Om0xKDUoiiwKaN6T+os7Kl/DQLsrCUpqOm2HYNuvLtH/euzC4zuzt77p1z95n3K7mZe55z5zzfPWE/PHvOc5+TqkKS1KbT+i5AkjQ+hrwkNcyQl6SGGfKS1DBDXpIadnrfBSx39tln1/T0dN9lSNIpZc+ePd+uqi0r7ZuokJ+enmZxcbHvMiTplJJk/2r7vFwjSQ0z5CWpYYa8JDXMkJekhhnyktSwsYd8kquSPJxkX5L3jbu/viwswPQ0nHba4OfCQt8VSdKYp1Am2QR8DPgV4ADw9SS3VdUD4+x3vS0swNwcHDw42N6/f7ANMDvbX12SNO6R/GXAvqp6tKp+ANwE7Bhzn+tu587nAv6wgwcH7ZLUp3GH/HnAE8u2DwzbfizJXJLFJItLS0tjLmc8Hn98be2StF56v/FaVfNVNVNVM1u2rPit3Ik3NbW2dklaL+MO+SeB85dtbx22NWXXLti8+ci2zZsH7ZLUp3GH/NeB7Um2JXkBcA1w25j7XHezszA/DxdcAMng5/y8N10l9W+ss2uq6lCS64E7gE3AZ6rq/nH22ZfZWUNd0uQZ+yqUVXU7cPu4+5EkPV/vN14lSeNjyEtSwwx5SWqYIS9JDTPkJalhhrwkNcyQl6SGGfKS1DBDXpIaZshLUsMMeUlqmCEvSQ0z5CWpYYa8JDXMkJekhhnyktQwQ16SGmbIS1LDDHlJapghL0kNG1vIJ/nTJE8m2Tt8XT2uviRJKzt9zMf/i6r6yJj7kCStwss1ktSwcYf89UnuTfKZJC9e6QNJ5pIsJllcWloaczmStLGkqk7+l5PdwDkr7NoJ3A18Gyjgg8BLquqdxzrezMxMLS4unnQ9krQRJdlTVTMr7et0Tb6qrjzBAj4JfKFLX5KktRvn7JqXLNt8E3DfuPqSJK1snLNr/izJpQwu1zwG/M4Y+5IkrWBsIV9VbxvXsSVJJ8YplJLUMENekhpmyEtSwwx5SWqYIS9JDTPkJalhhrwkNcyQl6SGGfKS1DBDXpIaZshLUsMMeUlqmCEvSQ0z5CWpYYa8JDXMkJekhhnyktQwQ16SGmbIS1LDDHlJalinkE/y1iT3J3k2ycxR+25Isi/Jw0le361MSdLJOL3j798HvBn4xPLGJBcB1wAXA+cCu5NcWFU/6tifJGkNOo3kq+rBqnp4hV07gJuq6pmq+hawD7isS1+SpLUb1zX584Anlm0fGLY9T5K5JItJFpeWlsZUjiRtTMe9XJNkN3DOCrt2VtWtXQuoqnlgHmBmZqa6Hk+S9JzjhnxVXXkSx30SOH/Z9tZhmyRpHY3rcs1twDVJzkiyDdgOfG1MfUmSVtF1CuWbkhwALge+mOQOgKq6H/gs8ADwZeA6Z9ZI0vrrNIWyqm4Bblll3y5gV5fjS5K68RuvktQwQ16SGmbIS1LDDHlJapghL0kNM+QlqWGGvCQ1zJCXpIYZ8pLUMENekhpmyEtSwwx5SWqYIS9JDTPkJalhhrwkNcyQl6SGGfKS1DBDXpIaZshLUsO6Psj7rUnuT/Jskpll7dNJvp9k7/D18e6lSpLWqtODvIH7gDcDn1hh3yNVdWnH40uSOugU8lX1IECS0VQjSRqpcV6T35bkniRfSfLaMfYjSVrFcUfySXYD56ywa2dV3brKrz0FTFXVd5K8EvinJBdX1fdWOP4cMAcwNTV14pVLko7ruCFfVVeu9aBV9QzwzPD9niS
PABcCiyt8dh6YB5iZmam19iVJWt1YLtck2ZJk0/D9S4HtwKPj6EuStLquUyjflOQAcDnwxSR3DHe9Drg3yV7gZuDaqvput1IlSWvVdXbNLcAtK7R/Dvhcl2NLkrrzG6+S1DBDXpIaZshLUsMMeUlqmCEvSQ0z5CWpYYa8JDXMkJekhhnyktQwQ16SGmbIS1LDDHlJapghL0kNM+QlqWGGvCQ1zJCXpIYZ8pLUMENekhpmyEtSwwx5SWpYp5BP8uEkDyW5N8ktSc5atu+GJPuSPJzk9d1LlSStVdeR/F3AJVX1MuCbwA0ASS4CrgEuBq4C/jrJpo59SZLWqFPIV9WdVXVouHk3sHX4fgdwU1U9U1XfAvYBl3XpS5K0dqO8Jv9O4EvD9+cBTyzbd2DY9jxJ5pIsJllcWloaYTmSpNOP94Eku4FzVti1s6puHX5mJ3AIWFhrAVU1D8wDzMzM1Fp/X5K0uuOGfFVdeaz9Sd4BvAG4oqoOh/STwPnLPrZ12CZJWkddZ9dcBbwXeGNVHVy26zbgmiRnJNkGbAe+1qUvSdLaHXckfxw3AmcAdyUBuLuqrq2q+5N8FniAwWWc66rqRx37kiStUaeQr6qfOca+XcCuLseXJHXjN14lqWGGvCQ1zJCXpIYZ8pLUMENekhpmyEtSwwx5SWqYIS9JPVpYgOlpOO20wc+FNa8Admxdv/EqSTpJCwswNwcHh4vC7N8/2AaYnR1NH47kJaknO3c+F/CHHTw4aB8VQ16SevL442trPxmGvCT1ZGpqbe0nw5CXpJ7s2gWbNx/ZtnnzoH1UDHlJ6snsLMzPwwUXQDL4OT8/upuu4OwaSerV7OxoQ/1ojuQlqWGGvCQ1zJCXpIYZ8pLUMENekhrWKeSTfDjJQ0nuTXJLkrOG7dNJvp9k7/D18dGUK0lai64j+buAS6rqZcA3gRuW7Xukqi4dvq7t2I8k6SR0CvmqurOqDg037wa2di9JkjQqo7wm/07gS8u2tyW5J8lXkrx2tV9KMpdkMcni0tLSCMuRJB035JPsTnLfCq8dyz6zEzgEHF7u/ilgqqpeDvwh8A9JXrTS8atqvqpmqmpmy5Yt3f9EknQCxv2wjklx3GUNqurKY+1P8g7gDcAVVVXD33kGeGb4fk+SR4ALgcWuBUtSV+vxsI5J0XV2zVXAe4E3VtXBZe1bkmwavn8psB14tEtfktowCSPo9XhYx6ToukDZjcAZwF1JAO4ezqR5HfCBJD8EngWurarvduxL0iluUkbQ6/GwjkmR4RWWiTAzM1OLi17RkVo1PT0I9qNdcAE89tjGq2NUkuypqpmV9vmNV0nrZlJG0OvxsI5JYchLWjfr8bi7E7EeD+uYFIa8tEFMwg3PSRpBz84OLs08++zgZ4sBD4a8tCEcvuG5fz9UPXfDc72DfiONoCeFN16lDaC1G406kjdepQ1uUm54av0Z8tIGMCk3PLX+DHlpA5ikG55aX4a8tA76ntniDc+Nq+uyBpKOY1K+yj87a6hvRI7kpTHbSIthafIY8tKYObNFfTLkpTFzZov6ZMhLY+bMFvXJkFfT+p7VAs5sUb+cXaNmTcqslsP9GerqgyN5NctZLZIhr4Y5q0Uy5NUwZ7VIhrwa5qwWaQQhn+SDSe5NsjfJnUnOHbYnyV8l2Tfc/4ru5Uonzlkt0ggeGpLkRVX1veH7dwMXVdW1Sa4G3gVcDbwK+GhVvepYx/KhIZK0dmN9aMjhgB86Ezj8f40dwN/VwN3AWUle0rU/HdskzAuXNDlGMk8+yS7g7cDTwC8Nm88Dnlj2sQPDtqeO+t05YA5gyjtinUzSvHBJk+GERvJJdie5b4XXDoCq2llV5wMLwPVrKaCq5qtqpqpmtmzZsvY/gX7MeeGSjnZCI/mquvIEj7cA3A68H3gSOH/Zvq3DNo2J88IlHW0Us2u2L9vcATw0fH8b8PbhLJtXA09X1VPPO4BGxnnhko42innyHxpeurkX+FXgPcP224F
HgX3AJ4HfG0FfOgbnhUs6Wucbr1X1llXaC7iu6/F14g7fXN25c3CJZmpqEPDedJU2LlehbIyrHUpazmUNJKlhhrwkNcyQl6SGGfIaC5dXkCaDN141ci6vIE0OR/IaOZdXkCaHIa+Rc3kFaXIY8ho5l1eQJochr5FzeQVpchjyGjkfuydNDmfXaCxcXkGaDI7kJalhhrwkNcyQl6SGGfKS1LAmQt51UiRpZaf87BrXSZGk1Z3yI3nXSZGk1XUK+SQfTHJvkr1J7kxy7rD9F5M8PWzfm+RPRlPu87lOiiStrutI/sNV9bKquhT4ArA8zP+1qi4dvj7QsZ9VuU6KJK2uU8hX1feWbZ4JVLdy1s51UiRpdZ2vySfZleQJYJYjR/KXJ/m3JF9KcnHXflbjOimStLpUHXvwnWQ3cM4Ku3ZW1a3LPncD8MKqen+SFwHPVtX/Jrka+GhVbV/l+HPAHMDU1NQr9+/ff5J/FEnamJLsqaqZFfcdL+TX0MkUcHtVXbLCvseAmar69rGOMTMzU4uLiyOpR5I2imOFfNfZNctH5zuAh4bt5yTJ8P1lw36+06UvSdLadf0y1IeS/CzwLLAfuHbY/hvA7yY5BHwfuKZG9U8GSdIJ6xTyVfWWVdpvBG7scmxJUnen/DdeJUmrG9mN11FIssTgss/JOhs45s3dDcRzcSTPx3M8F0dq4XxcUFVbVtoxUSHfVZLF1e4wbzSeiyN5Pp7juThS6+fDyzWS1DBDXpIa1lrIz/ddwATxXBzJ8/Ecz8WRmj4fTV2TlyQdqbWRvCRpGUNekhrWRMgnuSrJw0n2JXlf3/X0Kcn5Sf4lyQNJ7k/ynr5r6luSTUnuSfKFvmvpW5Kzktyc5KEkDya5vO+a+pTkD4Z/T+5L8o9JXth3TaN2yod8kk3Ax4BfAy4CfjPJRf1W1atDwB9V1UXAq4HrNvj5AHgP8GDfRUyIjwJfrqqfA36eDXxekpwHvJvBCrmXAJuAa/qtavRO+ZAHLgP2VdWjVfUD4CYGK2JuSFX1VFV9Y/j+fxj8JT6v36r6k2Qr8OvAp/qupW9Jfhp4HfBpgKr6QVX9d79V9e504CeTnA5sBv6j53pGroWQPw94Ytn2ATZwqC2XZBp4OfDVfivp1V8C72WwUupGtw1YAv52ePnqU0nO7LuovlTVk8BHgMeBp4Cnq+rOfqsavRZCXitI8lPA54DfP+pZvBtGkjcA/1VVe/quZUKcDrwC+Juqejnwf8CGvYeV5MUM/tW/DTgXODPJb/Vb1ei1EPJPAucv2946bNuwkvwEg4BfqKrP911Pj14DvHH4ZLKbgF9O8vf9ltSrA8CBqjr8L7ubGYT+RnUl8K2qWqqqHwKfB36h55pGroWQ/zqwPcm2JC9gcOPktp5r6s3wiVyfBh6sqj/vu54+VdUNVbW1qqYZ/Hfxz1XV3EjtRFXVfwJPDB/0A3AF8ECPJfXtceDVSTYP/95cQYM3ors+Gap3VXUoyfXAHQzujn+mqu7vuaw+vQZ4G/DvSfYO2/64qm7vsSZNjncBC8MB0aPAb/dcT2+q6qtJbga+wWBW2j00uMSByxpIUsNauFwjSVqFIS9JDTPkJalhhrwkNcyQl6SGGfKS1DBDXpIa9v83PK0Ut0h3xAAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The network predicted the value: 1\n" + ] + } + ], + "source": [ + "plt.plot(good_trial.tensor('Net_output0').step(2700).value[42], 'bo')\n", + "plt.show()\n", + "print('The network predicted the value: {}'.format(np.argmax(good_trial.tensor('Net_output0').step(2700).value[42])))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Vanishing Gradient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We have now worked through some of the basics. Let's pretend we are debugging a real problem: the Vanishing Gradient. When training a network, if the `learning_rate` is too high we will end up with a Vanishing Gradient. Let's set `learning_rate=1`.\n", + "\n", + "Notice how the accuracy remains at around ~10% - no better than random." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Train Epoch: 0 [0/60000 (0%)]\tLoss: 2.330259\n", + "Train Epoch: 0 [640/60000 (1%)]\tLoss: 2.459480\n", + "Train Epoch: 0 [1280/60000 (2%)]\tLoss: 2.342018\n", + "Train Epoch: 0 [1920/60000 (3%)]\tLoss: 2.370183\n", + "Train Epoch: 0 [2560/60000 (4%)]\tLoss: 2.306495\n", + "Train Epoch: 0 [3200/60000 (5%)]\tLoss: 2.287591\n", + "Train Epoch: 0 [3840/60000 (6%)]\tLoss: 2.284006\n", + "Train Epoch: 0 [4480/60000 (7%)]\tLoss: 2.360036\n", + "Train Epoch: 0 [5120/60000 (9%)]\tLoss: 2.298006\n", + "Train Epoch: 0 [5760/60000 (10%)]\tLoss: 2.341758\n", + "Train Epoch: 0 [6400/60000 (11%)]\tLoss: 2.304522\n", + "Train Epoch: 0 [7040/60000 (12%)]\tLoss: 2.286000\n", + "Train Epoch: 0 [7680/60000 (13%)]\tLoss: 2.293531\n", + "Train Epoch: 0 [8320/60000 (14%)]\tLoss: 2.358820\n", + "Train Epoch: 0 [8960/60000 (15%)]\tLoss: 2.307475\n", + "Train Epoch: 0 [9600/60000 (16%)]\tLoss: 
2.310409\n", + "Train Epoch: 0 [10240/60000 (17%)]\tLoss: 2.324923\n", + "Train Epoch: 0 [10880/60000 (18%)]\tLoss: 2.341324\n", + "Train Epoch: 0 [11520/60000 (19%)]\tLoss: 2.346567\n", + "Train Epoch: 0 [12160/60000 (20%)]\tLoss: 2.398468\n", + "Train Epoch: 0 [12800/60000 (21%)]\tLoss: 2.327744\n", + "Train Epoch: 0 [13440/60000 (22%)]\tLoss: 2.355408\n", + "Train Epoch: 0 [14080/60000 (23%)]\tLoss: 2.342527\n", + "Train Epoch: 0 [14720/60000 (25%)]\tLoss: 2.381174\n", + "Train Epoch: 0 [15360/60000 (26%)]\tLoss: 2.327620\n", + "Train Epoch: 0 [16000/60000 (27%)]\tLoss: 2.312094\n", + "Train Epoch: 0 [16640/60000 (28%)]\tLoss: 2.349320\n", + "Train Epoch: 0 [17280/60000 (29%)]\tLoss: 2.325104\n", + "Train Epoch: 0 [17920/60000 (30%)]\tLoss: 2.410883\n", + "Train Epoch: 0 [18560/60000 (31%)]\tLoss: 2.312054\n", + "Train Epoch: 0 [19200/60000 (32%)]\tLoss: 2.328432\n", + "Train Epoch: 0 [19840/60000 (33%)]\tLoss: 2.326895\n", + "Train Epoch: 0 [20480/60000 (34%)]\tLoss: 2.294943\n", + "Train Epoch: 0 [21120/60000 (35%)]\tLoss: 2.310558\n", + "Train Epoch: 0 [21760/60000 (36%)]\tLoss: 2.353846\n", + "Train Epoch: 0 [22400/60000 (37%)]\tLoss: 2.319908\n", + "Train Epoch: 0 [23040/60000 (38%)]\tLoss: 2.286135\n", + "Train Epoch: 0 [23680/60000 (39%)]\tLoss: 2.337246\n", + "Train Epoch: 0 [24320/60000 (41%)]\tLoss: 2.360455\n", + "Train Epoch: 0 [24960/60000 (42%)]\tLoss: 2.375340\n", + "Train Epoch: 0 [25600/60000 (43%)]\tLoss: 2.329563\n", + "Train Epoch: 0 [26240/60000 (44%)]\tLoss: 2.395788\n", + "Train Epoch: 0 [26880/60000 (45%)]\tLoss: 2.309679\n", + "Train Epoch: 0 [27520/60000 (46%)]\tLoss: 2.316720\n", + "Train Epoch: 0 [28160/60000 (47%)]\tLoss: 2.346542\n", + "Train Epoch: 0 [28800/60000 (48%)]\tLoss: 2.307389\n", + "Train Epoch: 0 [29440/60000 (49%)]\tLoss: 2.317679\n", + "Train Epoch: 0 [30080/60000 (50%)]\tLoss: 2.395548\n", + "Train Epoch: 0 [30720/60000 (51%)]\tLoss: 2.491510\n", + "Train Epoch: 0 [31360/60000 (52%)]\tLoss: 2.282305\n", + "Train 
Epoch: 0 [32000/60000 (53%)]\tLoss: 2.254564\n", + "Train Epoch: 0 [32640/60000 (54%)]\tLoss: 2.333660\n", + "Train Epoch: 0 [33280/60000 (55%)]\tLoss: 2.362551\n", + "Train Epoch: 0 [33920/60000 (57%)]\tLoss: 2.384102\n", + "Train Epoch: 0 [34560/60000 (58%)]\tLoss: 2.326107\n", + "Train Epoch: 0 [35200/60000 (59%)]\tLoss: 2.320798\n", + "Train Epoch: 0 [35840/60000 (60%)]\tLoss: 2.464493\n", + "Train Epoch: 0 [36480/60000 (61%)]\tLoss: 2.331449\n", + "Train Epoch: 0 [37120/60000 (62%)]\tLoss: 2.415666\n", + "Train Epoch: 0 [37760/60000 (63%)]\tLoss: 2.473129\n", + "Train Epoch: 0 [38400/60000 (64%)]\tLoss: 2.397358\n", + "Train Epoch: 0 [39040/60000 (65%)]\tLoss: 2.407380\n", + "Train Epoch: 0 [39680/60000 (66%)]\tLoss: 2.462022\n", + "Train Epoch: 0 [40320/60000 (67%)]\tLoss: 2.399495\n", + "Train Epoch: 0 [40960/60000 (68%)]\tLoss: 2.306419\n", + "Train Epoch: 0 [41600/60000 (69%)]\tLoss: 2.341526\n", + "Train Epoch: 0 [42240/60000 (70%)]\tLoss: 2.294206\n", + "Train Epoch: 0 [42880/60000 (71%)]\tLoss: 2.290251\n", + "Train Epoch: 0 [43520/60000 (72%)]\tLoss: 2.317634\n", + "Train Epoch: 0 [44160/60000 (74%)]\tLoss: 2.358360\n", + "Train Epoch: 0 [44800/60000 (75%)]\tLoss: 2.332942\n", + "Train Epoch: 0 [45440/60000 (76%)]\tLoss: 2.312563\n", + "Train Epoch: 0 [46080/60000 (77%)]\tLoss: 2.328177\n", + "Train Epoch: 0 [46720/60000 (78%)]\tLoss: 2.319674\n", + "Train Epoch: 0 [47360/60000 (79%)]\tLoss: 2.349114\n", + "Train Epoch: 0 [48000/60000 (80%)]\tLoss: 2.294027\n", + "Train Epoch: 0 [48640/60000 (81%)]\tLoss: 2.344978\n", + "Train Epoch: 0 [49280/60000 (82%)]\tLoss: 2.322978\n", + "Train Epoch: 0 [49920/60000 (83%)]\tLoss: 2.308064\n", + "Train Epoch: 0 [50560/60000 (84%)]\tLoss: 2.344061\n", + "Train Epoch: 0 [51200/60000 (85%)]\tLoss: 2.403563\n", + "Train Epoch: 0 [51840/60000 (86%)]\tLoss: 2.312183\n", + "Train Epoch: 0 [52480/60000 (87%)]\tLoss: 2.287836\n", + "Train Epoch: 0 [53120/60000 (88%)]\tLoss: 2.333920\n", + "Train Epoch: 0 [53760/60000 
(90%)]\tLoss: 2.312499\n", + "Train Epoch: 0 [54400/60000 (91%)]\tLoss: 2.359949\n", + "Train Epoch: 0 [55040/60000 (92%)]\tLoss: 2.363973\n", + "Train Epoch: 0 [55680/60000 (93%)]\tLoss: 2.304453\n", + "Train Epoch: 0 [56320/60000 (94%)]\tLoss: 2.454728\n", + "Train Epoch: 0 [56960/60000 (95%)]\tLoss: 2.338699\n", + "Train Epoch: 0 [57600/60000 (96%)]\tLoss: 2.289698\n", + "Train Epoch: 0 [58240/60000 (97%)]\tLoss: 2.325969\n", + "Train Epoch: 0 [58880/60000 (98%)]\tLoss: 2.334784\n", + "Train Epoch: 0 [59520/60000 (99%)]\tLoss: 2.363783\n", + "Train Epoch: 1 [0/60000 (0%)]\tLoss: 2.304468\n", + "Train Epoch: 1 [640/60000 (1%)]\tLoss: 2.237942\n", + "Train Epoch: 1 [1280/60000 (2%)]\tLoss: 2.379854\n", + "Train Epoch: 1 [1920/60000 (3%)]\tLoss: 2.321807\n", + "Train Epoch: 1 [2560/60000 (4%)]\tLoss: 2.326054\n", + "Train Epoch: 1 [3200/60000 (5%)]\tLoss: 2.366278\n", + "Train Epoch: 1 [3840/60000 (6%)]\tLoss: 2.262960\n", + "Train Epoch: 1 [4480/60000 (7%)]\tLoss: 2.338786\n", + "Train Epoch: 1 [5120/60000 (9%)]\tLoss: 2.378443\n", + "Train Epoch: 1 [5760/60000 (10%)]\tLoss: 2.333674\n", + "Train Epoch: 1 [6400/60000 (11%)]\tLoss: 2.328306\n", + "Train Epoch: 1 [7040/60000 (12%)]\tLoss: 2.338466\n", + "Train Epoch: 1 [7680/60000 (13%)]\tLoss: 2.356887\n", + "Train Epoch: 1 [8320/60000 (14%)]\tLoss: 2.377309\n", + "Train Epoch: 1 [8960/60000 (15%)]\tLoss: 2.312181\n", + "Train Epoch: 1 [9600/60000 (16%)]\tLoss: 2.397353\n", + "Train Epoch: 1 [10240/60000 (17%)]\tLoss: 2.364430\n", + "Train Epoch: 1 [10880/60000 (18%)]\tLoss: 2.379686\n", + "Train Epoch: 1 [11520/60000 (19%)]\tLoss: 2.351562\n", + "Train Epoch: 1 [12160/60000 (20%)]\tLoss: 2.350115\n", + "Train Epoch: 1 [12800/60000 (21%)]\tLoss: 2.244029\n", + "Train Epoch: 1 [13440/60000 (22%)]\tLoss: 2.360412\n", + "Train Epoch: 1 [14080/60000 (23%)]\tLoss: 2.315639\n", + "Train Epoch: 1 [14720/60000 (25%)]\tLoss: 2.389025\n", + "Train Epoch: 1 [15360/60000 (26%)]\tLoss: 2.397625\n", + "Train Epoch: 1 
[16000/60000 (27%)]\tLoss: 2.324974\n", + "Train Epoch: 1 [16640/60000 (28%)]\tLoss: 2.326982\n", + "Train Epoch: 1 [17280/60000 (29%)]\tLoss: 2.397022\n", + "Train Epoch: 1 [17920/60000 (30%)]\tLoss: 2.341864\n", + "Train Epoch: 1 [18560/60000 (31%)]\tLoss: 2.316780\n", + "Train Epoch: 1 [19200/60000 (32%)]\tLoss: 2.290725\n", + "Train Epoch: 1 [19840/60000 (33%)]\tLoss: 2.302054\n", + "Train Epoch: 1 [20480/60000 (34%)]\tLoss: 2.341123\n", + "Train Epoch: 1 [21120/60000 (35%)]\tLoss: 2.367768\n", + "Train Epoch: 1 [21760/60000 (36%)]\tLoss: 2.341992\n", + "Train Epoch: 1 [22400/60000 (37%)]\tLoss: 2.338322\n", + "Train Epoch: 1 [23040/60000 (38%)]\tLoss: 2.355606\n", + "Train Epoch: 1 [23680/60000 (39%)]\tLoss: 2.284112\n", + "Train Epoch: 1 [24320/60000 (41%)]\tLoss: 2.374856\n", + "Train Epoch: 1 [24960/60000 (42%)]\tLoss: 2.331543\n", + "Train Epoch: 1 [25600/60000 (43%)]\tLoss: 2.321192\n", + "Train Epoch: 1 [26240/60000 (44%)]\tLoss: 2.265647\n", + "Train Epoch: 1 [26880/60000 (45%)]\tLoss: 2.298278\n", + "Train Epoch: 1 [27520/60000 (46%)]\tLoss: 2.317490\n", + "Train Epoch: 1 [28160/60000 (47%)]\tLoss: 2.272723\n", + "Train Epoch: 1 [28800/60000 (48%)]\tLoss: 2.400963\n", + "Train Epoch: 1 [29440/60000 (49%)]\tLoss: 2.507440\n", + "Train Epoch: 1 [30080/60000 (50%)]\tLoss: 2.393094\n", + "Train Epoch: 1 [30720/60000 (51%)]\tLoss: 2.419714\n", + "Train Epoch: 1 [31360/60000 (52%)]\tLoss: 2.351777\n", + "Train Epoch: 1 [32000/60000 (53%)]\tLoss: 2.419491\n", + "Train Epoch: 1 [32640/60000 (54%)]\tLoss: 2.429312\n", + "Train Epoch: 1 [33280/60000 (55%)]\tLoss: 2.297789\n", + "Train Epoch: 1 [33920/60000 (57%)]\tLoss: 2.351755\n", + "Train Epoch: 1 [34560/60000 (58%)]\tLoss: 2.342575\n", + "Train Epoch: 1 [35200/60000 (59%)]\tLoss: 2.359362\n", + "Train Epoch: 1 [35840/60000 (60%)]\tLoss: 2.323574\n", + "Train Epoch: 1 [36480/60000 (61%)]\tLoss: 2.405147\n", + "Train Epoch: 1 [37120/60000 (62%)]\tLoss: 2.372452\n", + "Train Epoch: 1 [37760/60000 (63%)]\tLoss: 
2.360568\n", + "Train Epoch: 1 [38400/60000 (64%)]\tLoss: 2.419126\n", + "Train Epoch: 1 [39040/60000 (65%)]\tLoss: 2.283723\n", + "Train Epoch: 1 [39680/60000 (66%)]\tLoss: 2.336538\n", + "Train Epoch: 1 [40320/60000 (67%)]\tLoss: 2.346513\n", + "Train Epoch: 1 [40960/60000 (68%)]\tLoss: 2.304324\n", + "Train Epoch: 1 [41600/60000 (69%)]\tLoss: 2.341439\n", + "Train Epoch: 1 [42240/60000 (70%)]\tLoss: 2.361294\n", + "Train Epoch: 1 [42880/60000 (71%)]\tLoss: 2.406241\n", + "Train Epoch: 1 [43520/60000 (72%)]\tLoss: 2.300334\n", + "Train Epoch: 1 [44160/60000 (74%)]\tLoss: 2.309165\n", + "Train Epoch: 1 [44800/60000 (75%)]\tLoss: 2.361495\n", + "Train Epoch: 1 [45440/60000 (76%)]\tLoss: 2.443631\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Train Epoch: 1 [46080/60000 (77%)]\tLoss: 2.314088\n", + "Train Epoch: 1 [46720/60000 (78%)]\tLoss: 2.392107\n", + "Train Epoch: 1 [47360/60000 (79%)]\tLoss: 2.323653\n", + "Train Epoch: 1 [48000/60000 (80%)]\tLoss: 2.294007\n", + "Train Epoch: 1 [48640/60000 (81%)]\tLoss: 2.314728\n", + "Train Epoch: 1 [49280/60000 (82%)]\tLoss: 2.355071\n", + "Train Epoch: 1 [49920/60000 (83%)]\tLoss: 2.349204\n", + "Train Epoch: 1 [50560/60000 (84%)]\tLoss: 2.307742\n", + "Train Epoch: 1 [51200/60000 (85%)]\tLoss: 2.347462\n", + "Train Epoch: 1 [51840/60000 (86%)]\tLoss: 2.405621\n", + "Train Epoch: 1 [52480/60000 (87%)]\tLoss: 2.423558\n", + "Train Epoch: 1 [53120/60000 (88%)]\tLoss: 2.382326\n", + "Train Epoch: 1 [53760/60000 (90%)]\tLoss: 2.320254\n", + "Train Epoch: 1 [54400/60000 (91%)]\tLoss: 2.369054\n", + "Train Epoch: 1 [55040/60000 (92%)]\tLoss: 2.321925\n", + "Train Epoch: 1 [55680/60000 (93%)]\tLoss: 2.321786\n", + "Train Epoch: 1 [56320/60000 (94%)]\tLoss: 2.339195\n", + "Train Epoch: 1 [56960/60000 (95%)]\tLoss: 2.323470\n", + "Train Epoch: 1 [57600/60000 (96%)]\tLoss: 2.421056\n", + "Train Epoch: 1 [58240/60000 (97%)]\tLoss: 2.329478\n", + "Train Epoch: 1 [58880/60000 (98%)]\tLoss: 2.315077\n", + 
"Train Epoch: 1 [59520/60000 (99%)]\tLoss: 2.340677\n", + "Train Epoch: 2 [0/60000 (0%)]\tLoss: 2.308716\n", + "Train Epoch: 2 [640/60000 (1%)]\tLoss: 2.318739\n", + "Train Epoch: 2 [1280/60000 (2%)]\tLoss: 2.294240\n", + "Train Epoch: 2 [1920/60000 (3%)]\tLoss: 2.417459\n", + "Train Epoch: 2 [2560/60000 (4%)]\tLoss: 2.342914\n", + "Train Epoch: 2 [3200/60000 (5%)]\tLoss: 2.373566\n", + "Train Epoch: 2 [3840/60000 (6%)]\tLoss: 2.307191\n", + "Train Epoch: 2 [4480/60000 (7%)]\tLoss: 2.340860\n", + "Train Epoch: 2 [5120/60000 (9%)]\tLoss: 2.305294\n", + "Train Epoch: 2 [5760/60000 (10%)]\tLoss: 2.383138\n", + "Train Epoch: 2 [6400/60000 (11%)]\tLoss: 2.337879\n", + "Train Epoch: 2 [7040/60000 (12%)]\tLoss: 2.336192\n", + "Train Epoch: 2 [7680/60000 (13%)]\tLoss: 2.339699\n", + "Train Epoch: 2 [8320/60000 (14%)]\tLoss: 2.323756\n", + "Train Epoch: 2 [8960/60000 (15%)]\tLoss: 2.305490\n", + "Train Epoch: 2 [9600/60000 (16%)]\tLoss: 2.325570\n", + "Train Epoch: 2 [10240/60000 (17%)]\tLoss: 2.288280\n", + "Train Epoch: 2 [10880/60000 (18%)]\tLoss: 2.306230\n", + "Train Epoch: 2 [11520/60000 (19%)]\tLoss: 2.342124\n", + "Train Epoch: 2 [12160/60000 (20%)]\tLoss: 2.346761\n", + "Train Epoch: 2 [12800/60000 (21%)]\tLoss: 2.428949\n", + "Train Epoch: 2 [13440/60000 (22%)]\tLoss: 2.404235\n", + "Train Epoch: 2 [14080/60000 (23%)]\tLoss: 2.278017\n", + "Train Epoch: 2 [14720/60000 (25%)]\tLoss: 2.326802\n", + "Train Epoch: 2 [15360/60000 (26%)]\tLoss: 2.358422\n", + "Train Epoch: 2 [16000/60000 (27%)]\tLoss: 2.343786\n", + "Train Epoch: 2 [16640/60000 (28%)]\tLoss: 2.293986\n", + "Train Epoch: 2 [17280/60000 (29%)]\tLoss: 2.336337\n", + "Train Epoch: 2 [17920/60000 (30%)]\tLoss: 2.321667\n", + "Train Epoch: 2 [18560/60000 (31%)]\tLoss: 2.371925\n", + "Train Epoch: 2 [19200/60000 (32%)]\tLoss: 2.374017\n", + "Train Epoch: 2 [19840/60000 (33%)]\tLoss: 2.320786\n", + "Train Epoch: 2 [20480/60000 (34%)]\tLoss: 2.342391\n", + "Train Epoch: 2 [21120/60000 (35%)]\tLoss: 2.308513\n", 
+ "Train Epoch: 2 [21760/60000 (36%)]\tLoss: 2.271694\n", + "Train Epoch: 2 [22400/60000 (37%)]\tLoss: 2.332782\n", + "Train Epoch: 2 [23040/60000 (38%)]\tLoss: 2.360431\n", + "Train Epoch: 2 [23680/60000 (39%)]\tLoss: 2.289818\n", + "Train Epoch: 2 [24320/60000 (41%)]\tLoss: 2.305624\n", + "Train Epoch: 2 [24960/60000 (42%)]\tLoss: 2.311587\n", + "Train Epoch: 2 [25600/60000 (43%)]\tLoss: 2.331149\n", + "Train Epoch: 2 [26240/60000 (44%)]\tLoss: 2.313762\n", + "Train Epoch: 2 [26880/60000 (45%)]\tLoss: 2.349113\n", + "Train Epoch: 2 [27520/60000 (46%)]\tLoss: 2.355408\n", + "Train Epoch: 2 [28160/60000 (47%)]\tLoss: 2.304258\n", + "Train Epoch: 2 [28800/60000 (48%)]\tLoss: 2.377938\n", + "Train Epoch: 2 [29440/60000 (49%)]\tLoss: 2.321165\n", + "Train Epoch: 2 [30080/60000 (50%)]\tLoss: 2.364525\n", + "Train Epoch: 2 [30720/60000 (51%)]\tLoss: 2.406883\n", + "Train Epoch: 2 [31360/60000 (52%)]\tLoss: 2.400862\n", + "Train Epoch: 2 [32000/60000 (53%)]\tLoss: 2.334538\n", + "Train Epoch: 2 [32640/60000 (54%)]\tLoss: 2.282245\n", + "Train Epoch: 2 [33280/60000 (55%)]\tLoss: 2.300971\n", + "Train Epoch: 2 [33920/60000 (57%)]\tLoss: 2.308848\n", + "Train Epoch: 2 [34560/60000 (58%)]\tLoss: 2.333123\n", + "Train Epoch: 2 [35200/60000 (59%)]\tLoss: 2.333816\n", + "Train Epoch: 2 [35840/60000 (60%)]\tLoss: 2.313128\n", + "Train Epoch: 2 [36480/60000 (61%)]\tLoss: 2.320728\n", + "Train Epoch: 2 [37120/60000 (62%)]\tLoss: 2.311455\n", + "Train Epoch: 2 [37760/60000 (63%)]\tLoss: 2.312425\n", + "Train Epoch: 2 [38400/60000 (64%)]\tLoss: 2.301049\n", + "Train Epoch: 2 [39040/60000 (65%)]\tLoss: 2.287769\n", + "Train Epoch: 2 [39680/60000 (66%)]\tLoss: 2.368213\n", + "Train Epoch: 2 [40320/60000 (67%)]\tLoss: 2.329561\n", + "Train Epoch: 2 [40960/60000 (68%)]\tLoss: 2.296645\n", + "Train Epoch: 2 [41600/60000 (69%)]\tLoss: 2.339840\n", + "Train Epoch: 2 [42240/60000 (70%)]\tLoss: 2.400887\n", + "Train Epoch: 2 [42880/60000 (71%)]\tLoss: 2.366787\n", + "Train Epoch: 2 
[43520/60000 (72%)]\tLoss: 2.371027\n", + "Train Epoch: 2 [44160/60000 (74%)]\tLoss: 2.338437\n", + "Train Epoch: 2 [44800/60000 (75%)]\tLoss: 2.389745\n", + "Train Epoch: 2 [45440/60000 (76%)]\tLoss: 2.362866\n", + "Train Epoch: 2 [46080/60000 (77%)]\tLoss: 2.440138\n", + "Train Epoch: 2 [46720/60000 (78%)]\tLoss: 2.340149\n", + "Train Epoch: 2 [47360/60000 (79%)]\tLoss: 2.426742\n", + "Train Epoch: 2 [48000/60000 (80%)]\tLoss: 2.357159\n", + "Train Epoch: 2 [48640/60000 (81%)]\tLoss: 2.400013\n", + "Train Epoch: 2 [49280/60000 (82%)]\tLoss: 2.337224\n", + "Train Epoch: 2 [49920/60000 (83%)]\tLoss: 2.369920\n", + "Train Epoch: 2 [50560/60000 (84%)]\tLoss: 2.327389\n", + "Train Epoch: 2 [51200/60000 (85%)]\tLoss: 2.318965\n", + "Train Epoch: 2 [51840/60000 (86%)]\tLoss: 2.357245\n", + "Train Epoch: 2 [52480/60000 (87%)]\tLoss: 2.421128\n", + "Train Epoch: 2 [53120/60000 (88%)]\tLoss: 2.365572\n", + "Train Epoch: 2 [53760/60000 (90%)]\tLoss: 2.359708\n", + "Train Epoch: 2 [54400/60000 (91%)]\tLoss: 2.317222\n", + "Train Epoch: 2 [55040/60000 (92%)]\tLoss: 2.371051\n", + "Train Epoch: 2 [55680/60000 (93%)]\tLoss: 2.360173\n", + "Train Epoch: 2 [56320/60000 (94%)]\tLoss: 2.345640\n", + "Train Epoch: 2 [56960/60000 (95%)]\tLoss: 2.355781\n", + "Train Epoch: 2 [57600/60000 (96%)]\tLoss: 2.335961\n", + "Train Epoch: 2 [58240/60000 (97%)]\tLoss: 2.336265\n", + "Train Epoch: 2 [58880/60000 (98%)]\tLoss: 2.383019\n", + "Train Epoch: 2 [59520/60000 (99%)]\tLoss: 2.294914\n", + "Train Epoch: 3 [0/60000 (0%)]\tLoss: 2.302218\n", + "Train Epoch: 3 [640/60000 (1%)]\tLoss: 2.321162\n", + "Train Epoch: 3 [1280/60000 (2%)]\tLoss: 2.301874\n", + "Train Epoch: 3 [1920/60000 (3%)]\tLoss: 2.406926\n", + "Train Epoch: 3 [2560/60000 (4%)]\tLoss: 2.365343\n", + "Train Epoch: 3 [3200/60000 (5%)]\tLoss: 2.323746\n", + "Train Epoch: 3 [3840/60000 (6%)]\tLoss: 2.344622\n", + "Train Epoch: 3 [4480/60000 (7%)]\tLoss: 2.351114\n", + "Train Epoch: 3 [5120/60000 (9%)]\tLoss: 2.407657\n", + "Train 
Epoch: 3 [5760/60000 (10%)]\tLoss: 2.418502\n", + "Train Epoch: 3 [6400/60000 (11%)]\tLoss: 2.337087\n", + "Train Epoch: 3 [7040/60000 (12%)]\tLoss: 2.303796\n", + "Train Epoch: 3 [7680/60000 (13%)]\tLoss: 2.401513\n", + "Train Epoch: 3 [8320/60000 (14%)]\tLoss: 2.337463\n", + "Train Epoch: 3 [8960/60000 (15%)]\tLoss: 2.324577\n", + "Train Epoch: 3 [9600/60000 (16%)]\tLoss: 2.335718\n", + "Train Epoch: 3 [10240/60000 (17%)]\tLoss: 2.384667\n", + "Train Epoch: 3 [10880/60000 (18%)]\tLoss: 2.267396\n", + "Train Epoch: 3 [11520/60000 (19%)]\tLoss: 2.306527\n", + "Train Epoch: 3 [12160/60000 (20%)]\tLoss: 2.367751\n", + "Train Epoch: 3 [12800/60000 (21%)]\tLoss: 2.309073\n", + "Train Epoch: 3 [13440/60000 (22%)]\tLoss: 2.315047\n", + "Train Epoch: 3 [14080/60000 (23%)]\tLoss: 2.347873\n", + "Train Epoch: 3 [14720/60000 (25%)]\tLoss: 2.268999\n", + "Train Epoch: 3 [15360/60000 (26%)]\tLoss: 2.333838\n", + "Train Epoch: 3 [16000/60000 (27%)]\tLoss: 2.349008\n", + "Train Epoch: 3 [16640/60000 (28%)]\tLoss: 2.375700\n", + "Train Epoch: 3 [17280/60000 (29%)]\tLoss: 2.331388\n", + "Train Epoch: 3 [17920/60000 (30%)]\tLoss: 2.335067\n", + "Train Epoch: 3 [18560/60000 (31%)]\tLoss: 2.332542\n", + "Train Epoch: 3 [19200/60000 (32%)]\tLoss: 2.345043\n", + "Train Epoch: 3 [19840/60000 (33%)]\tLoss: 2.300745\n", + "Train Epoch: 3 [20480/60000 (34%)]\tLoss: 2.416367\n", + "Train Epoch: 3 [21120/60000 (35%)]\tLoss: 2.282617\n", + "Train Epoch: 3 [21760/60000 (36%)]\tLoss: 2.317955\n", + "Train Epoch: 3 [22400/60000 (37%)]\tLoss: 2.329546\n", + "Train Epoch: 3 [23040/60000 (38%)]\tLoss: 2.333439\n", + "Train Epoch: 3 [23680/60000 (39%)]\tLoss: 2.432110\n", + "Train Epoch: 3 [24320/60000 (41%)]\tLoss: 2.389215\n", + "Train Epoch: 3 [24960/60000 (42%)]\tLoss: 2.317299\n", + "Train Epoch: 3 [25600/60000 (43%)]\tLoss: 2.398170\n", + "Train Epoch: 3 [26240/60000 (44%)]\tLoss: 2.354642\n", + "Train Epoch: 3 [26880/60000 (45%)]\tLoss: 2.310941\n", + "Train Epoch: 3 [27520/60000 
(46%)]\tLoss: 2.352980\n", + "Train Epoch: 3 [28160/60000 (47%)]\tLoss: 2.370045\n", + "Train Epoch: 3 [28800/60000 (48%)]\tLoss: 2.332853\n", + "Train Epoch: 3 [29440/60000 (49%)]\tLoss: 2.328536\n", + "Train Epoch: 3 [30080/60000 (50%)]\tLoss: 2.410731\n", + "Train Epoch: 3 [30720/60000 (51%)]\tLoss: 2.315743\n", + "Train Epoch: 3 [31360/60000 (52%)]\tLoss: 2.362804\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Train Epoch: 3 [32000/60000 (53%)]\tLoss: 2.268909\n", + "Train Epoch: 3 [32640/60000 (54%)]\tLoss: 2.324456\n", + "Train Epoch: 3 [33280/60000 (55%)]\tLoss: 2.313516\n", + "Train Epoch: 3 [33920/60000 (57%)]\tLoss: 2.345426\n", + "Train Epoch: 3 [34560/60000 (58%)]\tLoss: 2.328141\n", + "Train Epoch: 3 [35200/60000 (59%)]\tLoss: 2.392569\n", + "Train Epoch: 3 [35840/60000 (60%)]\tLoss: 2.333704\n", + "Train Epoch: 3 [36480/60000 (61%)]\tLoss: 2.352234\n", + "Train Epoch: 3 [37120/60000 (62%)]\tLoss: 2.323742\n", + "Train Epoch: 3 [37760/60000 (63%)]\tLoss: 2.318627\n", + "Train Epoch: 3 [38400/60000 (64%)]\tLoss: 2.340932\n", + "Train Epoch: 3 [39040/60000 (65%)]\tLoss: 2.401247\n", + "Train Epoch: 3 [39680/60000 (66%)]\tLoss: 2.390721\n", + "Train Epoch: 3 [40320/60000 (67%)]\tLoss: 2.372447\n", + "Train Epoch: 3 [40960/60000 (68%)]\tLoss: 2.381556\n", + "Train Epoch: 3 [41600/60000 (69%)]\tLoss: 2.370942\n", + "Train Epoch: 3 [42240/60000 (70%)]\tLoss: 2.382010\n", + "Train Epoch: 3 [42880/60000 (71%)]\tLoss: 2.337266\n", + "Train Epoch: 3 [43520/60000 (72%)]\tLoss: 2.327033\n", + "Train Epoch: 3 [44160/60000 (74%)]\tLoss: 2.379825\n", + "Train Epoch: 3 [44800/60000 (75%)]\tLoss: 2.323223\n", + "Train Epoch: 3 [45440/60000 (76%)]\tLoss: 2.283228\n", + "Train Epoch: 3 [46080/60000 (77%)]\tLoss: 2.334064\n", + "Train Epoch: 3 [46720/60000 (78%)]\tLoss: 2.381998\n", + "Train Epoch: 3 [47360/60000 (79%)]\tLoss: 2.324826\n", + "Train Epoch: 3 [48000/60000 (80%)]\tLoss: 2.344363\n", + "Train Epoch: 3 [48640/60000 (81%)]\tLoss: 
2.407687\n", + "Train Epoch: 3 [49280/60000 (82%)]\tLoss: 2.405679\n", + "Train Epoch: 3 [49920/60000 (83%)]\tLoss: 2.347231\n", + "Train Epoch: 3 [50560/60000 (84%)]\tLoss: 2.381284\n", + "Train Epoch: 3 [51200/60000 (85%)]\tLoss: 2.320855\n", + "Train Epoch: 3 [51840/60000 (86%)]\tLoss: 2.332896\n", + "Train Epoch: 3 [52480/60000 (87%)]\tLoss: 2.331153\n", + "Train Epoch: 3 [53120/60000 (88%)]\tLoss: 2.318925\n", + "Train Epoch: 3 [53760/60000 (90%)]\tLoss: 2.324926\n", + "Train Epoch: 3 [54400/60000 (91%)]\tLoss: 2.312320\n", + "Train Epoch: 3 [55040/60000 (92%)]\tLoss: 2.316199\n", + "Train Epoch: 3 [55680/60000 (93%)]\tLoss: 2.314889\n", + "Train Epoch: 3 [56320/60000 (94%)]\tLoss: 2.341814\n", + "Train Epoch: 3 [56960/60000 (95%)]\tLoss: 2.343977\n", + "Train Epoch: 3 [57600/60000 (96%)]\tLoss: 2.324932\n", + "Train Epoch: 3 [58240/60000 (97%)]\tLoss: 2.346128\n", + "Train Epoch: 3 [58880/60000 (98%)]\tLoss: 2.328274\n", + "Train Epoch: 3 [59520/60000 (99%)]\tLoss: 2.302693\n" + ] + } + ], + "source": [ + "model = create_net(tornasole_save_interval=100, base_loc='./ts_output', run_id='bad')\n", + "train(model=model, epochs=4, learning_rate=1.0, momentum=0.9, batch_size=64, device=torch.device(\"cpu\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "bad_trial = LocalTrial( 'myrun', './ts_output/bad/')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can plot the gradients - notice how every single one of them (apart from one) goes to zero and stays there!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXoAAAD4CAYAAADiry33AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOy9eXgUZbr+f7/Va5LORna2hLATCGEHIQgjoLjAIKLiMvpTZBy343HmHHDm/BBwF0cZxhk33Fcc0RlERYYRBBGBgAlISAgkgSRkT6fTe3dVvd8/qqvTnfSeTneI9bkuLpKuqu63uytPPfUs90MopZCQkJCQ6L8w0V6AhISEhETvIhl6CQkJiX6OZOglJCQk+jmSoZeQkJDo50iGXkJCQqKfI4/2ArqSmppKc3Jyor0MCQkJiUuKY8eOtVBK0zxt63OGPicnB0VFRdFehoSEhMQlBSHkvLdtUuhGQkJCop8jGXoJCQmJfo5k6CUkJCT6OZKhl5CQkOjnSIZeQkJCop8jGXoJCQmJfo5k6CUkJCT6OZKhl5CQ6FW+r2jB2SZDtJfxi0Yy9BISEr3KI58UY/OeM9Fexi8aydBLSITInW8dwTs/VEd7GX0ajqdoMVhRozVHeym/aCRDL9EnKalph8nGRnsZXuF4igMVLdhX3hTtpfRptCYbeArUaU3RXsovGsnQS/Q5Oix2LH/5B7x7yKt0R9RpM9rA8RTnmo3RXkqfpsVgdfxvg9nGRXk1v1wkQy/R57jQagLLU5xp0Ed7KV5p0lsAADVaEyx2yYB5o0Vvc/5cK3n1UUMy9BJ9jlpHPLeype96y016wVOlFKhu7bvrjDaiRw90fq8SkUcy9BJ9DtHzq2w2gFIa5dV4prmj04Cda5IMvTfcDb3k0UcLydBL9DlEz6/DwkJrskd5NZ4RQzeAcEGS8EyzwQqljIFSzkgefRTpc4NHJCRq2jo9v8pmAwbEDYjiajzTpLciQS1HvFqBc5Kh90qL3oZUjRJqhUwy9FFE8ugl+hy1WjNGZ8QD6Ltx+ma9FekJauSmxUmVNz5oMViRGq/CoOQYKXQTRSRDL9GnoJSiRmvCzNwBUMgIqvqooW/SW5GmUWF4mqZP5xKiTYvBipQ4JQYnx0pNU1FEMvQSfQqtyQ6TjUN2ShyGDojts/HvJr0F6QkqDE+Lg9HGodElOSvRSYvBilSNCoOTY9BmtMFo7btNcL4ob9Dj7YNV0V5GyEiGXqJPIcbnByfHIDdN0yc9ekopmjqsSI8XPHoAUpzeAzxP0WqwITVeMPQAUNd+aXr17/1YjfVflEJn7pvFAf6QDL1En0JM2A0ZEIvc1DhUt5rA8X0rLNJhYWFleaTHqzE8XTL03uiw2MHy1OHRxwK4dEssq1uEdVc09t0mPl9Ihl6iG/vKm6L2Bym+7qDkGAxLjYON5XGxj3mBzY7SyvQEFdLjVdCo5DgXggwvpbRfx/bFGvpUjRJDBgge/aVaeSM2xZX14W5tX0iGXsINO8dj9bvH8Pd956Ly+jVaExJjFEhQKzAsNQ5A36u8aXLE49PiVSCEIDctLqQ1/rO4DjOe+g9sLB/uJfYJmh3yB2kaFdI0KqjkjFvp7KWCleWczka5ZOgl+gM1bSbYOD5qgyJqtWan95friH9X9bGwiCh/kB6vBgAMT9OE5NF/W9aMJr3VrXu0P+H06B0XRKHEMjoe/b+K63A+RKmKmjYzxOhhuRS6kegPVDpqwiujVBte02bC4CQhnpuqUSJeJe97Hr1L6AYAhqfF4aLOEnRFSUlNOwD0f0OvET6nwcmxUTH0FjuHh7cVY+uB0KpmxAvEmMx4lDfoL8lwm2To+xnNeiu2Hq
gM+WQUk4otBit0EZYfoJS6efSEEAxLi+tzlTdNHVaoFQziVUJjuVh5E8w624w2XHCEMVoNNj97X5q0GKyQMQRJMQoAQiVVNHI/1a1GUBp6wry6VVjzlXmZ0Jntl2QprWTo+xk7Si7iiS9PoyLE0IurJ3+uJbIhk2aDFVaWd1ZoAEBualzU7i680aS3Ij1eDUIIgM4QUzCGpKS23flzv/Xo9TakxCnBMMLnNCQ5FlqTHYYI19JXOy7AIRv6FiPi1XLMzE0BcGmGbyRD389o7BDCChWNIRr6FgPS44Vb7VDizj2hs7QyxvnYsFQNLurMfUrzvVlvRZrjMwKA7JRYMARBSSGIYRsAaDX2X49eDNsA6Kylj3D4Rgz9NXZYobcEf5da3WpETkocxmQKshzlDR1hXV8kCMjQE0KuIoSUE0LOEkLWetiuIoRsc2w/TAjJcTyeQwgxE0KKHf9eCe/yJbriNPRNoXkdlc1GzB2VBoWMRDw23tks1enRD0uL63Oa7016i/NiCABqhQxDBsQG59HXtGNkugZqBYMWfT/16B06NyKioY905U21y3kcyt3h+VYTclLjkBynRHq86pIssfRr6AkhMgB/A7AYwDgAKwkh47rsdjcALaV0BIAXATzrsu0cpbTA8e/eMK1bwgudhj54b7zdZEOr0YZRGRrkpMRFzaMXDQIghG4AoKoPhW+E0I3K7bFgKm8opSip1aFgSBJSNap+7NHbkBqndP4eraapqhYjUjXCOiqDDEfaWB61WhNyUoS1j3YkZC81AvHopwM4SymtpJTaAHwMYGmXfZYCeMfx86cAriBiAFMioog13qF08Imhh+FpGsFwRbissVZrQkqcErHKTvXsvlZLb7Fz0FtYpCeo3R4f7kga8wF08dZqzWgz2jBxSBJSNKp+GaOnlKK5i0cvyBVHXpe+qsWEuSPTIGNI0ENiarUm8BTIThHOw9EZ8ahoMvS5bm1/BGLoBwGocfm91vGYx30opSwAHYAUx7ZhhJCfCCHfEUIKPb0AIWQ1IaSIEFLU3Nwc1BuQcEf06KtajLBzwTXiiAJiuWka5KbF4XyrKejn6Am1WrObNw8AcSo5MhJUfSYh69os5UpumgZWlg9Iy0VMxBYMSUJqnLJfVt3orSxsLO/0pAGhiirSJZZ6ix0tBitGZsQjO8jwGiCEbQBgWGqnR29j+T4VSgyE3k7G1gMYSimdBOARAB8SQhK67kQpfY1SOpVSOjUtLa2Xl9R/MVhZGG0cxmTGw87RoBtEzjUboZARDEmOwfA0DVieRjSeWqs1Y/CA2G6P56ZqUBXhCiBvOGvoPYRugMAqO0pq2qGUMxidGY8UjRKtxv7n0Yt5B9dkLOAosWyP3DklatQMS41Fbgh3qWLJrOjRj8kUzNelFr4JxNDXARji8vtgx2Me9yGEyAEkAmillFoppa0AQCk9BuAcgFE9XXRfpdVgxWP/+jlqFSINOsEIzRmRCiD4ypvKZgOyU+IglzEuYl2R8Vx4nqLOg0cPoE/V0nftihUZniYYgkA+r5IaHcYPTIBCxggxeoMtoJDPpUSL4y7Fo6GPoEcvxuSHpWowPD0O1S3BieSdbzVCo5IjxZFrGJmhASH909AfBTCSEDKMEKIEcDOAHV322QHgDsfPNwD4llJKCSFpjmQuCCG5AEYCqAzP0vseX52sxzuHzqPYpXQukjQ5wjazR6SCkOATspUtRmfyM9dpuCLjSTfprbBxPIYke/Lo46A12aHtA0lL8TMWu2JFBsQpkRSr8Kufz3I8TtbpkD84CQCQolGB5Sk6Qij768u0Grx59LFoN9lDKnMMBdGjz06JxfA0DWwcH1QyuLrVhJzUWGfPhFohQ05KXP8z9I6Y+wMAvgFwGsAnlNJThJCNhJAljt3eAJBCCDkLIUQjlmDOBXCCEFIMIUl7L6W0Ldxvoq9QUqsD0OlZR5pGR1ghOyUWg5NjcCaIhCzL8TjfanQ2/ySoFUiLV0Ws8qZG26
lD3xXxotMXErJNeivkDMGAWKXb44QQ5KbG+b0wVjQZYLZzKBgiGHoxht3Sz+L0nTo37p+T+P1GyquvajFgUFIM1AqZy11X4Od0davRGbYRGZ0Rf8k1TQUUo6eUfkUpHUUpHU4pfdLx2DpK6Q7HzxZK6QpK6QhK6XRKaaXj8e2U0jxHaeVkSukXvfdWos8JR5KtoSNKht6RKExPUGNUenxQwmQ1WjPsHHUaVUAIR0TKuIpe1hAPMfphqcFLDPQWTXqhCUjs9nRFqFTyvUaxUWqiw9CnxAkeb2s/q7xpNthACLpdEDtLLCNk6B0eOSDkegAEXHlj53jUas3O0kqR0ZnxqG41wmzrO018/pA6Y8OEwco6QyVR8+g7LNCo5NCo5BiRoUFlsxFsgFUzYshBTCqKP59tisw81Jo24Q9/UFJ3j35wcgzkDOkTYwW7dsW6Mjxdg2a91WcYpqS2HQlqudN4iB5vf/ToB8QqIZe5m5ghTo++9xOylFJUNRucJbrJcUqkxCkD9ujrtGZwPEVOV48+Mx6UImoKr6EgGfow8XOdDqI9rNdFR4q1qcPqjB2PTI+HjeNxPsCqmUpnDb2rR6+BzmxHWwRi47VaE9LjVVArZN22KWQMhg6I7TMefdeKGxHxIumrFLS4RoeJQ5KcMV+nR9/PKm9a9NZu8XlAyGXEKGQR8ei1Jjs6LKyboQ6mP6TKUbWWk9rd0ANA2SUkhSAZ+jAhhm3GD0pAQ5TU7Ro6LMh0NPKMyhCMTqCVN+eaDY6EYuetdm4QlSQ9pabNc8WN61r6gqFvdgwF94Tz8/Li6ZltHM406p3xeQBIjlWAkP7p0XeNzwNiLX1MRMp2xfPFLRyZHrhI3nlnaaV76CYnJQ4qOXNJJWQlQx8mSmp0GJwcg3FZCWiIkkff2GFBRkLnMAwg8A7Zymajmzfv+hyRCJnUtpvcNG66Miw18M7T3oLleLQabUjrUlopMnRALOQM8eoxnrqoA8dTTBzcaejlMgbJscp+F6NvMdg8evRA5EosRUPv6tHnpmrQarQFVMFV3WpCnFKGtC7vQ8YQjMzQXFIJWbn/XSQCoaS2HROHJCEzQY1mvRUsx3eLT/YmlFK30E2cSo5BSTEBl1hWthhwxZgMt8cGJcVAJWd6vcSS5XhcbLdgyUTvHv2wVKHz9KLO7POC0BPsdjtqa2thsXjOsXA8xWvXZSEp1orTp0973Gfr0izIGYvH7byFxetLspBJW3H6dGfx2QuLUiFnOK/PeSnyWGEiYpUyj+9pdb4KJpu819/vEGLH1iVZsDRfwOkWIVQ2NYnD60uycKGyAg1y33+fV2TZMf+aDJSVlXXb9uisBFhZPirfmVqtxuDBg6FQKAI+RjL0YaDVYEWt1ozfzMqGRqUATwVt9axE74Yr3LSb7LBxPDJcvM1RGZqADL3OZEeLweZ2iwsADEMc3YS9GzJp6LCA46lPAy6urarF2GuGvra2FvHx8cjJyYEnqSaTjQXfJDSVJcZ4/iOLaTHCxvIY5YjjunKh1YQEG4sxWe7N4apmAyiFs0ntUofjKewXdchMVHdrLAOE8Fe9zoKRAxMgZ3rPGTrfakSSncPozM7P28pyKG/QY3ByLAbEdQ8tuVLeoIdawXQrrwRc3kNWQsQdutbWVtTW1mLYsGEBHyeFbsLACUf9fP7gJGQlCid2fYQrb8Qa+gwXsa2RGfE41+xfgEkcMJKb1t3Q5Kb5rw3vKU4del+GXhQ368WLjsViQUpKikcjDwAsJ3yOCpl3vT6VgoGV4z1WKpnsLGKU3ZPNcoaA7UedsSwvVHp5M+JKh2G0s737nm0sD6Xc/fNWyhgQQmBlfZdGUkphY3movHj9YtGAJcKD3QkhSElJ8XrX6Q3J0IeBktp2MASYMCjRaWgjXWIpvl6GS6JwZLoGNpZ3jqzzhqeKG5HhaRrUtJn8/mH0hE4deu93QGnxKsQpZb2ekPUluioKvPnyQlVymdNIuMJyPGwsj1hPhl
7GOI1jf0C8IMo99BoAgMJhPG29KJhHKYXVg6EmhEAlZ2C1+35tG8eDgna7UIg4DX0U5E5CEQaWDH0YOFGrw4h0DeJUcqdHH2lDL6oqdvXoAfjtkK1sNkDOEI/NSsPT4sDTThW/3qBWawYhwEAPNfQihAhhpGh2x4pet9yXR+8wLNYuht7kMAgxyu7RUjlDwPG03+jd+PucRI++68Uw3GvgKfXokavkTLfvpyvi2pRePHo5QyBjSJ+afOYLydD3EEopSmrandolSbEKqORMxLtjGz1osIxwxHz9NXacazZgaEosFB5ijU5Vxl5sDqnRmpCZoPb6RyUyLDUuqk1Tdo6HnGHA+PCoOg29uwEQuyhjPPQJyBwGsb+Eb1g/dz4yhoAhpFclsEWP3dM5pZLLYGN58D4aAcULgbfQDSEEaoUMFj93Bn0FydD3kLp2M1odQyQA4QTITFRHJUafHKuAyuVWUyNW3vj16I1uHbGuRELcrFZr9hmfFxmWGoe69ujNj2U56tObB4QwjJzpHhow2TioFTLIPIQzRIMY7vBNTk4OWlpaAACXXXZZyM/z9ttv4+LFi26Pffzxx3jyySfx9ttvg2EYnDhxwrlt9vTJqKu54PWzIoTgwzdfhq7D9zn1j3/8A2PHjsX8+fO97nPrrbdi9OjRGD9+PO666y7Y7UJXspUTzhGVnMG+fftw7bXXOo9RKRhQCOG1q6++Gu3t3UUIbSwPhhCv4SdACN9Y7VxEOsd7ilR100PEROzEwYnOxzIT1BGvpW/ssLqFbURGpGtwxkfTFMdTnG814Vdj0z1uj1XKMTBR3auVN7VtJszMTfG7X65jfuyFNhNGZXSvagknG744hdKL7p2PZjsHAnjs3nVFvBC57mdyzAnYtGJit/1FYxKIR8+yLOTy4P9sf/jhh6CPEXn77bcxfvx4DBw40PnY119/jYceeggnT57E4MGD8eSTT2Lbtm3CRgrIGPi883n39ZexZPnNALzPn3jjjTfw+uuvY86cOV73ufXWW/H+++8DAG655RZs3boVv/vd72BjeRBCPN6luobXvvrqK4/PKyRyGZ/xcLWcAUcp7Fz3pG9fQ/Loe0hJTTuUMsY5kAAAshLVEQ/dNHVYuo23A4SErK/Km1qtCTaOx/BU76V9w9M1vRYysbE8GjosHgeOdEUUpYrWtClKA0uEEULcwgKUUlDq/W5AfJzlKB5//HGMHj0ac+bMwcqVK/H8889j3rx5ePjhhzF16lT85S9/wRdffIEZM2Zg0qRJWLBgARobGwEAra2tWLRoEfLy8rBq1So3T1Oj6fx+N23ahGnTpiE/Px+PPfYYAKC6uhpjx47FPffcg7y8PCxatAhmsxmffvopioqKcOutt6KgoABmsxmUUhQXF2Py5MkAgGuvvRanTp1CeXm58H5BIXPcpezevRuzZs3C5MmTsWLFChgMBmzZsgVNDfX4zfJrvHrrGzduxPfff4+7774b//M//wOO4/CHP/wB48ePR35+Pv76178CAK6++moQQkAIwfTp01FbWwtACN2oXAx1R0cHrrnmGowePRr//eD94HkeVjvndtfz61//GlOmTEFeXh7efesNqOQMOI7DnXfeifHjx2PChAl48cUXnWvsTMj2/fCN5NH3kJLadowdmOAWC8xIVKNRZwXPU48qh71BQ4fFo5c7KiMeVseAY0/1wOec4wO7bxMZnqbBp8dqQSkNKePvi3qdGTz1XXEjIqoQBjvgORQeuy7P7XdKKX6+2IFUjdJvf0Sz3op6nRnjHDXW7SYbLrSZMNJLnbwYujl69Ai2b9+OkpIS2O12TJ48GVOmTAEA2Gw2FBUVAQC0Wi1+/PFHEEKwdetWPPfcc/jzn/+MDRs2YM6cOVi3bh2+/PJLvPHGG91ea/fu3aioqMCRI0dAKcWSJUuwf/9+DB06FBUVFfjoo4/w+uuv48Ybb8T27dtx22234aWXXsLzzz+PqVOnAgCOHz+OiRMnOs8Fhm
Hwv//7v3jqqafwzjvvgFLhLqWlpQVPPPEE9uzZg7i4ODz77LN44YUXsG7dOjz/5z/j9W07MHu851rwdevW4dtvv3W+7ssvv4zq6moUFxdDLpejrc1d7dxut+O9997DX/7yFwDoVnFz5MgRlJaWIjs7G1dddRX2fbMTy5ff4PYcb775JgYMGACTyYT8yVOx/IbrUVzdiLq6Ovz8888A4BbmUSuE57ewHBIQePNSNJAMfQ/geIqf6zpw/WT3EbpZCWrYOB5tJu9t4OFeR7PeisxED6Ebh+bNmUaDR0Nf6TIQ3Bu5aXEwWFk06T2Hh3qCWEMfiKGPd2jkV0XBo+d4wStXBNDgo1J0hgbkMgZmGyeU9XkJ+TBECHMcPnQIS5cuhVqthlqtxnXXXefc56abbnL+XFtbi5tuugn19fWw2WzOxpn9+/fjs88+AwBcc801SE5O7vZau3fvxu7duzFp0iQAgMFgQEVFBYYOHYphw4ahoKAAADBlyhRUV1d7XO+uXbuwePFit8duueUWPPnkk6iqqgIgJFx//PFHlJaWYvbs2QCEi9WsWbMcRwgXCTvLQ670/5nu2bMH9957rzNsNWDAALft9913H+bOnYvCwkKhvJXjkRDTad6mT5+O3NxcAMDKlStx4PBhXLv0erfn2LJlCz7//HNQCjRerEVtdSWmF0xAZWUlHnzwQVxzzTVYtGiRc38Zw0ApYy4Jj14K3fSAymYDDFbWWXEjkunw+CJVYtlqsIKn8Bi6EStvKpo8J2TPNRuRHKtAso8uwd6svBFr6ANJxgJC41Q0xM3sAZRWinStvDHZOMQoZF5j1sSR9ON8JPXi4jov0g8++CAeeOABnDx5Eq+++mpQzTOUUjz66KMoLi5GcXExzp49i7vvvltYt6rTKZHJZGBZ1uNz7N69283gAYBcLsfvf/97PPvss6AQFEcppVi4cKHztUpLS513GeJHYeN6nsjcsGEDmpub8cILLwAQqqNol9LKrneicoZxq4zat28f9uzZg0OHDuHgkSKMycsHb7cjOTkZJSUlmDdvHl555RWsWrXK7XlUCtklUWIpGfoeUOIhEQvA6VlHytCLA0cyPMjnJqgVyEpUe1WxrGw2eOyIdcVp6HvBwNZqzZAxxNl/4I/cCA5DcUUsGfSU3OtKZ/elYHDMds5jo5QrchmDSVNn4IsvvoDFYoHBYMDOnTs97qvT6TBokHAX+c477zgfnzt3Lj788EMAQrJUq9V2O/bKK6/Em2++CYNBOB/q6urQ1NTkc23x8fHQ6/XO12ZZFikp3ZPnd955J/bs2YO21hYwhGDmzJk4ePAgzp49CwAwGo04c+aM8zmNBkPAtfQLFy7Eq6++6rz4iKGbrVu34ptvvsFHH30EhnHvYXBNkB45cgRVVVXgeR7btm3DZXNmu+WtdDodkpOTERsbi1OnTuPET0VQyITwE8/zWL58OZ544gkcP37cbV1qhVCT76tUsy8gGfoecKK2HRqVvJuhdMogRCghK9bQewurjEjX+PToc1O9x+eF5xW6UnvDo6/VmpCVqA5YL2RYahzajDa0myIr62v30+3pimv3pcUuGAG/hp4hyJs4GUuWLEF+fj4WL16MCRMmIDExsdu+69evx4oVKzBlyhSkpqY6H3/sscewf/9+5OXl4bPPPsPQoUO7Hbto0SLccsstmDVrFiZMmIAbbrjBacS9ceedd+Lee+9FQUEBduzYgQULFnjcT6lU4r77H0RbSzPkMiAtLQ1vv/02Vq5cifz8fMyaNcspELb6nntw/+03YOnVizw+V1dWrVqFoUOHIj8/HxMnTnRe0O699140NjZi1qxZKCgowMaNG2FleZwq+QkP3fdb5/HTpk3DAw88gLFjx2LYsGG4ftkyAHDOkLjqqqvAsizGjh2L9ev+hPxJUyFjCOrq6jBv3jwUFBTgtttuw9NPP+22LrXCcyd0n0OsCOgr/6ZMmUIvFZb89QC96dUfuj3OcjzNffRLum
lXWUTW8f6P1TR7zU5a3272uH3DjlN09P99RTmOd3tcZ7bR7DU76cv7zvp9jWu3HKC3bf0xLOt15fq/H/T4GXrj36caaPaanfT4+bawr6W0tNTrtsYOMy2p0VK2y2fojeoWAy2r76CtBgstqdFSi431uX9Nq5GWXtRRvV5PKaXUaDTSKVOm0GPHjgX+BiLA3XffTQ8dOuR1u9FqpyU1Wqoz2fw+V3lDB61qNoRzeZRSSuu0Jnqytp3yvPfvymrnaEmNlrboLd22VTUbaHlDR0CvZbKytKRGS7VGa8jrDQVP5yqAIurFrkoefYjYWB6n6/Vu2uIiMoYgPV4VsaapRp0FDOkcNN2VURkaWOx8Nw1wMRHrz6MHHPNjeyEJWqs1BRyfB4Bhab0vbuYJlqOQEeKx4ckTYvel0cpBxhC/Xb8yGQHLUaxevRoFBQWYPHkyli9f7ixh7Cts3boVM2fO9Lrdn86NK0oZ0yt6N2LFja8KMYVM6M71JIXgSSPHGyoFA4K+L4UgVd2ESFlDB2wc7+yI7UpmohoNHZFpmmrsEMa2eQt/jMzoTMgOdZmWI4Zi/MXoASFO/8/iizDbOI8KjKFgsXNo7LAGJTs8JDkWMoZEPCFrD3K+gNh92WG2I1Yl91uWKmeE/d997/2Iyt6GG6dyZQBJa6WcgdHKYsaMGbBa3QevvPfee5gwYUJIa7CynEepCVec4bUuhp56qNjxBeN4nr5eeSMZ+hApqRHqafMHd4+hAkKcPlKjxhr1Fp9ljyPSRXEzA64Y2zlcpLJFEDPrOirNE6JWemWLAXkDPb/nYLnY7pAnHhC4br9SHp35sYHIH7gieoQcpX6NDtApfczyFH28ydInnR69/4uVQiZ0lh784VDYLm48pbCzPJJifGvNA8Jdl8nmXlkkVuz4uwNzRa1gnKJ1fZVL13WIMiW1OqTEKTHIi+JiRoKgd0MjkI0X5A+81+snxiiQkaDqlpCtbDZi6ADPYmZd6Y35sTXOGvrgBokMS+19jfyusDwPRRDNb66aQ/4SsQCcIaFLXdiM5YUQVyCNgqIxDae4mY3lQeFdjMwVlUIIHbmqhopJVVVQd29CmM7f3IdoIhn6EDnhGB3o7ZY8K1ENk42D3uq5FjmceJM/cGVkenw3FcvKZqPPjlhXclLiQEh4a+lrtf516D0xLDUO1a2RnR9r52hQXqeM6dRZCSTUJXrAXC8qOkYCYYRmYBdEpWO/cMbp/ckLu+JJUpvbiHIAACAASURBVNpTaaY/RCkEax/26iVDHwIGK4uKJoPXsA3Q2TTV2MsJWRsrDKzO8DKwWmRkhgYVjQanceR4iqpWY0DxeUA4mYckx4bVk65pM0MhI0F32+amxcFi5yOmJ8Q5tM2DCd0AgiFRypiA7pjk/USq2M7TgMcDKpy69OF7z/7khV0R77pcG6dE1UpfU8S64iqF0FeRDH0I/FynA6XwWHEjkpkQ2kjBP+8ux2fHawPev8kxQjAz0bfUwsj0eJjtHOoccfE6rRk2lvc4Vcob4a68qdWaMDApJuBKFpFhERgr6IqzWSrI+aZZSTEeh7l4QqxSsV/ihp4LIpchYwhkhITVo7eyQpVTIHdf3jx6f6qVXVHKhBkFfTkhG9CZSwi5ihBSTgg5SwhZ62G7ihCyzbH9MCEkp8v2oYQQAyHkD+FZdnQ5Ues7EQsgpElTPE+x9UAVPjh8IeBjxK5Yv6GbDPchJL7mxHpjeJoGlS2GsIVMagLUoe+KqGJZFQFxMyA4+QNXYhQyxKkCq3cQZBCYsIZuoqFHz/I8rpwzzatOjsjmzZthNpuhkDOwe2k2CkWP3mi2uuVHfMEwBEqZ++wAG8s7J2AB8KpX78r8+fNRcaqkW4llcXGxVxnkSOP3LCSEyAD8DcBCALUAjhJCdlBKS112uxuAllI6ghByM4BnAdzksv0FAF+Hb9nRpaRGh8HJMUjxIVgmTnoKJrxQozXBbOdwur4jYOXLJrEr1l/oJl0UN9
Nj/pj0ztLKAGroRXLThHr8izpz0AlUT9RpTVjgUgUUKBkJKsQqZb0rhfD1WqDhJABAzfPItTvmvfZEvTNzArD4Ga+bAxkS3pf16D/6+GNh/QF8Rps3b8Ztt90GpSzGq0cfih79h+++hVWr7w34vakUMmfohlIKK8cj3qW0MlBD7UncrLi4GEVFRbj66qsDXk9vEYhHPx3AWUppJaXUBuBjAEu77LMUgCi68SmAK4jj3ocQ8msAVQBOhWfJ0aektt1n2AYQ4n8pccqgQjdljnJMk43DeT8DvUU65Q98h26SYpVIi1ehwmHgK1uMSIpVYIAPMbOuDA9j5Y3JxqLFYAs4tOEKIQQj0zX4uU7X43UEgmh7w6zQ3A25jGDzpmeC0qOfffl8nK0WQn3R1qMvPS3IG7h+TN706C9evIj58+fjlmVXC5UyXarTQtGjnzZtGi7W1XVLxN5///3YsWMHAGDZsmW46667AAiyxC8+vQFWlsd7772H6dNnYMWiOVj73w+Cc0yocr0r8jQrQOSbnf/EjVfPx8hRo3DgwAHYbDasW7cO27ZtQ0FBQedQligRiGswCECNy++1AGZ424dSyhJCdABSCCEWAGsg3A14DdsQQlYDWA3Aoz5HX6LVYEWt1ozbZ2b73TczMbhJU2dc6u5LL3Y4Y9G+aNRboZARJMf6N9gj0zXOsYKVzQbkpsYFFYt01tI3G3D5KO+TgQKhLgh5Yk/MHpGKV/dXQme2IzGmF7TAXTzvVp0ZLQYbxg9M6FVrf6r4OHbt/FfAevR2jsdTL/4Nzz73HF7/+5ao69E/8/RT+N+n/+r8iHzp0b/wwgvYu3cvqCoe9TozON49th+KHv27772Ph//viW6J2MLCQhw4cABLlixBXV0d6uvrAQAHDhzA1UuX4+yZMny8bRt2f7sPNTob/vr4WnzwwQf4zW9+43yOo0ePep0VAACgHD7c+R+UHfkOGzZswJ49e7Bx40YUFRXhpZdeCuV0CCu9nYxdD+BFSqnPYCql9DVK6VRK6dS0tJ4ZkN7GOTrQS0esK8KkKavf/UTKGvXITFBDzhCU1gfmrTZ2WJAerw4ozDMqIx4VTQZQSh2llYHH5wEgJU6JxBhFWCpvakOsoReZNzodHE9x8GxLj9fiD5ajUDAk7ENXunK86DDmLVoMtVqN+Ph4n3r0V155JSYVTMTbr/wVZaeFKOr+/ftx2223AQhMj37y5MkoKytDRUUFAPRYj/7w4cOovXAeok/vqkdfUFCAd955B+fPn3c7TvS+/SVk9+zZg9/+9rc+9ehnXTYbk2dc5tXQl5aWYty4ccjIyEB9fT0OHTqEOXNm4/DB73D82HHMnT0LN15ZiO/27UVlZaXbcxw8eNA5K6DrdwMAN96wHEo5g5zRE/zmJ6JBIB59HYAhLr8PdjzmaZ9aQogcQCKAVgie/w2EkOcAJAHgCSEWSmn0L3EhUlLbDkKA8YP8d4dmJKhx7Hx3qVhvnGnQY/ygRNRqTTjVZV6pNxo7LH7DNiIj0jUw2TicaTSgSW8NuIZehBCC3LQ4nGvqeeimRivq0Ifm0U8emoR4tRz7yptw9YSsHq/HF8HKH4QKQwQ1RZ7Sbtr1XfXoH3nkEcyafyV27Po3XnnhGWdlkD+oQ4/+t7/9rdvj1dXV3fTozWbPd6O7d+/G9u3b3R6Ty+W478GH8dbLm52hG+rQo//oo4+8rkespbezPBB4FNENUY/+lbc/RJPB1q0GftCgQWhvb8euXbswd+5ctLW14ZNPPoFGo0FqciIoBW685VasXbfReecW7EVdrVYjOVaJxibqVcc/mgRy9h4FMJIQMowQogRwM4AdXfbZAeAOx883APjWIahWSCnNoZTmANgM4KlL2cgDgkc/Ik0DTQDVFFmJamhN9oAEj6wsh8oWI0ZnajBuYEK3wdTe8DYU3BNiQvabUw0AfE+V8sbwNE3YPHqlnAl5ApdcxqBwZCq+O9Pc693HLE8DEunqKTMvuw
zf7dkFg9EckB69leWw4x+CETXZuKjr0a+45TYcPvAdWlqahffjR49er9dDEaBHH4gevZ0XzgtP5bozZ87E5s2bnVOonn/+eRQWFkLOEFxWeDl2/PNz1F1sgFLGQKvVdrvzmD17tt9ZAWL4VBwg4/q5RRu/hp5SygJ4AMA3AE4D+IRSeooQspEQssSx2xsQYvJnATwCoFsJZn+AUoqSmvaAwjaAS9NUAJU355qM4HiK0ZkJGJeVgCa9Fc16/2EfwaMPzNCLM2V3/Swa+uA8euEYDZr0Vugt9qCPdaWmzYTByTE9mqk7b1Q6GjusOF3fu39Mdo4PqoEmVKZPm455CxdjyqSCgPTor5o3G2kOPXqTjYu6Hj0jU+DWu37rvHD41KNfvRpXXXUVFl5xBWQM8TtpKhA9+ivnzsRrm58DABQVFblNgyosLATLshgxYgQmT56MtrY2FBYWghCCcePy8N9r/3/cvmIpll4xCwsXLnTG8UWmTZvmd1aAUs4gTikDzwu2Yv78+SgtLe0Tydio6893/deX9ehr2ow0e81O+u4PVQHtf+BMM81es5MeOtfid9/Pj9fS7DU7aVl9Bz14Vjjuu/Imn8cYrXaavWYn/dveioDWQymlUx7fTbPX7KS5j35JrXYu4ONEvvm5nmav2UmLL2iDPtaVa7ccoLe/cbhHz9GgMwf9/n3hSeOb43laUqOlDTrPWv/hxGCx00NlNVRntvnVo+d5nv5c105r2oy0vKGDnmvS9/r6KPWtR3++1UhPX9QF/ZxnwqRLf6pOR2tajUEfd6HVSE9d1NGTte20Tmvyul8gswLajFZaUqOleos96HUEg6RH34uIidiuM2K9EcxIwbIGPRQygmGpccjLEjyF0nrf4Zsm5wjBwCUERjqULIcOiA1KoU9ETOD2NHxTozWFHJ8XyUhQY2xWAvaVN/foeXzROUKw9z16uYxg45qHMXv6VL969CxPwfEUKrkMsUoZzHYuIgJ6vvTo2RBzGUo5A0sP18/xPFieh1IR/OurFAxYTpgE5ks6IZBZAQlqBWSEQGuM7AQ0f0gyxUFw7LwWShmDMVnxAe3vNPQBhG7ONOqRm6qBUs5AKWcwKCnGb0JWfN7MAOetAkKH7KHK1qAapVzJTomFnCE9MvR6ix3tJntYmq7mjU7Da/sr0WGxI0Ed/jJLexCyuz1FzjB45qWtyEpUI83PxVts21crhJh0m9EGK8s7BbaiActTt67SQFl+1XwYTGYoXOLrwerRd2rcBP/+XY/x5fyI4SJfyBiCxBgF2s12DORp0PIevYVk6INgb3kTZuQOCPhk0qjkiFfJA/Loyxv0mJLdWQ4nJGR9l1gG2izlipiQDbbiRkQhYzA0JbZHlTdiaWUwOvTemDcqDS/vO4eDFS1Y3AvVN2KnaiQ8eoYIgyxYP/FqoFMpUSVnINp2k42LrqHnKGKVwX9OR48cRnmjHjJCMCJdE1IZqy0IMbOuuB4TyvFdSY5Tos1kQ4fZjuQgGhJ7Eyl0EyDVLUZUNhtxxZj0oI7LTFSj3k/TlN5iR127GaMzO+8UxmUloLLF2G0wgitNAercuDLSkZANtobelUlDkrHvTBNqAuze7crnPwnVuWMyA7sz8sXk7GTEq+S9Fr4RQzeRKK8U9G78yyAAggcrqCwyUMkZyAiB2ce50ttQSsHxfEh3PoQIozfNdg6GEGW9nfLCIYaOCIQ+iUCURv0Rq5RBJWfQFuEB9r6QDH2AfFsmVBL8akxw2iyZATRNnXF0q47OcDH0AxNAKXxOqWrssCBGIUN8gMJZADAlOxn/vWAUrh4fuvf7+0WjICMEf/rnz0HHVX+u02HrgUqsnD7UOfmqJyhkDOb0YpmlnaMgCGwGajiQywIz9BY755yLSghBjFIGky16MrksT0ERvPCbSFKsEgoZg6YAKs08YXWIkYVSxcUQYaavUhacaqU3CBE61Y1WFrY+Il0sGfoA+basCSPSNW4zVwMhKwAZhPIGId
7d1aMH4DNO36gXJksFc3IqZAz+a8FIJMaGHs8emBSD/7lyNPafacaOkov+D3DAcjzWfnYCKRoV1i4eE/Lrd2X+6HQ0dFicWkHhhOV4yJjwGIBAkDNMQM1PVpaHSuE+xcpi5yM6jMUV8eIU6gWRIQRpGhWMVhbGELx6G8uFVFwgMiBOGZTukz+SHDX1WlPPypDDhWToA8BgZXG4qhW/CjJsAwi69M16q88/3vKGDsQpZW5jCQcnxyBBLfdZedMYwGSp3uL2WTmYOCQJG78oRXuAt6hvHazGz3UdWH9dXlj1aS4fLchm9Eb4xs7TiMTnRQIJ3XA8hZ3joXYxbDFKOSgozFGachSOENeAOCXkTPBePaVUuPD1YNhuWrwKafGhNe95QilnoFHJoTXZIlIN5Q/J0AfA9xXNsHM0NEOfGAOeAs0G7ydvWYMeozLj3W47CSF+O2QbOyzOASeRRsYQPL1sAtrNdjz11Wm/+9e0mfDCv89gwdh0XD0hM6xr6Syz9N3hGQqhlgyGiswRuvFlHERZXZVLKaE4l9Y1fBNJPfqSEkGPXs4QjB8/PiA9epPJPcfDMASpGiX0FjvMNjYgPfqXXnoJI0eOxPhBSdDr2jzus2/fPlx77bUetwWiNx8qA+KUsLE8jNboh2+kqpsA+LasCfFquVtVTKCIA0jqdRZkJXavMqGU4kyjHlfmdTd+47IS8eGR8+A8lGlRSoPSuekNxg1MwD2FuXjlu3NYNmkwZg3v3hYPCGv9v3/+DIYAG5eO75UwyLzRaXh9fyX0Fjviw1Bm+eyRZ1HWVgaTTZhYFI5qjDEDxmDN9DU+95EzjCOx6XlSE8uyzkEZrh6sQibEmIXkffdzorf16J9/7hls/MvrAYduRD362Fj3UGiKRolmgxVNemtAevSzZ8/GtLkLcP01i0KqOOrNwSDOmnqTDRp1dE2t5NH7gecpvi1rxuWj0kLKyIvyBN5KLJv1VmhNdrf4vMi4gQmw2HlUeRiw0WFhYbHzQc9bDTf/dcVIDBkQgz99ftKrps+Okov47kwz/nDlaAxM6nlJpSfmjUoD2wtqlpRSRLISevOmp7Hk8mmYO7fQhx79Dtx63QLMmj4VCxYsQGNjIwDAYmjHLddfFxU9+rLTpaiuPNvNIfGnR9/VW5cxDFLiVHj26ScD0qMfMnIcYgZkgiHEp/5UR0cHrrnmGowePRr33nsveF64WLre9fz617/GlClTkJeXh9deew0AwHEc7rzzTowfPx4TJkzAiy++GPB3yTAEibEK6Mx2cNEeEemtZTZa//qaBEJJjZZmr9lJtx+rCen4NoOVZq/ZSd84UOlx+/4zTTR7zU56sKK527ZTdTqavWYn/edPtd22nWnooNlrdtJ/FdeFtK5w8l258B7+/E1Zt21tBiudvHE3XfLS95Tl+F5bg43l6Ph1u+iaT0tCfo6ubeU2lqMlNVrarLf0dHkBceTIETohP58eqain9c1tdMSIEXTTpk308ssvp7/73e+c+xVX1DilBl5//XX6yCOPUEopXfXb++jvfv8otbEc3blzJwVAm5uF8youLo5SSuk333xD77nnHsrzPOU4jl5zzTX0u+++o1VVVVQmk9GffvqJUkrpihUr6HvvvUcppfTyyy+nR48edb7+sWPH6O23304ppfStt96i999/P33hb6/RpStWUkopzcvLo1VVVbS5uZkWFhZSg0GQN3jmmWfohg0bKKWUZmdnO9fWFTvL0ZO17XTm7ELn6/7973+ny5cvp3a7IC3Q2tpKdSYbPVGjpVXNBp/Pt3fvXqpSqei5c+coy7J0wYIF9B//+Ee3dbS2tlJKKTWZTDQvL4+2tLTQoqIiumDBAudzabXBSX8YLHZaUqOlrQarz/04jqcNOrPf/UR+0RIIvmrOQ+U/p5tAiKB/HgpJsQqo5IzX7lixfNKTRz8iXQOljPGYkG10yh9EL3QjMndUGpZNGoSXvzvnHGwi8tRXp6Ez2/HM9RN6tU
tQLLPcVx6+MstIyh8Agub5tdctgUqtRkxsnFc9+vM1tbjnlmWYMGECNm3ahFOnhOFthw99j2uX3QizjYu4Hv2119+IE8ePoqqqyvlYIHr0npDLGAyIU4LleNgd+YiuevRqTQIutJmgVsoCmlI2ffp05ObmQiaTYeXKlfj++++77bNlyxZMnDgRM2fORE1NDSoqKpCbm4vKyko8+OCD2LVrFxISEvy+litCTb0MWh8FC3qLHRVNejR2WHrFhgH9KHRztsmAmU/9By99WxGQLHCg7C1vwqQhSSGXXhFCHE1T3g19qkblcf6sUs5gZIbGY0K2MQT5g97k/64ZiziVHI9+dtJZ4vfD2Rb841gtVs/Nxdis4P5AQmHe6DQ0dFhQ3hieMstIyh+IiDr0XStvRD16nlI8/sc/YNXqe3Hy5Em8+uqrsFgszmMJiE9jQR169MXFxSguLsbZs2dx9913A0A3PXpvuuq7d+/GokWL3B8kDFbd9xCeffZZt9dauHCh87VKS0s9Tr3yRJrj78FTeaKN5VDdaoKcIchJiQvIgeiaF+r6+759+7Bnzx4cOnQIJSUlmDRpEiwWC5KTk1FSUoJ58+bhlVdecVPEDAShpl4Bo5V1JtFd38f5VqMjNCvoXIVDFsQT/cbQxyhluGx4Kp7ffQYLXvgOX5+s77Fn19RhwYlaHa4IYYC1K5kJajR6M/SNeozO9N6lOi5LqLzp+l7EO4T0IATNepMUjQp/unosis5r8dHRC7DYOfzx85PITonFQ1eMjMgaLh8l3HXtLQtPmSXLR9ajnz17Nr76ciesFgt0HXqPmuc2lode34GhQ4RZQO+8845z29y5c7H7i09hsnER16NneYqbbrkde/bsQXNz4Hr03lDIGchlDDosLOwc79Sjt9hsqG41ob2tDTmpcQHnzY4cOYKqqirwPI9t27Z1S/DqdDokJycjNjYWZWVl+PHHHwEI4xB5nsfy5cvxxBNP4Pjx4wG9nitda+p5StHUYcGZRgP0FhaZCWqMzNCEpYjAG/3G0A9KisErt0/Bh6tmIE4px+8+OI5bXj+M034UIH0h1mXPDzFsI5KVqEZ9R/emKY4XKm5GZ3j3dscNTECr0dZNm76pw4IEtRwxyuhpm3TlhimDMSs3Bc98VYb1O06hutWEp5ZNiJj+SmaiGmMy48NWZhlpj17UPF+xaA5WLl/qUfPcynL43X+vxV2334IpU6Yg1aFHDwCPPfYYjh/+AYvmTMP27dsjpkdPKRV0bmJUeOihh4LSo/dVOqmQCSO3WvRWrFq1CkOGDMGECfm4bv4sHNr9L6gVMmzZsgWDBw9GbW0t8vPznR53Vz36adOm4YEHHsDYsWMxbNgwLFu2zO21rrrqKrAsi7Fjx2Lt2rVOhc66ujrMmzcPBQUFuO222/D000/7/Kw8IdbUtxttQpim0YCGDgvi1XKMyohHeoK620SxsOMteB+tf+FIxtpZjr57qJpO3PANHbZ2J/3T5ycCTnK4svrdo3TmU3soz/csifjUV6V05B+/6vY8Vc0Gmr1mJ/34yHmvx/54roVmr9lJvz3d6Pb4b98togv+vK9H6+oNKpsNdOSfvqLZa3bS339SHPHXf/qr03T4o1/SDrMt6GO7Jrhq20z057r2cC0tIPR6PS2v76CnLzR51Dxv1JlpSY3Wa2K71SDooZttbK+sz5Mevd2RtG7qCH/S+nyrkZ6sbad2lqMXWo20pEZL20L4W442WodOfUmNlpbVd4R0frryi07GishlDG6fmY19f5iH38zKwUdHajBv0168dbAK9gBna1pZDt9XtGD+mPQe131nJahh43i0ddGoLnMmYr179GMHCtu6JmQb9YFPlookw1Lj8MfFYzAqQ4M/XT024q8/b3RgZZaU0m4x066wIYp09YTVq1fj1wtm45pfzfaoeW5heTc53654apwKJ5706HtT4TM9XgWeUlS2GKE12ZCRoO4zipDBkKBWIEGtiEiYxhP9um
EqKVaJ9UvycMuMoXh8Zyk2fFGK0/UdeO6GiX6PPVLVBqONC1qt0hPiSMF6ncUt6SqKmYnSwZ5IUCswdEBst4RsU4cVM3JDkxrube6cPQx3XJYTMX0YV6a4qFle5UW47ec6HZ7++jR+ONeKBWMzcE9hLqbldK9QsXORlT8ABM3zC61GmO28x0osq0PMzBsquXARMNnYsGq3+ILleqZzM2PGDFit7qFJUY9erZAhQa1Ah8WO5Fgl0vtAlVkoMAxBTogzIMJBvzb0IqMy4vHuXdPx7K5yvPLdOVwxNsNjJ6or35Y1QSVncNnwVJ/7BYLrpKnxgzpjruUNegwdEIs4P+qT47IS3Dx6nqdo0kdP/iAQomHkAaHMcvaIzjJL13XUtZvx52/K8XlxHZJiFLh52lDs+rke/y5tRP7gRDxWmAie0s7KF45HbBDKoOFCLmPAehD2og5NF18GnBCCGIUM5ggqWYpJ61ClIg4fPuxz+8AkNWJNMqTGByfgJ9FJvwzdeIIQgkcWjsL4QQl49LOTPgdvU0rxn9NNuGx4SliSnVleJk0JFTf+pXrHDUxAVYvRqdXdZrLBztE+GbrpC3Qts+yw2PHsrjLMf34fdp6sx2/nDse+/5mPp6+fgB/WXoEnl42HwcKizWhHeYMezXoLWJ6PuKCZiIwh4HgKvkullT2AcXcAEKuUw2LnItaN2VPlSn8o5bLIJCz7Mb8YQw8I2e8XbyyA0cpi7fYTXssvzzUbcaHNhF/1sKxSJFWjgowhbjIIVpZDVYvRTYPeG6JkcZnDqw9lstQvCVHNck9pI94+WIV5m/bh5X3ncM2ELHz7+8uxdvEYp3pmjFKGW2dkY88jlyNVo4RSzqBeZ0FZvR6U0ojH6IFOg9l10pRzXJ6fKqZYpQwUiJiSJcvxICB9ZmyeRHd+EaEbV0ZmxGPt4jHY8EUpPj5ag5XTu5eg7XUOGel5fB4QPLT0eJVb09S5JiM4ngbk0ecN6kzITs0ZENJkqV8SWYkxGJMZj+d3CzXblw1PwR+vHusWNusKwxCoFTIMT9PAbGPRbLBBZ7YjJgqj+cQQCMfzcPXFLPbAxuWJCVmzjfWp/xIuWI5CJiNSWKUP84vy6EXumJWD2SNS8PjOUlR7EAz7T1kjxmTGu+nD95TMRLXTEweA8kbBOw/E0GcmqJEcq3AmZDs9esnQe+O2mdmYNDQJb905DR+smuHTyHclRinH0AGxmDAoMSqqg06Pnu/q0QtKmv5CJHKZMGB+Ut7oiMgUD03V4GzZKee2UGWKuxKoTPGIESNACHG+13ASiIzxvHnzUFRU1O3x4uLiXlXHDIZfpKFnGILnV0yEnCF45JNit6EgOrMdRdVazA+TNy+S1WV2bFmDHgqZ0PbsD6c2vTN0I3j0aR5kEyQEbpuZjc/vmx2W8thII8oT27uGbuw85KABvZ9YhRyukclQZYoppXjrrbdRV1fn9vjXX3+Nq666CgCQOXAQXv3L80E9byCGXpQp3rt3r9d9Zs+ejT179iA7Ozuo1w+Ur776CklJSSEd25cM/S8udCOSlRiDx389Hv/1cTFe+e4cHviV0KZ/oKIZLE/DUlbpSkaCGt+5TEA606DH8DRNwC3c47IS8M6h87BzPBr1Fmc8WaJ3aHjqKVhPl4X1OVVjxyDzj3/0uc/jjz+O999/H7GJyRg2dCgumzkdO3fuREFBAfbs/Q7LbrgR0wvG44knnoDNZkNKSgo++OADZGRkoLW1FStXrkRdXR0mTZ0OSnln34hGo3HKHmzatAmffPIJrFYrli1bhg0bNqC6uhqLFy/GnDlz8MMPPyAjayBeffdj7PzySxwtKsKNK29BbEwM9u0/iJTEOKdM8cmTJ3H5gitx/PAPKC8vx+jRo93ez+7du/HYY4/BarVi+PDheOutt/Dmm286ZYpTU1M9GvKNGzc6ZYqXLFmCZ555BmvWrMGuXbvAMAzuuecePPjgg5g0aZLfz/3+++/HlVdeiSVLlm
DZsmVITk7Gm2++iTfffBPnzp3Dk08+iffffx9btmyBzWbDjBkz8Pe//x0ymQw5OTkoKipCamqq87tJS0vDkCFDMGXKFPzhD38AINx93HfffWhvb8cbb7yBGTNmYN26dTCbzfj+++/x6KOPugnTRZqALAUh5CpCSDkh5CwhZK2H7SpCyDbH9sOE3HTAzQAAIABJREFUkBzH49MJIcWOfyWEkGVdj40mSwsG4bqJA7F5TwVO1uoACGWVSbEKTBoa/JARX2QlqmG0cdBbBL2L8obAKm5Exg1MgI3lUdlsRKPO0mc0biTCx9GjR7F9+3YUFxfjlfc+RfFPnboqFqsVH365Fw89/AjmzJmDH3/8ET/99BNuvvlmPPfccwCADRs2YM6cOTh16hSWLVuG+rrabmWWu3fvRkVFBY4cOYLi4mIcO3YM+/fvh53jUVFRgWW33IVtu3+AIkaD7Z9ux/XXL8ekyZOx+ZU38fGuA6g3ctj+7wMYMSYPOrPdoevO4P7/egRPPfWU22u1tLTgiSeewJ49e3D8+HFMnToVL7zwAh566CEMHDgQe/fu9eqtr1u3DlOnTsUHH3yATZs24bXXXkN1dTWKi4tx4sQJ3HrrrQF/roWFhThw4AAAQdKgtLQUAHDgwAHMnTsXp0+fxrZt23Dw4EEUFxdDJpPhgw8+8PjdlJSU4Ouvv+4WqmFZFkeOHMHmzZuxYcMGKJVKbNy4ETfddBOKi4ujauSBADx6QogMwN8ALARQC+AoIWQHpbTUZbe7AWgppSMIITcDeBbATQB+BjCVUsoSQrIAlBBCvqCU9o4WZwg8vjQPR6va8PC2n7DjgTnYV96MeaPSwl5BIDZNNegsoAAu6iwYFUDFjUjeQCHGXFqvc3TFSmGb3sSf590bHDx4EEuXLkVMTAwSExJwxZWdUsC/vn4FAGF84PnqWtx0002or6+HzWbDsGHDAAD79+/HZ599BgBYtuQ6JCQmwWR3/1NzlSkGgA69HgePnUSBIgmDhmQjd0we4mMUmD1jGuy6RmSnCMJhg5JiMC4rAQYriw8O7MPMyxfgQpsJF9stoKBYcdPN+NuLm7zKFAOAzWbDrFmzQvps9uzZg3vvvdcpUzxgwICAjy0sLMTmzZtRWlqKcePGQavVor6+HocOHcKWLVvwzjvv4NixY5g2bRoAwGw2Iz3d/Y5e/G7UajXUarWbhDQAXH/99QB8yztHk0BCN9MBnKWUVgIAIeRjAEsBuBr6pQDWO37+FMBLhBBCKXUNwqkBRH9KbheSYpXYtCIft79xBHe/cxRtRlvY4/MAnM1N9ToLdGbBqx8ThEefmxoHpZxB6cUONHZYMX5g4MlFiUsPuYzANRcrVwnnj1rO4MEHH8QjjzyCJUuWYN++fVi/fn234xmGgBB08+ipQ6b4jrtWoa7dDKOVhVohg67pIjSxMRidGQ9CCGLVSmeoR0TGECTGKHD4+7349NNPEROvgUYth0LGIClOjd///vceZYo/+uij8H0wITBo0CC0t7dj165dmDt3Ltra2vDJJ59Ao9EgPj4elFLccccdIQmWiYgSz77knaNJIKGbQQBqXH6vdTzmcR+Ht64DkAIAhJAZhJBTAE4CuNeTN08IWU0IKSKEFIkSp5GkcGQa7rwsBz9WtkHGEFw+Ki3sr+HaNCU28gQTupHLGIzJjMeJWh1aDFaptLIfMnv2bHzxxRewWCywmkz4dvfXzm12jgdDCBQyBjqdDoMGCX+CXWWKP/zwQwBCslTX3g6LnXfrF1mwcBFefX0rSiobYLFzYExtSIQJafEqEOK5o9mTTHFqairiVHIkxSqRGKNAjFKOO++8M2wyxV0RZYpFI9rW5nkQuDdmzpyJzZs3Y+7cuSgsLMTzzz+PwsJCAMAVV1yBTz/91Km62dbW1m1Aiut3YzAYPEpIdyXY99ib9Ho2j1J6mFKaB2AagEcJId0sFKX0NUrpVErp1LS08BvZQFi7eAxGpmtw2fAUp350OE
l3hFoadBaUN+ihUcmDLt8cl5WAY+e1oBR9Wv5AIjREmeL8/Hz8fyuvx8gx45wyxTZO6IglhGD9+vVYsWKFR5ni/fv3Iy8vD5999hkGDxkKnlJYHI1WOrMNORNnYeF1y3HHskW46co5uOeOW7t57l3xJ1MsolQqwypT7MqqVaswdOhQ5OfnY+LEic4LWqAyxYWFhWBZFiNGjMDkyZPR1tbmNPTjxo3DE088gUWLFiE/Px8LFy5EfX291+9m8eLFHiWkuzJ//nyUlpaioKAA27ZtC+h99hreZC3FfwBmAfjG5fdHATzaZZ9vAMxy/CwH0AKAeHiubyHE7PvkzFiDxU4NFnuvPf/kjbvp2u0n6IpXfqDL/vZ90Me/80MVzV6zk2av2Un3lDb0wgp/2XiSfo00er2eUkrp2YstdFx+AS0qKqKUUnr6oo6ebzUG9VwWG0tLarS0vt1EK5sNtKRGS8sbOnp0jnuSKf6lIH43RqPRo4R0JAlWpjiQGP1RACMJIcMA1AG4GcAtXfbZAeAOAIcA3ADgW0opdRxTQ4VkbDaAMQCqQ7wm9Tr+xMV6SmaiGg06M8406rF4vG9RNU/kDeyUM5aapfonq1evRmlpKYwmM66+/iZMnDQJHE9h43gkB1lOq3QoWTbprWAIQVZiDFI1yh71FWzdujXkYy91xO/GYrHgjjvu6CYh3Zfxa9kcRvoBCF67DMCblNJThJCNEK4gOwC8AeA9QshZAG0QLgYAMAfAWkKIHQAP4D5Kafjb1y4RshLVKK7Rod1kD0jjpiujMxNAhKE7zlCQRP9CDEm0m2y40GYCy1HwVAi9qIM09IQQpGpUsLE8MhPUUPTRvgtfMsV9CfG7uRQJyIWllH4F4Ksuj61z+dkCYIWH494D8F4P19hvyEhQo8UgxC9HBZGIFdGo5MhJicOFNhNS4iRD358Ry3tFbx7wL2bmiUvhzs+fTLFEz/nFdsZGA7HyBgDG+Jgq5YsJgxLB8rykFNjPEVUzWY6H1c6BgEid0BIhIxn6CCI2TaXFq0Ke/vN/146FzjFNXqL/IurdsLwwbEQpZyQ9domQkQx9BBFLIkOJz4ukx6sl+YNfAK4KlhY771eaWELCF9LZE0HEkYLBNEpJ/DIhhEDOMLBzPGwsD5VC+lOVCB3p7IkgQwbEoGBIEq4YG36JBYn+h5whMFk5UFCo5cEnYnNyciKiR88wDE6cOOHcFi49+t6kuLgYs2bNQl5eHvLz88Pe0FRUVISHHnrI5z7V1dUYP368x22ePvOeIBn6CKKSy/DP+2eHZeC4RP9HJiOwsIJWjejRh6qjEqoePeDZ6Ljq0Q8ePBhPPvlkUM8ZbUMfGxuLd999F6dOncKuXbvw8MMP+x0wEgxTp07Fli1bQj4+3IZeitFLSHjgwCdn0FLjWxogWFKHaFB44yif+7hqnqekZ2HEuHzs/883uGz6FBw8eBArV67EqFGj/OrRz5o1y03jJlg9+kGDBuFf//oXvvzySxQVFeHWW29FTEwMDh06BLVa7aZHf+2112L//v1h1aMHgF27duGPf/wjOI5Damoq/vOf/6CtrQ133XUXKisrERsbi9deew35+flYv349Lly4gMrKSly4cAEPP/wwHnroIaxduxZDhgzB/fffDwBYv349NBqNU0ceAAYOHIj09HQ0Nze7DRnhOA4jRoxAZWUldDodUv5fe3cfHVV9JnD8+yRAklUEilrUhAqSg0qEQBHJsRmPccubaBTDSzlb0UXQXVH22MISu1GgTddW1+5Za3lbFOvuUV6ynqagEpSkkqKG0ERIQEqIKYTwJmJ4kZCB/PaPuRkmk0lmQmYyl+vzOWdO7tx7594nv5l55je/e+e5fftSWFiIy+XC5XKxcuVKrr/+ep566ikqKipwu90sXLiQzMxMioqKeOmll1i/fj3Hjh1j+vTp1NXVkZaWxqZNm9i+fbt3H7NmzQra5gkJnbvanfbolbIJ/5rnOz4rAzzj9W63m9
LSUn7yk5+EXI9+//79rfbRVj16gL179/Lkk09SWVlJ7969ycvLIysry1sXvry8nISEBMrKyhg2bJj3F7YxMTHMnz8/rPXojx07xqxZs7ztsXbtWsBTz2f48OHs2LGDX/7ylzz88MPex3z++eds3LiRkpISFi1ahNvtZurUqaxZs8a7zpo1a1rVhi8pKaGxsZGbbrqpxfzY2FgGDx7Mrl27KC4uZsSIEWzZsoVz585x4MABkpOTyc3NJSMjg5KSEgoLC5k3bx5nzrS8POmiRYvIyMigsrKSrKysFs9LqG3eWdqjVyqAYD3vSPCveT52/AQAYoQWyam2Nng9+nvvvZc+fVpfPMe/Hv3p06fZu3cv/fv3Z8CAAaSmpgLt11V///33GT9+fIt506dPJzc3N2z16D/55BNcLpf3f2uuP19cXExeXh4AGRkZHD9+nJMnT3r/57i4OOLi4rj22ms5cuQIw4cP5+jRo9TV1XHs2DH69OlDUlKSdz+HDh3ixz/+MW+88QYxMa37venp6Xz00Ud88cUXZGdns2LFCu666y5v7fqCggLy8/N56SXPpRQbGhpafcAWFxfzzjvvADBu3LgWz0uobd5ZmuiVsinB6jGLcMUVF68tHEo9+rYYqx79448/3mJ+TU2Nt6Y6eHqzZ8+e9X844Eluzcm2Wbdu3aJej94//ubjGZMnT2bdunUcPny4xQfmyZMnuffee8nNzWX06NEBt+lyuViyZAl1dXUsXryYF198kaKiIm/lS2MMeXl5rYasjhw5ckkxt9XmnaVDN0rZhH/N800bPVVH/H8nFWo9+hMnTrTax9ixY3nttde84/UHDx70lhVuS6B69H379m21Xjjr0Y8ePdrbk4aL9efT09O9l/krKiri6quv5qqr2v+V+dSpU3n77bdZt24dkyd7KrU0Njby4IMP8vDDD5OVldXmY0eNGsXWrVuJiYkhPj6e1NRUli1bhsvlAjzt+corr3iPh5SVlbXaxp133ukdPiooKAj4vPgLdy17TfRK2USrmucpt9Hzql6tyl2EWo++f//+rfYxZswYpk+fTlpaGrfddhtZWVlBE0o06tFfc801LF++nEmTJjFs2DBvT3zhwoVs376doUOHsmDBghYfdG0ZMmQIp06d4oYbbuC6664DPGP1H330EatWrSI1NZXU1FTKy8sBz/Vq8/PzAU+POykpydvjT09P59SpU96Cazk5ObjdboYOHcqQIUPIyclptf/nn3+egoICUlJSWLt2Lf369aNnz/Z/S+Pb5mHp5bdVvzhat2jWo1ffbnaqR2+HmueBfJvr0V+qhoYG43Z7rgGwdetWM2zYsE5vMxL16JVSXcTuNc+/zfXoL9X+/fuZMmUKTU1N9OjRgxUrVnR5DJrolbKRy7nm+aW6XOrRX6rk5OSAY/ddSRO9UiqqtB595OnBWKWUcjhN9Eop5XCa6JVSyuE00SullMNpolfKobQefdtCrUf/yCOPsG7dulbzQ6k3byea6JW6jGg9+vDobD36ztab72p6eqVSARSuWs7Rv1WHdZvXfm8gdz8yu911fOvRJyUl8f3vf5/169eTmppKcXGx1qPvwnr0zT744ANeeOEFTp48ycsvv8zEiRNb1JsvKSlh7ty5NDQ0kJCQwOuvv87gwYOprKzk0UcfpbGxkaamJvLy8khOTg7txRJm2qNXyib869GXlpZ6lzU2Nmo9erq2Hn2zmpoaSkpK2LBhA0888QQNDQ0tlt98881s2bKFsrIyFi9ezLPPPgvA0qVLmTt3LuXl5ZSWlpKYmBhw+11Be/RKBRCs5x0J/vXo77vvPu8yrUcfnXr0AFOmTCEmJobk5GQGDhzoLczWrL6+nhkzZrB3717vRWIA0tLSyM3Npba2lkmTJkWtNw/ao1fqsuBfj37OnDns3LmTZcuWtephtsdY9ejLy8spLy+nqqqKmTNnAm3Xc/dXUFDAmDFjWsxrrx5987527drFypUrQ461o4LVo1
+9enWH69ED3m8ubd3Pycnh7rvvpqKiwltmGjwffvn5+SQkJDBhwgQ2b97c6f/xUoWU6EVknIjsEZEqEVkQYHmciKy2ln8qIjda838oIttFZKf1NyO84SvlHP716NevXx9wPa1H33X16AHWrl1LU1MT+/bto7q6utVxCN/nY9WqVd751dXVDBw4kKeffprMzMwWZyZ1taCJXkRigVeB8cCtwI9E5Fa/1WYCJ4wxg4DfAM0f618C9xljbgNmAG+GK3ClnKZVPfrbbqNXr16t1tN69F1Xjx6gf//+jBo1ivHjx7N06VLi4+NbbH/+/PlkZ2czfPjwFt+C1qxZQ0pKCqmpqVRUVLQ4ntDl2qpf3HwD0oCNPvezgWy/dTYCadZ0NzwJXvzWEeArIK69/Wk9ehUtWo8+OK1Hbw+RqEd/A3DA534tcEdb6xhjzotIPdDXSvjNHgL+Yow55/dYRGQ2MBsI2AtR6ttC69GrSOiSs25EZAie4ZwxgZYbY5YDywFGjhxpAq2j1LeB1qP3cFI9ejsIJdEfBJJ87ida8wKtUysi3YBewHEAEUkE3gEeNsbs63TESilH0Xr0kRfKWTfbgGQRGSAiPYBpQL7fOvl4DrYCZAGbjTFGRHoDG4AFxpg/hytopZRSoQua6I0x54E5eA647gbWGGMqRWSxiNxvrbYS6CsiVcAzQPMpmHOAQcBzIlJu3a4N+3+hlFKqTSGN0Rtj3gXe9Zv3nM90AzA5wON+AfyikzEqpZTqBP1lrFJKOZwmeqUcSuvRt2/cuHH07t2biRMnhn3bodSrr6mpISUlJeCyQG3eGZrolbqMaD368Jk3bx5vvhmZH+t3tl59uBO9Vq9UKoCv/7iPxrozYd1mj+uvoPd9gUvhNtN69BdFuh79PffcQ1FRUZvPxYULFxg0aBDV1dXU19fTt29fCgsLcblcuFwuVq5cyfXXX89TTz1FRUUFbrebhQsXkpmZ2aJe/bFjx5g+fTp1dXWkpaWxadMmtm/f7t3HrFmzgrZ5QkJC0NdXe7RHr5RNaD36i7qyHn1bYmNjGTx4MLt27aK4uJgRI0awZcsWzp07x4EDB0hOTiY3N5eMjAxKSkooLCxk3rx5nDnTsoOwaNEiMjIyqKysJCsrq8XzEmqbd5b26JUKIFjPOxK0Hv1FXVWPPpj09HRvFc3s7GxWrFjBXXfdxe233w542jM/P5+XXnoJgIaGhlYfsMXFxbzzzjuA57iA7/MSapt3liZ6pS4D/vXon3nmGe6//36KiopYuHBhyNsxVj36xx9/vMX8mpqaVvXcz549G3AbBQUF3mTbrL169G+99VbI8XVGsHr0hw8fDrk338zlcrFkyRLq6upYvHgxL774IkVFRaSnpwOe/zEvL6/VkNWRI0cuKea22ryzdOhGKZvQevQXRboefahGjRrF1q1biYmJIT4+ntTUVJYtW4bL5QI87fnKK694j4eUlZW12sadd97pHT4qKCgI+Lz4C9Y+HaWJXimb0Hr0F0W6Hj14PjQmT57Mhx9+SGJiIhs3bgRa1qOPi4sjKSnJewWq9PR0Tp065S24lpOTg9vtZujQoQwZMoScnJxW+3/++ecpKCggJSWFtWvX0q9fP3r27NluzL5tHpZeflv1i6N103r0Klq0Hn1wWo++4xoaGozb7TbGGLN161YzbNiwTm8zEvXolVJdROvRO8/+/fuZMmUKTU1N9OjRgxUrVnR5DJrolbIRrUfv4aR69MnJyQHH7ruSJnqlVFRpPfrI04OxSinlcJrolVLK4TTRK6WUw2miV0oph9NEr5RDaT369oVSj/6RRx5h3bp1reaHUm/eTjTRK3UZ0Xr04dOZevSdrTff1fT0SqUCeO+99zh8+HBYt9mvX79WVR/9aT36i6Jdj77ZBx98wAsvvMDJkyd5+eWXmThxYot68yUlJcydO5eGhgYSEhJ4/fXXGTx4MJWVlTz66KM0NjbS1NREXl4eycnJQfcXCdqjV8omtB
79RXaoR9+spqaGkpISNmzYwBNPPEFDQ0OL5TfffDNbtmyhrKyMxYsX8+yzzwKwdOlS5s6dS3l5OaWlpSQmJnZov+GkPXqlAgjW844ErUd/kV3q0QNMmTKFmJgYkpOTGThwoLcwW7P6+npmzJjB3r17ERHcbjcAaWlp5ObmUltby6RJk6LWmwft0St1WfCvRz9nzhx27tzJsmXLWvUw22OsevTl5eWUl5dTVVXFzJkzgbbrufsrKChgzJgxLea1V4++eV+7du1i5cqVIcfaUcHq0a9evbrDvXnA+82lrfs5OTncfffdVFRUeMtMg+fDLz8/n4SEBCZMmMDmzZs7vO9w0USvlE1oPfqL7FKPHmDt2rU0NTWxb98+qqurWx2H8H0+Vq1a5Z1fXV3NwIEDefrpp8nMzGxxZlJX00SvlE1oPfqL7FKPHqB///6MGjWK8ePHs3TpUuLj41tsf/78+WRnZzN8+PAW34LWrFlDSkoKqampVFRUtDie0NXE98i8HYwcOdL4HoRSqqvs3r2bW265JaoxnD59miuvvJJvvvkGl8vF8uXLbVWq+LHHHuOxxx7zXohDRUeg16qIbDfGjAy0fkg9ehEZJyJ7RKRKRBYEWB4nIqut5Z+KyI3W/L4iUigip0Xktx3+b5T6lpk9ezapqamMGDGChx56yFZJHjz16DXJX36CnnUjIrHAq8APgVpgm4jkG2N2+aw2EzhhjBkkItOAXwFTgQYgB0ixbkqpdmg9eg8n1aO3g1BOrxwFVBljqgFE5G0gE/BN9JnAQmt6HfBbERFjzBmgWEQGhS9kpSLHGNPqrAoVWVqPvmMuZbg9lKGbG4ADPvdrrXkB1zHGnAfqgdaH5dsgIrNFpFRESpuP2CvV1eLj4zl+/PglvZGU6grGGI4fP97qgHAwtvjBlDFmObAcPAdjoxyO+pZKTEyktrYW7WwoO4uPj+/wr2xDSfQHAd+fkiVa8wKtUysi3YBewPEORaJUlHXv3t37S0ylnCSUoZttQLKIDBCRHsA0IN9vnXxghjWdBWw2+v1XKaVsIWiP3hhzXkTmABuBWOA1Y0yliCwGSo0x+cBK4E0RqQK+wvNhAICI1ABXAT1E5AFgjN8ZO0oppSIopDF6Y8y7wLt+857zmW4AAv622BhzYyfiU0op1UlaAkEppRxOE71SSjmcJnqllHI4TfRKKeVwmuiVUsrhNNErpZTDaaJXSimH00SvlFIOp4leKaUcThO9Uko5nCZ6pZRyOE30SinlcJrolVLK4ZyT6N1n4eNXPX+VUkp5OSfR15XBxmfh499GOxKllLIVxyR6kzia+j7zMR+9AqcORzscpZSyDcck+rrinZw65OLg6SzY/PNoh6OUUrbhmER/zahk9rv30mTu55ttn8ChHdEOSSmlbMExif7LE1/z52uPcdAcpe7cPMz7/wZ6fXKllHJOou/Vqxfd4+IoumIPF2L7cmD392DPu8EfqJRSDueYRN9Qb7jyy1u4EBvDu91LcUsmZ/J/B+cbox2aUkpFlWMSfVxCd7pJAj1PDKY+9hxbuu+m9ug0TMl/Rzs0pZSKKsck+t3ndrCk/wJiuidw5ZkB1HQ7TlUc1OTvgW++inZ4SikVNY5J9KnXpPLA0Pv4/aCfc0ESiGu4mm3d9lErP+DUH/4z2uEppVTUOCbRd4/tTvYd2fzi7xexYcgSGi/0pLs7nj/12M2uvyTRdHRPtENUSqmocEyiN8Zw4fRpxt44lt8/8DqVo9+nwd0HmqA44St2L3k72iEqpVRUhJToRWSciOwRkSoRWRBgeZyIrLaWfyoiN/osy7bm7xGRseELvaXGqir+esdoaqb9iL9b9QeWXf8k3cfW0dDYizPSwJbzV/HVn/4Yqd0rpZRtBU30IhILvAqMB24FfiQit/qtNhM4YYwZBPwG+JX12FuBacAQYBzwO2t7YRdzxRX0nfUYxjRxfNlyDj86iyk/zee+/X+iZ30Ch2Pr+ePGKprOuyOxe6
WUsi0xQX49KiJpwEJjzFjrfjaAMebffdbZaK3zsYh0Aw4D1wALfNf1Xa+t/Y0cOdKUlpZ26p+6cOoU32zbxpmtH3Pmk485W/03/jzuHzh01Vl6XohDkE5tXymlIuGqczE89uv5l/RYEdlujBkZaFm3EB5/A3DA534tcEdb6xhjzotIPdDXmv+J32NvCBDgbGA2QP/+/UMIqX2xPXvSMyODnhkZALiPHqVXUSEF22po7OaYwxJKKYfp1nQhMtuNyFY7yBizHFgOnh59uLff/dprSZoylZlTwr1lpZSyv1C6tweBJJ/7ida8gOtYQze9gOMhPlYppVQEhZLotwHJIjJARHrgObia77dOPjDDms4CNhvP4H8+MM06K2cAkAyUhCd0pZRSoQg6dGONuc8BNgKxwGvGmEoRWQyUGmPygZXAmyJSBXyF58MAa701wC7gPPCkMSYyg1BKKaUCCnrWTVcLx1k3Sin1bdPeWTd6CopSSjmcJnqllHI4TfRKKeVwmuiVUsrhbHcwVkSOAX/rxCauBr4MUziRojGGh8YYHhpjeEQ7xu8ZY64JtMB2ib6zRKS0rSPPdqExhofGGB4aY3jYOUYdulFKKYfTRK+UUg7nxES/PNoBhEBjDA+NMTw0xvCwbYyOG6NXSinVkhN79EoppXxooldKKYdzTKIPdgHzLo6lRkR2iki5iJRa874jIptEZK/1t481X0Tkv6y4d4jIiAjF9JqIHBWRCp95HY5JRGZY6+8VkRmB9hXmGBeKyEGrLctFZILPsoAXno/ka0FEkkSkUER2iUiliMy15tumLduJ0W5tGS8iJSLymRXnImv+ABH51Nrnaqs8Ola589XW/E9F5MZg8UcwxlUi8oVPW6Za86Py3gnKGHPZ3/CUT94HDAR6AJ8Bt0Yxnhrgar95vwYWWNMLgF9Z0xOA9wABRgOfRigmFzACqLjUmIDvANXW3z7WdJ8Ix7gQ+GmAdW+1nuc4YID1/MdG+rUAXAeMsKZ7An+1YrFNW7YTo93aUoArrenuwKdWG60BplnzlwL/ZE3/M7DUmp4GrG4v/gjHuArICrB+VN47wW5O6dGPAqqMMdXGmEbgbSAzyjH5ywTesKbfAB7wmf8MgVzbAAADGklEQVR74/EJ0FtErgv3zo0xH+G5VkBnYhoLbDLGfGWMOQFsAsZFOMa2ZAJvG2POGWO+AKrwvA4i+lowxhwyxvzFmj4F7MZzHWTbtGU7MbYlWm1pjDGnrbvdrZsBMoB11nz/tmxu43XAPSIi7cQfyRjbEpX3TjBOSfSBLmDe3gs70gxQICLbxXPhc4DvGmMOWdOHge9a09GMvaMxRSvWOdbX4Neah0TsEKM1dDAcTy/Plm3pFyPYrC1FJFZEyoGjeJLfPuBrY8z5APv0xmMtrwf6RjpO/xiNMc1tmWu15W9EJM4/Rr9YopqjnJLo7eYHxpgRwHjgSRFx+S40nu9ytjqv1Y4xWZYANwGpwCHgP6IbjoeIXAnkAf9ijDnpu8wubRkgRtu1pTHmgjEmFc/1pEcBN0c5pFb8YxSRFCAbT6y34xmO+dcohhiUUxK9rS5Cbow5aP09CryD5wV8pHlIxvp71Fo9mrF3NKYuj9UYc8R6ozUBK7j4lTxqMYpIdzwJ9H+NMf9nzbZVWwaK0Y5t2cwY8zVQCKThGe5ovsyp7z698VjLewHHuypOnxjHWcNjxhhzDngdG7VlIE5J9KFcwLxLiMgVItKzeRoYA1TQ8gLqM4A/WNP5wMPW0frRQL3PEECkdTSmjcAYEeljfe0fY82LGL/jFQ/iacvmGANdeD6irwVrTHglsNsY87LPItu0ZVsx2rAtrxGR3tZ0AvBDPMcTCoEsazX/tmxu4yxgs/Xtqa34IxXj5z4f6oLnGIJvW9rivdNCVx31jfQNz9Huv+IZ4/tZFOMYiOcMgM+AyuZY8IwlfgjsBT4AvmMuHtV/1Yp7JzAyQnG9hefruhvP+ODMS4kJ+Ec8B7uqgE
e7IMY3rRh24HkTXeez/s+sGPcA47vitQD8AM+wzA6g3LpNsFNbthOj3dpyKFBmxVMBPOfzHiqx2mUtEGfNj7fuV1nLBwaLP4IxbrbasgL4Hy6emROV906wm5ZAUEoph3PK0I1SSqk2aKJXSimH00SvlFIOp4leKaUcThO9Uko5nCZ6pZRyOE30SinlcP8PkXs+3nm1ky0AAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_gradients(bad_trial)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `VanishingGradient` rule provided by Tornasole alerts for this automatically." + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD4CAYAAAAq5pAIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAANEUlEQVR4nO3db6hc9Z3H8c8nmoKaPEg2bIhJ3LRFhLqwVoIsblwjtSXqgxjE0oBLdK97KzTQgA9WXLHiUpDFdtn4IHCLIWnMWgpajLXS2hA27gNLrpqNUTfRjdHmEhNjxBpQoua7D+akXPXOmZtzzswZ832/4DIz5ztnzpeTfO75d8/8HBECcPab0XYDAAaDsANJEHYgCcIOJEHYgSTOHeTCbHPqH+iziPBU02tt2W2vsL3P9uu276rzWQD6y1Wvs9s+R9J+Sd+WdEjSLkmrI+KVknnYsgN91o8t+xWSXo+IAxFxUtIvJK2s8XkA+qhO2BdK+uOk14eKaZ9he9T2uO3xGssCUFPfT9BFxJikMYndeKBNdbbsE5IWT3q9qJgGYAjVCfsuSRfb/qrtr0j6nqRtzbQFoGmVd+Mj4hPbayX9VtI5kjZGxMuNdQagUZUvvVVaGMfsQN/15Y9qAHx5EHYgCcIOJEHYgSQIO5AEYQeSIOxAEoQdSIKwA0kQdiAJwg4kQdiBJAg7kARhB5Ig7EAShB1IgrADSRB2IAnCDiRB2IEkCDuQBGEHkiDsQBKEHUiCsANJEHYgCcIOJEHYgSQIO5BE5SGbMTgzZpT/Tr788su71m655Zam2zkjZct/8cUXS+ddtWpVaf3EiROVesqqVthtH5T0gaRPJX0SEUubaApA85rYsl8TEcca+BwAfcQxO5BE3bCHpN/Zft726FRvsD1qe9z2eM1lAaih7m78soiYsP2Xkp6x/b8RsXPyGyJiTNKYJNmOmssDUFGtLXtETBSPRyX9StIVTTQFoHmVw277AtuzTz+X9B1Je5tqDECzHFFtz9r219TZmkudw4H/jIgf95iH3fgKzjvvvNL6/v37u9YuvPDCWsu2XVqv+v9Hkj7++OPS+nXXXVda37FjR+Vln80iYsp/tMrH7BFxQNLfVO4IwEBx6Q1IgrADSRB2IAnCDiRB2IEkKl96q7QwLr31xcKFC7vW5syZ09dlr1+/vrR+9dVXd631unR27bXXVuopu26X3tiyA0kQdiAJwg4kQdiBJAg7kARhB5Ig7EASfJX0WWBiYqJSbTpuv/320vqyZctK68ePH+9au//++yv1hGrYsgNJEHYgCcIOJEHYgSQIO5AEYQeSIOxAEtzPnlyvr2t+6qmnSuv79u0rrd9www1dawcOHCidF9VwPzuQHGEHkiDsQBKEHUiCsANJEHYgCcIOJMF19rPcokWLSuu9rqO/++67pfWbbrqptP7ee++V1tG8ytfZbW+0fdT23knT5tp+xvZrxWN/RyIAUNt0duM3SVrxuWl3SdoeERdL2l68BjDEeoY9InZK+vx3C62UtLl4vlnSjQ33BaBhVb+Dbn5EHC6ev
y1pfrc32h6VNFpxOQAaUvsLJyMiyk68RcSYpDGJE3RAm6peejtie4EkFY9Hm2sJQD9UDfs2SWuK52skPdFMOwD6peduvO1HJS2XNM/2IUk/kvSApF/aHpH0pqTv9rPJ7OwpL5v+2UUXXdS19uSTT5bOe+mll5bWr7rqqtI619G/PHqGPSJWdyl9q+FeAPQRfy4LJEHYgSQIO5AEYQeSIOxAEtzi+iUwMjJSWh8bG+vbsjdu3Fhr/q1bt3at7dy5s3TeU6dO1Vp2VnyVNJAcYQeSIOxAEoQdSIKwA0kQdiAJwg4kwXX2IXDNNdeU1p9++unS+syZM5ts5zNmzCjfHtS5Fn7rrbeW1rds2VL5szPjOjuQHGEHkiDsQBKEHUiCsANJEHYgCcIOJFF7RBjU9/7775fWd+3aVVq/5JJLutbK7idvwuLFi0vrq1at6lrrNZw0msWWHUiCsANJEHYgCcIOJEHYgSQIO5AEYQeS4H521FI2XLQkPffcc11re/bsKZ13xYoVlXrKrvL97LY32j5qe++kaffZnrC9u/i5vslmATRvOrvxmyRN9Sv23yPisuLnN822BaBpPcMeETslHR9ALwD6qM4JurW29xS7+XO6vcn2qO1x2+M1lgWgpqph3yDp65Iuk3RY0k+6vTEixiJiaUQsrbgsAA2oFPaIOBIRn0bEKUk/k3RFs20BaFqlsNteMOnlKkl7u70XwHDoeT+77UclLZc0z/YhST+StNz2ZZJC0kFJ3+9jjxhic+Z0PV0jSTr//PO71t56662m20GJnmGPiNVTTH64D70A6CP+XBZIgrADSRB2IAnCDiRB2IEk+Cpp1DJv3rzS+uzZs7vW3njjjabbQQm27EAShB1IgrADSRB2IAnCDiRB2IEkCDuQBNfZUWru3Lml9fXr11f+7A8//LDyvDhzbNmBJAg7kARhB5Ig7EAShB1IgrADSRB2IAmGbEapK6+8srT+7LPPltbfeeedrrUlS5aUzvvRRx+V1jG1ykM2Azg7EHYgCcIOJEHYgSQIO5AEYQeSIOxAEtzPjlL33HNPrfkffPDBrjWuow9Wzy277cW2d9h+xfbLtn9YTJ9r+xnbrxWP5QN1A2jVdHbjP5F0Z0R8Q9LfSvqB7W9IukvS9oi4WNL24jWAIdUz7BFxOCJeKJ5/IOlVSQslrZS0uXjbZkk39qtJAPWd0TG77SWSvinpD5LmR8ThovS2pPld5hmVNFq9RQBNmPbZeNuzJD0maV1E/GlyLTp300x5k0tEjEXE0ohYWqtTALVMK+y2Z6oT9K0R8Xgx+YjtBUV9gaSj/WkRQBN67sbbtqSHJb0aET+dVNomaY2kB4rHJ/rSYQLnnlv+zzBjRvnv5JMnT1b+7DvuuKO0vmLFitJ6r2GXt2zZUlrH4EznmP3vJP2DpJds7y6m3a1OyH9pe0TSm5K+258WATShZ9gj4r8lTXkzvKRvNdsOgH7hz2WBJAg7kARhB5Ig7EAShB1Igltch8DNN99cWl++fHlp/aGHHupaW7duXem8t912W2n92LFjpfW1a9eW1o8cOVJax+CwZQeSIOxAEoQdSIKwA0kQdiAJwg4kQdiBJBiyeQisXr26tP7II48MqJMv6nUdfcOGDQPqBNPFkM1AcoQdSIKwA0kQdiAJwg4kQdiBJAg7kATX2YfArFmzSuv33ntvaf3OO++svOxNmzaV1kdGRip/NtrBdXYgOcIOJEHYgSQIO5AEYQeSIOxAEoQdSKLndXbbiyX9XNJ8SSFpLCL+w/Z9kv5J0jvFW++OiN/0+CyuswN91u06+3TCvkDSgoh4wfZsSc9LulGd8dhPRMSD022CsAP91y3s0xmf/bCkw8XzD2y/Kmlhs+0B6LczOma3vUTSNyX9oZi01vYe2xttz+kyz6jtcdvjtToFUMu0/zbe9ixJ/yXpxxHxuO35ko6pcxz/r+rs6v9jj89gNx7os8rH7JJke6akX0v6bUT8dIr6Ekm/joi/7
vE5hB3os8o3wti2pIclvTo56MWJu9NWSdpbt0kA/TOds/HLJD0r6SVJp4rJd0taLekydXbjD0r6fnEyr+yz2LIDfVZrN74phB3oP+5nB5Ij7EAShB1IgrADSRB2IAnCDiRB2IEkCDuQBGEHkiDsQBKEHUiCsANJEHYgCcIOJNHzCycbdkzSm5NezyumDaNh7W1Y+5Loraome/urboWB3s/+hYXb4xGxtLUGSgxrb8Pal0RvVQ2qN3bjgSQIO5BE22Efa3n5ZYa1t2HtS6K3qgbSW6vH7AAGp+0tO4ABIexAEq2E3fYK2/tsv277rjZ66Mb2Qdsv2d7d9vh0xRh6R23vnTRtru1nbL9WPE45xl5Lvd1ne6JYd7ttX99Sb4tt77D9iu2Xbf+wmN7quivpayDrbeDH7LbPkbRf0rclHZK0S9LqiHhloI10YfugpKUR0fofYNj+e0knJP389NBatv9N0vGIeKD4RTknIv55SHq7T2c4jHefeus2zPitanHdNTn8eRVtbNmvkPR6RByIiJOSfiFpZQt9DL2I2Cnp+Ocmr5S0uXi+WZ3/LAPXpbehEBGHI+KF4vkHkk4PM97quivpayDaCPtCSX+c9PqQhmu895D0O9vP2x5tu5kpzJ80zNbbkua32cwUeg7jPUifG2Z8aNZdleHP6+IE3Rcti4jLJV0n6QfF7upQis4x2DBdO90g6evqjAF4WNJP2mymGGb8MUnrIuJPk2ttrrsp+hrIemsj7BOSFk96vaiYNhQiYqJ4PCrpV+ocdgyTI6dH0C0ej7bcz59FxJGI+DQiTkn6mVpcd8Uw449J2hoRjxeTW193U/U1qPXWRth3SbrY9ldtf0XS9yRta6GPL7B9QXHiRLYvkPQdDd9Q1NskrSmer5H0RIu9fMawDOPdbZhxtbzuWh/+PCIG/iPpenXOyP+fpH9po4cufX1N0v8UPy+33ZukR9XZrftYnXMbI5L+QtJ2Sa9J+r2kuUPU2xZ1hvbeo06wFrTU2zJ1dtH3SNpd/Fzf9ror6Wsg640/lwWS4AQdkARhB5Ig7EAShB1IgrADSRB2IAnCDiTx/1ppMKNb4Fc+AAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "input_image = (bad_trial.tensor('Net_input_0').step(2700).value[42]*255).reshape(28,28)\n", + "plt.imshow(input_image, cmap=plt.get_cmap('gray'))\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXwAAAD4CAYAAADvsV2wAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAPCElEQVR4nO3dTYilV53H8e+v0zpaMWMLaRiSTncFIkpo4kQKiQYcsLOIbUjDrCKlELOoGfAlI0IwUwtXPZuIKEQMRdSNhS4yCSPS0e6gDswiwcoLMUnHENTuvInlQkesRabxP4t7Q6rbe6u7+rlVT+We7weKW8+5p+7510PXL0/OPfc8qSokSdNvV98FSJK2h4EvSY0w8CWpEQa+JDXCwJekRuzuu4CNXH755TU7O9t3GZL0lvH444//oar2jnpuRwf+7OwsKysrfZchSW8ZSU6Ne84pHUlqhIEvSY0w8CWpEQa+JDXCwJekRnQK/CT3JHk+ydNJHkqyZ0y/m5P8KsmLSb7cZcy3iuVlmJ2FXbsGj8vLfVckqXVdr/BPAAer6jrgBeDuczskuQT4JvBx4Frgk0mu7Tjujra8DAsLcOoUVA0eFxYMfUn96hT4VXW8qs4MDx8F9o3o9iHgxar6dVW9DvwAONJl3J1ucRHW1s5uW1sbtEtSXyY5h38H8PCI9iuBl9YdvzxsGynJQpKVJCurq6sTLG/7nD69uXZJ2g7nDfwkjyR5ZsTXkXV9FoEzQOdJi6paqqq5qprbu3fkp4N3vP37N9cuSdvhvFsrVNVNGz2f5HbgFuBQjb591ivAVeuO9w3bptbRo4M5+/XTOjMzg3ZJ6kvXVTo3A3cBt1bV2phuvwDem+TqJG8HbgN+2GXcnW5+HpaW4MABSAaPS0uDdknqS9fN0+4F/g44kQTg0ar61yRXAPdX1eGqOpPkc8BPgEuA71TVsx3H3fHm5w14STtLp8CvqmvGtL8KHF53fAw41mUsSVI3ftJWkhph4EtSIwx8SWqEgS9JjTDwJakRBr4kNcLAl6RGGPiS1AgDX5IaYeBLUiMMfElqhIEvSY0w8CWpEQa+JDXCwJekRhj4ktQIA1+SGmHgS1IjDHxJaoSBL6lXy8swOwu7dg0el5f7rmh6dbqJuSR1sbwMCwuwtjY4PnVqcAwwP99fXdPKK3xJvVlcfDPs37C2NmjX5Bn4knpz+vTm2tWNgS+pN/v3b65d3Rj4knpz9CjMzJzdNjMzaNfkGfiSejM/D0tLcOAAJIPHpSXfsN0qrtKR1Kv5eQN+u3iFL0mNMPAlqREGviQ1wsCXpEYY+JLUCANfUvNa2cDNZZmSmtbSBm5e4UtqWksbuHUK/CT3JHk+ydNJHkqyZ0Sfq5L8LMlzSZ5NcmeXMSVpklrawK3rFf4J4GBVXQe8ANw9os8Z4EtVdS1wA/DZJNd2HFeSJqKlDdw6BX5VHa+qM8PDR4F9I/q8VlVPDL//M3ASuLLLuJI0KS1t4DbJOfw7gIc36pBkFrgeeGyDPgtJVpKsrK6uTrA8ta6VlRjanJY2cEtVbdwheQT4hxFPLVbVfw37L
AJzwD/XmBdM8i7gv4GjVfXghRQ3NzdXKysrF9JV2tC5KzFgcBU3rX/YaleSx6tqbuRz5wv8C3jx24F/AQ5V1dqYPm8DfgT8pKq+dqGvbeBrUmZnB8vtznXgAPz2t9tdjbR1Ngr8Tuvwk9wM3AX80wZhH+DbwMnNhL00SS2txJDG6TqHfy9wGXAiyVNJ7gNIckWSY8M+NwKfBj427PNUksMdx5U2paWVGNI4na7wq+qaMe2vAoeH3/8PkC7jSF0dPTp6Dn8aV2JI4/hJWzWhpZUY0jjupaNmeCs9tc4rfElqhIEvSY0w8CWpEQa+JDXCwJekRhj42nJuWibtDC7L1JZq6fZx0k7nFb62VEu3j5N2OgNfW8pNy6Sdw8DXlnLTMmnnMPC1pVq6fZzU1VYvcDDwtaXctEy6MG8scDh1CqreXOAwydDvfMerreQdryS1YlJ3Zdvojlde4UvSDrAdCxwMfEnaAbZjgYOBL0k7wHYscDDwJWkH2I4FDm6tIEk7xFbflc0rfElqhIEvSY0w8CWpEQb+FHMfeknr+abtlHIfeknn8gp/SrkPvaRzGfhTyn3oJZ3LwJ9S7kMv6VwG/pRyH3pJ5zLwp5T70Es6l6t0pthWf0xb0luLV/iS1AgDX5IaYeBLUiMMfElqRKfAT3JPkueTPJ3koSR7Nuh7SZInk/yoy5iSpIvT9Qr/BHCwqq4DXgDu3qDvncDJjuNJki5Sp8CvquNVdWZ4+Ciwb1S/JPuATwD3dxlPknTxJjmHfwfw8Jjnvg7cBfz1fC+SZCHJSpKV1dXVCZYnSW07b+AneSTJMyO+jqzrswicAf5mx/UktwC/r6rHL6Sgqlqqqrmqmtu7d+8mfhVJ0kbO+0nbqrppo+eT3A7cAhyqqhrR5Ubg1iSHgXcAf5/ke1X1qYuoV5J0kbqu0rmZwVTNrVW1NqpPVd1dVfuqaha4DfipYS9J26/rHP69wGXAiSRPJbkPIMkVSY51rk6SNDGdNk+rqmvGtL8KHB7R/nPg513GlCRdHD9pK0mNMPAlqREGviQ1wsCXpEYY+JLUCANf2mbLyzA7C7t2DR6X/+bz6dLW8J620jZaXoaFBVgbfkzx1KnBMXj/YW09r/ClbbS4+GbYv2FtbdAubTUDX9pGp09vrl2aJANf2kb792+uXZokA1/aRkePwszM2W0zM4N2aatNXeC7AkI72fw8LC3BgQOQDB6XlnzDVttjqlbpuAJCbwXz8/57VD+m6grfFRCSNN5UBb4rICRpvKkKfFdASNJ4UxX4roCQpPGmKvBdASFJ403VKh1wBYQkjTNVV/iSpPEMfElqhIEvSY0w8CWpEQa+JDXCwJekRhj4ktQIA1+SGmHgS1IjDHxJaoSBL0mNMPAlqREGviQ1wsCXpEYY+JLUCANfkhrRKfCT3JPk+SRPJ3koyZ4x/fYkeWDY92SSD3cZV5K0eV2v8E8AB6vqOuAF4O4x/b4B/Liq3g98ADjZcVxJ0iZ1CvyqOl5VZ4aHjwL7zu2T5N3AR4FvD3/m9ar6Y5dxJUmbN8k5/DuAh0e0Xw2sAt9N8mSS+5NcOsFxJUkX4LyBn+SRJM+M+Dqyrs8icAZYHvESu4EPAt+qquuBvwBf3mC8hSQrSVZWV1c3/QtJkkbbfb4OVXXTRs8nuR24BThUVTWiy8vAy1X12PD4ATYI/KpaApYA5ubmRr2eJOkidF2lczNwF3BrVa2N6lNVvwNeSvK+YdMh4Lku40qSNq/rHP69wGXAiSRPJbkPIMkVSY6t6/d5YDnJ08A/Av/RcVxJ0iadd0pnI1V1zZj2V4HD646fAua6jCVJ6sZP2kpSIwx8SWqEgS9JjTDwJakRBr4kNcLAl6RGGPiS1AgDX5IaYeBLUiMMfElqhIEvSY0w8CWpEQa+JDXCwJekRhj4ktQIA1+SGmHgS1IjDHxJaoSBL0mNMPAlqREGviQ1wsCXpEYY+JLUCANfkhph4EtSIwx8SWqEgS9JjTDwJ
akRBr4kNcLAl6RGGPiS1AgDX5IaYeBLUiMMfElqhIEvSY3oFPhJ7knyfJKnkzyUZM+Yfl9M8mySZ5J8P8k7uowrSdq8rlf4J4CDVXUd8AJw97kdklwJfAGYq6qDwCXAbR3HlSRtUqfAr6rjVXVmePgosG9M193AO5PsBmaAV7uMK0navEnO4d8BPHxuY1W9AnwVOA28Bvypqo5PcFxJ0gU4b+AneWQ4937u15F1fRaBM8DyiJ9/D3AEuBq4Arg0yac2GG8hyUqSldXV1Yv5nSRJI+w+X4eqummj55PcDtwCHKqqGtHlJuA3VbU67P8g8BHge2PGWwKWAObm5ka9niTpInRdpXMzcBdwa1Wtjel2GrghyUySAIeAk13GlSRtXtc5/HuBy4ATSZ5Kch9AkiuSHAOoqseAB4AngF8Ox1zqOK4kaZPOO6Wzkaq6Zkz7q8DhdcdfAb7SZSxJUjd+0laSGmHgS1IjDHxJaoSBL0mNMPAlqREGviQ1wsCXpEYY+JLUCANfkhph4EtSIwx8SWqEgS9JjTDwJakRBr7UqOVlmJ2FXbsGj8t/c786TZtO2yNLemtaXoaFBVgb3rbo1KnBMcD8fH91aWt5hS81aHHxzbB/w9raoF3Ty8CXGnT69ObaNR0MfKlB+/dvrl3TwcCXGnT0KMzMnN02MzNo1/Qy8KUGzc/D0hIcOADJ4HFpyTdsp52rdKRGzc8b8K3xCl+SGmHgS1IjDHxJaoSBL0mNMPAlqRGpqr5rGCvJKnDqIn/8cuAPEyznrcxzcTbPx9k8H2+ahnNxoKr2jnpiRwd+F0lWqmqu7zp2As/F2TwfZ/N8vGnaz4VTOpLUCANfkhoxzYG/1HcBO4jn4myej7N5Pt401ediaufwJUlnm+YrfEnSOga+JDVi6gI/yc1JfpXkxSRf7ruePiW5KsnPkjyX5Nkkd/ZdU9+SXJLkySQ/6ruWviXZk+SBJM8nOZnkw33X1KckXxz+nTyT5PtJ3tF3TZM2VYGf5BLgm8DHgWuBTya5tt+qenUG+FJVXQvcAHy28fMBcCdwsu8idohvAD+uqvcDH6Dh85LkSuALwFxVHQQuAW7rt6rJm6rABz4EvFhVv66q14EfAEd6rqk3VfVaVT0x/P7PDP6gr+y3qv4k2Qd8Ari/71r6luTdwEeBbwNU1etV9cd+q+rdbuCdSXYDM8CrPdczcdMW+FcCL607fpmGA269JLPA9cBj/VbSq68DdwF/7buQHeBqYBX47nCK6/4kl/ZdVF+q6hXgq8Bp4DXgT1V1vN+qJm/aAl8jJHkX8J/Av1XV//ZdTx+S3AL8vqoe77uWHWI38EHgW1V1PfAXoNn3vJK8h8FswNXAFcClST7Vb1WTN22B/wpw1brjfcO2ZiV5G4OwX66qB/uup0c3Arcm+S2Dqb6PJflevyX16mXg5ap64//4HmDwH4BW3QT8pqpWq+r/gAeBj/Rc08RNW+D/AnhvkquTvJ3Bmy4/7Lmm3iQJgznak1X1tb7r6VNV3V1V+6pqlsG/i59W1dRdwV2oqvod8FKS9w2bDgHP9VhS304DNySZGf7dHGIK38SeqpuYV9WZJJ8DfsLgXfbvVNWzPZfVpxuBTwO/TPLUsO3fq+pYjzVp5/g8sDy8OPo18Jme6+lNVT2W5AHgCQar255kCrdZcGsFSWrEtE3pSJLGMPAlqREGviQ1wsCXpEYY+JLUCANfkhph4EtSI/4fCqIpvyP0+eIAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The network predicted the value: 1\n" + ] + } + ], + "source": [ + "plt.plot(bad_trial.tensor('Net_output0').step(2700).value[42], 'bo')\n", + "plt.show()\n", + "print('The network predicted the value: {}'.format(np.argmax(bad_trial.tensor('Net_output0').step(2700).value[42])))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This concludes this notebook. For more information see the APIs as \n", + "- https://github.com/awslabs/tornasole_core: basic library\n", + "- https://github.com/awslabs/tornasole_mxnet: data generation for MXNet\n", + "- https://github.com/awslabs/tornasole_tf: data generation for TensorFlow\n", + "- https://github.com/awslabs/tornasole_tf/tree/pytorch/tornasole_pytorch: data generation for PyTorch\n", + "- https://github.com/awslabs/tornasole_rules: data analysis in Numpy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + }, + "pycharm": { + "stem_cell": { + "cell_type": "raw", + "source": [], + "metadata": { + "collapsed": false + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/examples/pytorch/scripts/pytorch_hook_demos.py b/examples/pytorch/scripts/pytorch_hook_demos.py new file mode 100644 index 0000000000..ab7b57f84d --- /dev/null +++ b/examples/pytorch/scripts/pytorch_hook_demos.py @@ -0,0 +1,161 @@ +# Credit to the official 
pytorch mnist example set https://github.com/pytorch/examples/blob/master/mnist/main.py for help with this + +from __future__ import print_function +import argparse +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torchvision import datasets, transforms +from torch.autograd import Variable +from tornasole.pytorch.hook import * +from tornasole.pytorch.torch_collection import * +import tornasole.pytorch as ts + + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + + self.add_module('conv1', nn.Conv2d(1, 20, 5, 1)) + self.add_module('relu0', nn.ReLU()) + self.add_module('max_pool', nn.MaxPool2d(2, stride=2)) + self.add_module('conv2', nn.Conv2d(20, 50, 5, 1)) + self.add_module('relu1', nn.ReLU()) + self.add_module('max_pool2', nn.MaxPool2d(2, stride=2)) + self.add_module('fc1', nn.Linear(4*4*50, 500)) + self.add_module('relu2', nn.ReLU()) + self.add_module('fc2', nn.Linear(500, 10)) + + + def forward(self, x): + x = self.relu0(self.conv1(x)) + x = self.max_pool(x) + x = self.relu1(self.conv2(x)) + x = self.max_pool2(x) + x = x.view(-1, 4*4*50) + x = self.relu2(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + +def train(args, model, device, train_loader, optimizer, epoch): + model.train() + count = 0 + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(Variable(data, requires_grad = True)) + loss = F.nll_loss(output, target) + loss.backward() + count += 1 + optimizer.step() + if batch_idx % args.log_interval == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. 
* batch_idx / len(train_loader), loss.item())) + +def test(args, model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss + pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability + correct += pred.eq(target.view_as(pred)).sum().item() + + test_loss /= len(test_loader.dataset) + + print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( + test_loss, correct, len(test_loader.dataset), + 100. * correct / len(test_loader.dataset))) + + + +# Create a tornasole hook. The initilization of hook determines which tensors +# are logged while training is in progress. +# Following function shows the default initilization that enables logging of +# weights, biases and gradients in the model. +def create_tornasole_hook(output_dir, module=None, hook_type='saveall'): + # Create a hook that logs weights, biases, gradients and inputs/ouputs of model every 10 steps while training. + if hook_type == 'saveall': + hook = TornasoleHook(out_dir=output_dir, save_config=SaveConfig(save_steps=[i * 10 for i in range(20)]), save_all=True) + elif hook_type == 'module-input-output': + # The names of input and output tensors of a module are in following format + # Inputs : _input_, and + # Output : _output + # In order to log the inputs and output of a module, we will create a collection as follows: + assert module is not None + get_collection('l_mod').add_module_tensors(module, inputs=True, outputs=True) + + # Create a hook that logs weights, biases, gradients and inputs/outputs of model every 5 steps from steps 0-100 while training. 
+ hook = TornasoleHook(out_dir=output_dir, save_config=SaveConfig(save_steps=[i * 5 for i in range(20)]), + include_collections=['weights', 'gradients', 'bias','l_mod']) + elif hook_type == 'weights-bias-gradients': + save_config = SaveConfig(save_steps=[i * 5 for i in range(20)]) + # Create a hook that logs ONLY weights, biases, and gradients every 5 steps (from steps 0-100) while training the model. + hook = TornasoleHook(out_dir=output_dir, save_config=save_config) + return hook + +def main(): + # Training settings + parser = argparse.ArgumentParser(description='PyTorch MNIST Example') + parser.add_argument('--batch-size', type=int, default=64, metavar='N', + help='input batch size for training (default: 64)') + parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', + help='input batch size for testing (default: 1000)') + parser.add_argument('--epochs', type=int, default=1, metavar='N', + help='number of epochs to train (default: 1)') + parser.add_argument('--lr', type=float, default=0.01, metavar='LR', + help='learning rate (default: 0.01)') + parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='SGD momentum (default: 0.9)') + parser.add_argument('--log-interval', type=int, default=10, metavar='N', + help='how many batches to wait before logging training status') + parser.add_argument('--output-uri', type=str, help="output directory to save data in", default='./tornasole-testing/demo/') + parser.add_argument('--hook-type', type=str, choices=['saveall', 'module-input-output', 'weights-bias-gradients'], default='weights-bias-gradients') + parser.add_argument('--mode', action="store_true") + parser.add_argument('--rule_type', choices=['vanishing_grad', 'exploding_tensor', 'none'], default='none') + args = parser.parse_args() + + device = torch.device("cpu") + train_loader = torch.utils.data.DataLoader( + datasets.MNIST('./data', train=True, download=True, + transform=transforms.Compose([ + transforms.ToTensor(), + 
transforms.Normalize((0.1307,), (0.3081,)) + ])), + batch_size=args.batch_size, shuffle=True) + test_loader = torch.utils.data.DataLoader( + datasets.MNIST('./data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ])), + batch_size=args.test_batch_size, shuffle=True) + + model = Net().to(device) + + if args.rule_type == 'vanishing_grad': + lr, momentum = 1.0, 0.9 + elif args.rule_type == 'exploding_tensor': + lr, momentum = 1000000.0, 0.9 + else: + lr, momentum = args.lr, args.momentum + + optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum) + + hook = create_tornasole_hook(output_dir=args.output_uri, module=model, hook_type=args.hook_type) + hook.register_hook(model) + + for epoch in range(1, args.epochs + 1): + if args.mode: + hook.set_mode(ts.modes.TRAIN) + train(args, model, device, train_loader, optimizer, epoch) + if args.mode: + hook.set_mode(ts.modes.EVAL) + test(args, model, device, test_loader) + +if __name__ == '__main__': + main() diff --git a/examples/pytorch/scripts/simple.py b/examples/pytorch/scripts/simple.py new file mode 100644 index 0000000000..21728f3400 --- /dev/null +++ b/examples/pytorch/scripts/simple.py @@ -0,0 +1,107 @@ +from __future__ import print_function +import numpy as np +import random +import argparse +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torch.autograd import Variable +from tornasole.pytorch.hook import * +from tornasole.pytorch.torch_collection import * + + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.add_module('conv1', nn.Conv2d(1, 20, 5, 1)) + self.add_module('relu0', nn.ReLU()) + self.add_module('max_pool', nn.MaxPool2d(2, stride=2)) + self.add_module('conv2', nn.Conv2d(20, 50, 5, 1)) + self.add_module('relu1', nn.ReLU()) + self.add_module('max_pool2', nn.MaxPool2d(2, stride=2)) + self.add_module('fc1', nn.Linear(4*4*50, 500)) + 
self.add_module('relu2', nn.ReLU()) + self.add_module('fc2', nn.Linear(500, 10)) + + + def forward(self, x): + x = self.relu0(self.conv1(x)) + x = self.max_pool(x) + x = self.relu1(self.conv2(x)) + x = self.max_pool2(x) + x = x.view(-1, 4*4*50) + x = self.relu2(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + +# Create a tornasole hook. The initilization of hook determines which tensors +# are logged while training is in progress. +# Following function shows the default initilization that enables logging of +# weights, biases and gradients in the model. +def create_tornasole_hook(output_dir, module=None, hook_type='saveall', save_steps=None): + # Create a hook that logs weights, biases, gradients and inputs/ouputs of model + if hook_type == 'saveall': + hook = TornasoleHook(out_dir=output_dir, save_config=SaveConfig(save_steps=save_steps), save_all=True) + elif hook_type == 'module-input-output': + # The names of input and output tensors of a module are in following format + # Inputs : _input_, and + # Output : _output + # In order to log the inputs and output of a module, we will create a collection as follows: + assert module is not None + get_collection('l_mod').add_module_tensors(module, inputs=True, outputs=True) + + # Create a hook that logs weights, biases, gradients and inputs/outputs of model + hook = TornasoleHook(out_dir=output_dir, save_config=SaveConfig(save_steps=save_steps), + include_collections=['weights', 'gradients', 'bias','l_mod']) + elif hook_type == 'weights-bias-gradients': + save_config = SaveConfig(save_steps=save_steps) + # Create a hook that logs ONLY weights, biases, and gradients + hook = TornasoleHook(out_dir=output_dir, save_config=save_config) + return hook + +def train(model, device, optimizer, num_steps=500, save_steps=[]): + model.train() + count = 0 + # for batch_idx, (data, target) in enumerate(train_loader): + for i in range(num_steps): + batch_size=32 + data, target = torch.rand(batch_size, 1, 28, 28), 
torch.rand(batch_size).long() + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(Variable(data, requires_grad = True)) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + +parser = argparse.ArgumentParser(description='PyTorch MNIST Example') +parser.add_argument('--batch-size', type=int, default=64, metavar='N', + help='input batch size for training (default: 64)') +parser.add_argument('--epochs', type=int, default=1, metavar='N', + help='number of epochs to train (default: 1)') +parser.add_argument('--lr', type=float, default=0.01, metavar='LR', + help='learning rate (default: 0.01)') +parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='SGD momentum (default: 0.9)') +parser.add_argument('--tornasole-frequency', type=int, default=10, help='frequency with which to save steps') +parser.add_argument('--steps', type=int, default=100, help='number of steps') +parser.add_argument('--tornasole_path', type=str, help="output directory to save data in", default='./tornasole-testing/demo/') +parser.add_argument('--hook-type', type=str, choices=['saveall', 'module-input-output', 'weights-bias-gradients'], default='saveall') +parser.add_argument('--random-seed', type=bool, default=False) + +args = parser.parse_args() + +if args.random_seed: + torch.manual_seed(2) + np.random.seed(2) + random.seed(12) + +hook_type = 'saveall' +device = torch.device("cpu") +save_steps = [(i+1) * args.tornasole_frequency for i in range(args.steps//args.tornasole_frequency)] +model = Net().to(device) +hook = create_tornasole_hook(args.tornasole_path, model, hook_type, save_steps=save_steps) + +hook.register_hook(model) +optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum) +train(model, device, optimizer, num_steps=args.steps, save_steps=save_steps) diff --git a/examples/pytorch/scripts/torch_resnet.py b/examples/pytorch/scripts/torch_resnet.py new file mode 100644 index 
0000000000..d3bad58888 --- /dev/null +++ b/examples/pytorch/scripts/torch_resnet.py @@ -0,0 +1,98 @@ +import torch.nn.parallel + +import torch.optim +import torchvision.models as models + +import argparse +import torch +import torch.nn as nn +import torch.optim as optim +from torchvision import datasets, transforms +from torch.autograd import Variable +from tornasole.pytorch.hook import * +import time + +model_names = sorted(name for name in models.__dict__ + if name.islower() and not name.startswith("__") + and callable(models.__dict__[name])) + +parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') +parser.add_argument('--data_dir', default='~/.pytorch/datasets/imagenet', + help='path to dataset') +parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50', + choices=model_names, + help='model architecture: ' + + ' | '.join(model_names) + + ' (default: resnet50)') +parser.add_argument('--epochs', default=90, type=int, metavar='N', + help='number of total epochs to run') +parser.add_argument('-b', '--batch-size', default=256, type=int, + metavar='N', + help='mini-batch size (default: 256), this is the total ' + 'batch size of all GPUs on the current node when ' + 'using Data Parallel or Distributed Data Parallel') +parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, + metavar='LR', help='initial learning rate', dest='lr') +parser.add_argument('--momentum', default=0.9, type=float, metavar='M', + help='momentum') +parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, + metavar='W', help='weight decay (default: 1e-4)', + dest='weight_decay') +args = parser.parse_args() + +def main(): + start = time.time() + # create model + net = models.__dict__[args.arch](pretrained=True) + device = torch.device("cpu") + net.to(device) + # register the hook + + hook = create_tornasole_hook('./output_resnet', net, save_interval=50) + + hook.register_hook(net) + loss_optim = nn.CrossEntropyLoss() + optimizer = 
optim.SGD(net.parameters(), lr=1.0, momentum=0.9) + + + print("Loaded training") + batch_size = 64 + # train the model + for epoch in range(1): + for i in range(4096): + # Synthetic data generated here + data_in = torch.rand(batch_size, 3, 64, 64) + target = torch.zeros(batch_size).long() + data_in, target = Variable(data_in), Variable(target) + output = net(data_in) + loss = loss_optim(output, target) + if i % 10 == 0: + print("Step", i, "Epoch", epoch) + optimizer.zero_grad() + loss.backward() + optimizer.step() + end = time.time() + print("Time taken:", end - start) + +# Create a tornasole hook. The initilization of hook determines which tensors +# are logged while training is in progress. +# Following function shows the default initilization that enables logging of +# weights, biases and gradients in the model. +def create_tornasole_hook(output_dir, module, trial_id='trial-resnet', save_interval=100): + # With the following SaveConfig, we will save tensors for steps 1, 2 and 3 + # (indexing starts with 0) and then continue to save tensors at interval of + # 100,000 steps. Note: union operation is applied to produce resulting config + # of save_steps and save_interval params. + save_config = SaveConfig(save_interval) + + # The names of input and output tensors of a block are in following format + # Inputs : _input_, and + # Output : _output + # In order to log the inputs and output of a model, we will create a collection as follows + + # Create a hook that logs weights, biases, gradients of model while training. 
+ hook = TornasoleHook(out_dir=output_dir) + return hook + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/examples/tensorflow/notebooks/tf-sentiment-script-mode/Loss_Accuracy.ipynb b/examples/tensorflow/notebooks/tf-sentiment-script-mode/Loss_Accuracy.ipynb new file mode 100644 index 0000000000..e78f01e32f --- /dev/null +++ b/examples/tensorflow/notebooks/tf-sentiment-script-mode/Loss_Accuracy.ipynb @@ -0,0 +1,199 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using TensorFlow backend.\n" + ] + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "from keras.datasets import imdb\n", + "from tornasole.trials import LocalTrial" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING: Logging before flag parsing goes to stderr.\n", + "I0730 09:44:29.710710 4704150976 local_trial.py:20] Loading trial sentiment at path ts_output/\n", + "I0730 09:44:29.720427 4704150976 local_trial.py:58] Loaded 4 collections\n" + ] + } + ], + "source": [ + "lt = LocalTrial( 'sentiment', 'ts_output/', parallel=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['batch',\n", + " 'size',\n", + " 'loss',\n", + " 'acc',\n", + " 'mean_squared_error',\n", + " 'embedding_1',\n", + " 'conv1d_1_0',\n", + " 'conv1d_1_1',\n", + " 'dense_1_0',\n", + " 'dense_1_1',\n", + " 'dense_2_0',\n", + " 'dense_2_1',\n", + " 'val_loss',\n", + " 'val_acc',\n", + " 'val_mean_squared_error']" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], 
+ "source": [ + "lt.tensors()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def tplt( trial, tname ): \n", + " t = lt.tensor(tname)\n", + " steps = t.steps()\n", + " _t = [t.value(s) for s in steps]\n", + " plt.plot( steps, _t, label=tname)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAD8CAYAAACMwORRAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJztvXl4W+WZ9/95JEuW991OYiexnThAEhJIAkmAsjTA0E7btJTOD6YLbWmZDnSj7dtpZzrTTtt523euvi3QBd60Qwdoh6W0BdpSKCFhKeCQhbAkkNhxNjuJF9mxLdtaLD2/P845sjbbsiNv0v25Ll22jh4dPcfH+p77fJ/7uR+ltUYQBEFIL2wz3QFBEAQh9Yi4C4IgpCEi7oIgCGmIiLsgCEIaIuIuCIKQhoi4C4IgpCEi7oIgCGmIiLsgCEIaIuIuCIKQhmTN1AeXl5fr2tramfp4QRCEOcnu3bu7tNYV47WbMXGvra1l165dM/XxgiAIcxKl1NFk2oktIwiCkIaIuAuCIKQhIu6CIAhpyIx57okIBAK0trbi9XpnuiuzEpfLRU1NDQ6HY6a7IgjCLGdWiXtraysFBQXU1tailJrp7swqtNa43W5aW1upq6ub6e4IgjDLGdeWUUrdo5TqUEq9OcrrSil1p1KqWSn1ulJqzWQ74/V6KSsrE2FPgFKKsrIyuasRBCEpkvHc/xu4ZozX3wU0mI+bgbvOpEMi7KMjfxtBEJJlXFtGa/28Uqp2jCabgfu0sV5fo1KqWCk1X2t9MkV9FIQZocvjY0dLN3+7av5MdyVt+NPrJzlwqi9ue252Fp+8uA5nVuJ4c/uBDl492jPmvkvynNy4sRabbXJBUEe/l5ea3Ww+b8G4gVQgGOKXLx7G4x0GoDTPyY0XjW4nP72/nfqKPJZU5E+qb5MhFZ57NXA84nmruS1O3JVSN2NE9yxatCgFHy0IU8eDrxzjB385yIb6KynLz57p7sx5DncN8LkH9hDSEKmB1jLOdeV5/M2KeXHv6/L4uOVXexgKBBlNc619rKopZu3ikkn171cvH+XObc1UFmRz0dLyMds+vOs4//uJt6O2XVBXyooFRXFtD3cN8A/37+Lq5fO4+6NrJ9W3yZAKcU/050646rbWeguwBWDdunWyMrcwqznZa4xvtPYMibingB9va8KZZeOFr76TioKRv6dvOMjqf/8LO1q6E4r7z59vwTccZOuXLmNpZeLIt8vjY913t7LjsHvS4n6gvR+A27c2sXHJ6GN//uEQP9t+iPMXFfO7f7yIpg4PV//oeZo7PAnF/cfbmghpeOVIN6GQnvSdxURJRZ57K7Aw4nkNcCIF+50x3v/+97N27VpWrFjBli1bAHjyySdZs2YNq1evZtOmTQB4PB4+8YlPcO6557Jq1Sp++9vfzmS3hRTT0e8DoO300Az3ZO5zuGuAR19t46MbFkcJO0B2lp21i0tobHHHvc/t8XHfy0d53+oFowo7QHl+Nsuq8mls6Z50H5s6PLgcNl450s3LCfpi8cjuVtpOD/HFK5ehlK
K2LI8sm+KgeXGIxDruRaW5dA/4aerwTLp/EyUVkfvjwGeVUg8C64HeVPjt//6Hfew/Ee/NnQnLFxTyzfeuGLfdPffcQ2lpKUNDQ1xwwQVs3ryZT3/60zz//PPU1dXR3W38A33nO9+hqKiIN954A4CenrE9QWFu0dFnRe6DM9yTuY8Vtd986ZKEr2+oK+OHWw9yetBPca4zvH3LC0bU/tl3Noz7GRvqy3hkdyuBYAiHfWJxq284yFH3IJ+6pI5H97YZ0Xt9fPTuHw7x0+3NnL+omEsbDOvGmWWjtjyPpvZ44baO+47rz+MDP3uJxhY3Z80rmFDfJksyqZAPAC8DZymlWpVSNymlPqOU+ozZ5AmgBWgGfg7cMmW9nSbuvPNOVq9ezYYNGzh+/Dhbtmzh0ksvDeeXl5aWArB161ZuvfXW8PtKSiZ3OyjMTqzIvbVHIvczwYpeP7I+Pmq3WF9fhtbwyuGRyNvt8XHfS+NH7eF91JUx6A/yRlvvpPoYDGlWVBdxy+VLeeVw4ug9Nmq3aKjMj4vKI4/7/EUlVBfnJLw7mSqSyZa5YZzXNXDrWG0mQzIR9lTw7LPPsnXrVl5++WVyc3O5/PLLWb16NQcOHIhrq7WW9MQpZkeLm1/89TB3f2Qt9nG8yt7BALf8z26+94FVLCrLPaPPDYU0ndMg7t5AkH/81W6+dNVZnFsT79fORvzDIW66dyfHupO7o+n3DhtR+2X1o7ZZvbCI7CwbjS3dXG367lteaMGbZNQOsL7eCLoaW9ysWWQEWj946gB/eD3eJc6yKf7zulWsXWy856AZdS+ryqe2LI+fPdvMP9y/m9I8Z9T7Ovp8UVG7RUNVAU/tO4U3EMTlsANw97OHoo57Q30Z2w90TJvvPqtmqM4Gent7KSkpITc3l7fffpvGxkZ8Ph/PPfcchw8fDtsypaWlXH311fzkJz/h9ttvBwxbRqL31PLnN0/x9P52TpweYmHp2IK980g3Lza72Xmk+4zFvXvQz3DIGPOfSlvmwKl+th/o5IK60jkj7r/d08oLTV1ctbyKPKc9qfdcuqyCygLXqK9bvvuOw0Zk6/b4uD8Jrz2S8vxsGirz2dHSzS2Xw5GuAe567hArq4uoi/l/eHLfKR7beyIs7k3t/dhtirryPLKz7Pzndav5/Z7WuM+wKcUnL6mLC+qWVeUT0tDSOcDyBYVorXnm7Q7+ZsW88HFvqC/lt3taaerwTIs1I+IewzXXXMPdd9/NqlWrOOuss9iwYQMVFRVs2bKFa6+9llAoRGVlJU8//TTf+MY3uPXWW1m5ciV2u51vfvObXHvttTN9CGmFNUjV2jO+uFu3xd0D/jP+3HbTb68uzqG1Z2jK7tKsPnf0+VK+76nAPxziJ9uaOW9hMVs+ujalf5MN9WX8aOtBegcD/PyFwwwFgnwuyag9ch+/22P47j/e1kyWTfHzj62Nu7B87J5XoiySpnYPi8tyyc4yLlaXLavgsmXjrocRpqHSEOumjn6WLyjkUOcAXR4fG+vLovoGTJvvLuIeQ3Z2Nn/+858Tvvaud70r6nl+fj733nvvdHQrY7HEz4iey8Zua14IugbOXCgtv/38RcX88fWT9AwG4m7RU4HV547+uVFW4rd7DM/5Pz6wMuUXuw2m7/7kvpPc9/IR3rsq+ag9ch/3Nx7lT6+f5NG9bXz8otqEdwwb6kv5zycP0OXxUZ6fzcGOfhom+FmR1JXnYbep8KCqdeHYECHuNSU5Yd/9xotqJ/1ZySIlf4VZy+lB/4R874MdhlB2e848crcyZSzvdqqsGevOpH0ORO6RUftEotpksXz37/zxLYYCQT6/aemE92H57t949E2ybIp/GMXnt0T3lcPd4UyZZVWTj6adWTZqy3LD57Oxxc28QheLI+wgpRTr60vZcbgbrad+mo+IuzBricw+GE/cQyFN8wRtmZ4BP95AMOFrltiev6g4qc+P7cup3uQi8bAtM42Re/cYxz0WVtT+hSsbps
Siys6ys2ZRCR7fsBm1T1xsy/OzWVqZj8c3zIfXLx7V5z+3uohcp53GFnc4U2aidwmxLKsqoLnDg9aaxpZuNtSXxv2dNtSXTVu+u4i7MGuxoqAFRa5xI+fWniG8gRAAXUmIeyikefedL/D9P7+d8PWOfi8luQ7qzVogE4ncn9x3iou+/wyvt54es92Ab5jWniGybIr2Pt+0RHP93gBX/fA5fvBUfPbXeNz70hFW1RRx+RRE7RaXNJRjU0wqag/vY2k5LoeNz4yRneOw21hXW0pjizsiU+bMfPCGynyOuAfYf7KPLo8vypKx2Bjhu081Iu7CrKWp3UOe084FdaXjRs5NpiWzsDSH7iQ894Md/Zzs9fL8wc6Er7f3+agscFGU46DAlTWhyP2146cJabhja9OY7Q51GqKyZlEJ/uEQfUPDSX/GZLn3pSO4B/y83jqxXPDewQBvn+rn6uVVU5r+e9MldfzltssmFbVbfOVvzuLJL1xKZeHo2Tlg+O4H2z00trixKaivyJv0Z4KRDhnS8MArx8z9x4t7TUkOP/vwGt61cuqL0Ym4C7OWpo5+llbms7Akl1N9XoaDoVHbWtHXhrqypDz3xkNG5NTSNRDOjImko99HZaEx4aamJHdC4m7dcj/zdseY0bvV50vMnOn2KbZm+r1GFgoYF7eJ3Cm8etyYfb1mknVbksXlsJ+xPZKfnUVt+fhCvb7OEN/HXm2jtiwvnCkzWRqqjH7/fk9bnN9uoZTi3efOH3UyVyoRcRdmLQfbPTRUFVBTkkMwpDmVQIQtmjr6qSrMprY8jwF/cFxPubGlO1xeNtEtckeflyoz8qspyZmQLXOwvZ93nl1Jca5jzOi9qaMfp93GulpDMBNdZFLJvS8doXcowLXnV3N6MEDXBAae9xztwaZgdU3xFPZwellVU0SOw86APxgW5jPBypgZ8AdZn8Bvn25E3IVZiZUps6wqn5oSIwIaK3puavewrKqAMjNd0T2G7x4KaXYcdvOec+dT4MqKKzZlzU6tLLAi9xzazFz38bB89PMXFvOpS+rGjN6b2j3UV+SxoCgHmNpcdytq33R2JdeuqTE/P77Q1WjsOXaac+YXkpedPtnTjogLa8MZ2EAW2Vn2cLSeyJKZbkTcz4D8/OkrvJ+uPPpqW8ICcZa10VBpRO4QLe5b97eH65BYmTINlQXhXPRIa6a5o59Hdo/MNjzY0U/PYICNS8q4sLaUHTGRuzU7dSRyz2XAH+T0YGDc47F89IaqfG68qJainNGj94Pt/TRUFYTtn0hb5mTvEA+a3u1E6Bnw819/PUwoFH0hsqL2L1zZEI5Sk83YCIY0rx7rmXQp3dmMJcKpiNwBlpkXCRF3IaM52N7PbQ/v5YdPH4x7zZoM0lCVz/xiF0qNZKyEQpqv/vZ1PvfAHryBIG2nhxgKGLfWZfmGuEdOZLrnxSN85Tev8YY5iLjDjNQ31Jexob6Mlq6BcF47jETQkZE7JJlrH+53AQUuB9dfsJDtBzrwDUfbRIN+I8JvqMwn15lFQXZWVOT+q8ajfO13b4Tz/JPlgZ3H+M4f97MrZtWix/ae4KIlZayqKaayIJtCV1bCErWJOHCqnwF/MJzzn05cs3IeZ1UVcGFdaUr2d/WKKi5dVkHtGZa/SAWz9x7rz1+DU2+kdp/zzoV3fX/Ul//pn/6JxYsXc8stRmHLb33rWyileP755+np6SEQCPDd736XzZs3j/tRHo+HzZs3J3zffffdxw9+8AOUUqxatYr777+f9vZ2PvOZz9DS0gLAXXfdxUUXXZSCg5693PlME1obNWFiiykdbO8n12lnQVEONpuiqsAVFtemDk84l/2hncdZWGqI77KqfMryDEGOjNyt993xzEF+ceMFNLa4qS7OYWFp7siU8MPdvG/1AmAkgq6M8NyN/QyOW//F8tEXm6USVlQXhWuOnDO/MNzOyslfZkaMlYXZUbnu1kWi7fTQhAbfLItpR4s7LFhdHh9NHR4+uN
awY5RSNFQVJB257zlmXCjSMXJfUpHPU7ddmrL9XbumJmx7zTQSuUdw/fXX89BDD4WfP/zww3ziE5/g97//PXv27GH79u18+ctfTsp7dblcCd+3b98+/uM//oNt27bx2muvcccddwDw+c9/nssuu4zXXnuNPXv2sGLFzFTFnC4OtvfzpzdOsqQij96hAG/FrKvZZE4HtwQ/clDTGgBdUmFU73uzzXjv0soCSs3IPXIiU2vPIFk2xda3DP97x+HusKgvX1BIQXZW1KBqZ1zkPr7nH+636aNnmfXELfGOjZKbIiJ8gKpCV1Tk3hSuqZP8QG4gGGLXEUPcGw+PHI91p7I+IjpdVpVPU3tyGTN7jvZQnp8dvsgJc4PZG7mPEWFPFeeffz4dHR2cOHGCzs5OSkpKmD9/PrfddhvPP/88NpuNtrY22tvbmTcvfjmwSLTW/PM//3Pc+7Zt28Z1111HebmR/mbVht+2bRv33XcfAHa7naKiuVEhcLLc+UwTuQ47P75hDe++8wUaW7qjlihravdwacRkmZqSnLDVYEXe39m8kr//xQ5+/kILVYXZFOU40FrjsKuwLaO1pq1niA+tq+GJN07xld+8RveAnw3mNHW7TXFhXWmUuFtZK5YXPpLrPr7QHmzv5/wI+8LKoGiOiZIPxkT4lQXZ7DYjZG8gGC6nO5EUzDfaehn0B6kuzmH30R58w0Gys4xZmHlOOyurR/6+DZUFPDB4nC6Pf9w7gz3Heli7uHjGsz+EiSGRewzXXXcdjzzyCA899BDXX389v/71r+ns7GT37t3s3buXqqoqvN7xU9ZGe5/UgB+J2m+8qJblCwpZXJYbJa69gwE6+n1RhZxqSnI52eslEAyFI29rQLTfOxzOdlBKUZrnDNsynR4fvuEQ58wv5FOX1I3kw8dU62vpHPHdO/p9FOc6ovKek8l1j/TRLawMitjIvTkmwrcid601hzo9WOOhE4ncrb/hLVcswRsIhScqNba4WVdbGrU60cig6ti++1H3AEfcg2lpyaQ7Iu4xXH/99Tz44IM88sgjXHfddfT29lJZWYnD4WD79u0cPXo0qf2M9r5Nmzbx8MMP43YbX0Rryb5NmzZx1113ARAMBunrS+0Sg7OJLc+3kOuw86l3GNPD19eV8srh7nCGx74ThihFZjBUm7nuf23qCkfeSim+eGVDXNuyvOywLWMJck1JDjdeXEuhKyvst1tYQv9CUxdgRO5VMTVJakpyONTpGdPGiPXRLZZVxvvbB9r7oybrVBRk4zNnqVqWTZ7TPuoFxRsI8nd3v8xje9vC2xpbullWlc+7zdmPjYfcYb89NnvDmmqfaGm4J944ybrvbmXtd57mb+/8Kw67YvN51aMetzA7EXGPYcWKFfT391NdXc38+fP58Ic/zK5du1i3bh2//vWvOfvss5Paz2jvW7FiBf/yL//CZZddxurVq/nSl74EwB133MH27ds599xzWbt2Lfv27ZuyY5xpjnQNsHphcThtcUN9WZTvfs+LhynOdXBhXXS5VCCc0miJ1cYlZXx78wo+trE23LYs3xmuLzMi7rkUuhzcecP5fPf9K6P6s3xBIfUVefz8hRZCIR01O9XinWdXcsQ9yHOjlCuAeB/doqEqn6PuwXDGzKleL609Q5y3cGRCkJV22dHvpamjnyybYkN92aji/tDO47xypJvvPfE2vuFg2G/fUF9GSZ6Ts+cVsONwdzhd1LKhLKyMmdjIPRAM8f0/v01etp13nTuP95+/gO9duyrcP2HuMHs99xnEWvAaoLy8nJdffjlhO49n9GyDsd534403cuONN0Ztq6qq4rHHHptEb+ceHt8wi/NHIuf14WJK3YRCsPWtDr5y9TLyIybMWIOaT+9vj4q8lVJRwg5QmufkiHsAGLE1qouNi8PlZ1XG9cduU3xhUwNfeHAvT+07RUeflyUV0cuofXBNDT/Z1sztW5u4bFlFQmst1ke3aKgqIBjS4YwZa7WhyGjaGr
xt7/NxsN1DbXkedeV5vHioK87K8waC/OzZZuYXuTjZ6+XhncdZUV3EoD8Y3ueG+jIe3HmMhaU5cX679XdrqCoI21QWv3+1jWPdg/ziY+u4cnlV3DEKcweJ3IVpp987HDXTsbo4h0Wlhu9+xzMHKcpxxC1msKDYiBz9wVC4ZvdolOVlhz331p4hSnId486sfM+qBdRX5HH71iY6PfGRuzPLxq1XLGXv8dOjRu+xmTIWlgdvWTONLW4KXFlRqZFWZNze5zUnZOVTU5KDNxCKm2370M7jtPf5+L8fWs26xSX8dPuhcAE0K/1xQ30p3kCI3+1pY22M324RmzETCBr12ldWF7LpnPiLoDC3EHE/Q9544w3OO++8qMf69etnuluzGo9vmIIYsd1QX8rzBzvZ+lYHn35HHQUuR9Tr2Vl2qkzBHW/2X1m+M1xfpq1nKBz1j4Xdpvj8Oxs40N5PIKipSpBBct3aGqqLc7jjmaaE3rtV6CyW+gprlR5rIYdu1teVRi34bV1MjvcMctQ9YNbUiU/BtKL2C+tK2bikjC9euYxTfV7ufu4QDZX5lOcb+7EsLd9wKM6SsVhaWUDPYCB88XjUjNq/uGlZxg/6pwOzTtyno6Z1Kjn33HPZu3dv1GPHjh1T8lnT+bfRWsel76Vqvx7fMPmuWHEvwzccShi1W1hit3EccS+NqC/T2jOYdH72e1cvoN6sJpioXKwzy8YtVyzh1WPx0fugf5jj3UMJa4JbGTNN7R7a+7wc7hoIVyS0sGapvnzITUgbUXV1xOQpCytq/6K5YMbFS8tYt7gEbyAUddErNX13GP1iaA38Prb3BH9t6uIn2yVqTydmlbi7XC7cbvecE/jpQGuN2+3G5Zqega2dR3q48ofP8dS+Uyndr284RDCkyc+Ojsw3Likjy6a4+dL6uKjdYllVAfXleeOKdbh4mMdHa89Q0uJutym+eNUyAGrLEpeM/dDahSwocsVF71Ya4tmjLHzcUJnPwY7+hGtrWlQWZodngzZUFkSI+0jkfu/LR1i3uCR8gVNKcZvZ50tjFtG4bFkFJbkOzq1OPGfi7HmF2G2K7/xxPx/5rx0cdQ9y25UStacLs2pAtaamhtbWVjo7R89IyGRcLhc1NdMztfm4OYnm9q1NXHVOVVRpgDOh32ssSJGfHV07e35RDtu/cnl44DMR//qec/AGQuOKj1Vf5mC7B99wKClbxuJ9qxdw/sLiqFTJSIzofSnfePRNXmjq4tJlFWitufOZZqqLcxIO2IJxYdr6VgfPH+yiIDuL5QsK49pUFrg41DmA3aaoK8/DmWWjKMcRjtzb+7y0dA7w93+7KOpvcPHScl746hVxF7HbrlrGTZfUJfTbwUi//Mttl4bTRnMc8QOvwtxlVom7w+Ggrq5uprshMDJ9/62TffxlfzvXrBx7Rm6yeHymuLvi//VGE1SLXGcWuc7xP6PUrC/z2nGj1O5Ep82P148PravhZ9ubuX3rQd7RUM5zBzvZe/w037v23HCN+FiWVuYTDGn+9MYJLl5SHuW3W1hjCrVlueH9GGUXjMh9rKg/UZ9dDjsux9gLUCypyGfJ1K2aJ8wgs8qWEWYP7gE/DruivjyPO55piishO1k84cg9sfWSCizP3aqjPpHIPRmys+zccsVS9hw7zQtNXdy+tYnq4hw+OEbBKMuLj/XGI7F8/kjfPlrcu+OybARhNETchYS4PT7K8rL53Kal4eg9FYQj9ylc9KHQlYXDrnjrpJGdUj0FBa8+tK6GBUUuvvyb19h7/DS3XrF01KgdjBozVrA+qribGToNUeKeS2vPIFpro9JjbWnCqF8QYklK3JVS1yilDiilmpVSX0vw+mKl1DNKqdeVUs8qpWZHzctZztP72/lLigcsY+ny+LjzmSaCE4y8uwf8lOY5ee+qBeHoPRUD3Za4FySwZVKFVV/GHwxRkuuYkguJFb139vuoLs7hurVj/8u7HHZqy/JG9dthJHKPrqlj5Lq/db
Kflq6BWbEIhDA3GFfclVJ24KfAu4DlwA1KqeUxzX4A3Ke1XgV8G/heqjuabvQOBvjSQ3u545nR19hMBY++2sYPnz7Im20TW+3ePeCnLN9Jlt3G9Rcu5K2TfUmtRDQeHp+xj6lers3y3VNtyUTyoXU1vKOhnH/523PGjNotPri2hhsvqh018l67uIR1i0uiJmlZ/f/dnuiyC4IwHsl8wy4EmrXWLQBKqQeBzcD+iDbLgdvM37cDj6ayk+nIf714mH7fcFTd8anAylVv6vCwemHyixt3D/jD60FasyfdA35K8pIY0RyDEc99asW93MyYmcoa5NlZdu6/KfkJa7desXTM16uLc3jkH6MXaLH6/+jetjGjfkGIJZlvWDVwPOJ5KxD7H/0a8EHgDuADQIFSqkxrHbU4pVLqZuBmgEWLFk22z3Oe3sEAv/zrYQDcHv+UlgG2Ss1OZDFkGPHcgfBPt8eXcAbmROifBlsGRgZV5/oCE9Z4QZfHz6azKyfvt2sNwQAEBs3HEPgHjJ+R2wKD4B+M37biA1CXuhWLhKknmW9Yov+mWPP1K8BPlFIfB54H2oDhuDdpvQXYArBu3bqMnalkRe0fOL+a37/aZkzHH2XizpmgtQ7XM0l2STUwprgP+IPhfPHwotMpuMsY8A2TZVNkJ2FjnAkj4j7za1kmRSiYQHSHKAwM8B7X64T8g/xdTim8sjeBEA9BYCDBthgh18Hx+xGFAkcuOHJgwfki7nOMZMS9FVgY8bwGOBHZQGt9ArgWQCmVD3xQaz0xkzdD6B0K8MsXD3PNinlcvLSc37/aRveAf0rEvb3PR793GJuKX+ZtLCwRtwTSsjhiC1hNBo9ZNGyqZ0FaNVZSErlrDcPeiIh2NDEda9sokbK1LTj6Qtg/AXACb5kPiyyXIbyO3BERduaBqxAK5o1sc+SC0/o9L8G23JGHM2JfWS6Q2apzlmTEfSfQoJSqw4jIrwf+PrKBUqoc6NZah4CvA/ekuqPpwoOvHKPfO8znNzWEl3NzD/hZPMp09zPBEvSNS8p4sdnNgG844UDmp+7dyWVnVfLRDYuN/pgVFa1p/CXh6fzR4v6lh/dSkuvkX98TO74+Ov2+4dT77cFAnHCeFTjMxbb9nNXrhTf02FZERKQcFmN/jC0Rd7M6DrasCCE1Rdf6PafE3BYhpAnbGj+/v/Uoja1eHvncJrJceSPvsY09QUnIbMb9lmmth5VSnwWeAuzAPVrrfUqpbwO7tNaPA5cD31NKaQxb5tYp7POc5oh7kPL8bJYvKCQQDAHxopkqLCvm3efO58VmN4c6PayqiR5UDYU02w90YlNqRNzN9UctW8Zht1HoyqJ7IDq6/GtTF10eHx/ZsJi68nEuTgEveE5R0NfM2qzTcNgxQYEdRXQDgxCKcwC5ErjSCTyZqDNq9Mg1tzxCdGMj35j2ibZZ4mxP3Z3Y5vesZkOfl6xyKeglJE9SIZTW+gngiZht/xbx+yPAI6ntWnrS5w1QmGP82S3xjBXNVNHU3k9pnjNcgfBge7y4uwf8BEM6qjjViC0zUvYW2u4XAAAe6klEQVS2PD87ypYJhjRdHh8hDT/Z1sz//bvVY3fm6Ivwq2v5d+v5vWO0Hc1uyC6A/KrobVa0m9BuiN02N+2Gc+YXyqxUYcLMqtoymUDfUIBC018PZ6FMUTpkk7noQ21ZLg67SrgYcke/YQ1FlpW1xN26+IDhv0feYbhNYS/Pz+bRvW187p1LqR0req9aAe+/i//zzFGyXPl8+d3njSLEYjcIQiqQ8gPTTJ93mMIcQ9xznHZyHPbwqkGpRGvNwfZ+GqryybLbqC/PT7gYckefL9yvPq8xwajLY9SViVxQozTPGZUt026+74tXNpBlU/x4W/PYHSqYB+f9PU+xkZaSi6HuHVCzFirPgZJayK+A7HwRdkFIERK5p4DWnkFcDns4Q2Ms+ocCLIzI4CjLd044cnd7fPR5h8f0uTv6jUwZqwhVQ1U+r5mFtK
LbecO/t/UMUTjfQfeAj9I8Z1RGS1m+kz3HTse9b2V1ER/ZsJj/fukIG5eUkeeMFufiXCcbl4zMqvR441dhEgQh9ci3LAXcfN9uFpXmcvdH147b1vDcRwbbyvImJu5aaz7zq9209/l4/qtXjNrOypSxJh0tqyrgj6+fZNA/TK5z5LRbETgYi0KcM7/QrCsTfaEqy8umZ9BPKKSx2VT4fZUF2fzDZfU88MoxvvKb1xL25U+fv4QVC4w64Z6pyJYRBCEO+ZadIVprDnV66PclV3elzzsc9tzBsDs6PckPqL7Y7GbnEWO1nn5vYNT8eGtV+3Dkbor8oY4Bzq0ZWZCho9+L027DHwyFffcujz+c2x7Zz2BI0zsUoCTPGY7cKwqycdhtPPu/Lo+b5DTgC/Khu1/iqX3trFhQRDCkGfQHE9ZyFwQhtYjnfoZ0efz4hkO09gwx6I9PyYvEGwjiHw6Fs2XAyEhJNhVSa83tWw+GEz3GWuO0ucPIlLGsIquMbOxkpvY+H3XleeQ47OGMGasiZCRlMROZ2vt8lOc7w6v8VBa4OHteYdTDKIRVGq58OeCfnroygiCIuJ8xVrSrtREVj4U1YBkZuZebnnsy5XRfOuRm19EePnmxsVpVogFSi4Ptnqg6MIvNjJmDMRkzHf0+KguzzUUhjGNJKO6mTWNF5539XioKxl/P9eoVVbx9qp/j3YPTVjRMEAQR9zMmMj98vCn+fUOGuEV67qV5TvzDIQb8Y9f9sKL2eYUuvnL1WWRn2RKmNlptm9r7w6vbgzERqb48n+aYC0JHn5eqQld4xR9vIIjHNxw3OFwaseg0GJG7tSzcWFy1vAqAv+xvH3OJPUEQUouI+xliibvdpsYtzjUSuUenGALjpkO+fMjw2m+9Ygk5TjtLKvLDvnosHf1GNk1DZUHU9oaq/KjIPRTSdPYbIm2s+DMUV1fGIt6W8YZXDhqLxWV5nD2vgL/sOxWxOLaIuyBMNSLuZ0jb6UGKcx0srcgft6xu35Ah7pGDoJZodo0zS/WJN0+Sn53Fh9YZNdyWVY3+eXvNhaFXxNT+bqgsiBob6B70MxzSVBYYkXvvUICjbsOaiRX3ktyRypDW7FSrzvt4XLW8ip1HusO2j4i7IEw9Iu5nSGvPEDUlOTRU5ScRuRuiWhQxoBr2sseJ3BtburmgtiS8mn1DVQEner30e+OzdBpb3LgctrhSA8uq8qPGBqzCZVbkDiOLSsdmyzizjPoybo8vPDs1mcgd4Orl8whpeGyvUUxUbBlBmHpE3M+Q1p4haopzaags4HjPIENjeOdW5B6bCglj10rv7PfR3OGJWmLNSm1MlDHT2NLNusWlcUu/NZgevDU20NFv3C1UmJE7EJ7oFJvnDlBm1pex3leZZOS+srqQ+UUutr3dAUjkLgjTgYj7GaC1prVnkJqSnJGouHP06D3suefE2zJjTWTacdhY0Gp9hLhb+euxdwunB/28faqP9XWlxLK4LM+sMWO8pyMqcjfF/bhRhj/WlrG2dQ/4wxF/spG7UorLz6oIPy/ITn3tekEQohFxPwPcA368gVDYloGxM2b6hoZx2m1RqxDlOrPIcdjDWSiJaGxxk+e0szLCQ19YmmtkzMR83o7D3WgNG5bEL6TsCNeYMSP3Pityz6Y0z0mOw07b6SEcdhU16GthFQ+zIvdkPXeIvqDlZUv9GEGYakTczwArU6amJDccFY+WwQIj5X5jVyGKLcoVS2NLNxfUlZJlHzlddptKmDEz4rcXxe4GgKURYwPt/V5Kch1kZ9lRSoWj99i6MhZWTr4VuSdTS8fCYTP67nLYoo5DEISpQb5lZ4CV/VFTmjOSRz5K7jlEl/uNZKziYYn8douGqvw4z72xpZu1i0vIzkocHS+LGBvo6IvOeBkR98SiXZrnpGfQEPeyPGecpz8WWXbjYpHnFL9dEKYDEfczwIrcq4sNUVxaNXruORjZMgU58eI+VuRu+e2JxH1ZVQFtp4fCk4Msv3
1DXXxbiwZzbKC5w0N7v4+KCN/cypgpS+C3G/3MJhjSNLV7ot6XDFk2Q9ytbB9BEKYWEfczoLXHyHG38taXjZMxY0Tu8ZFrWV72qJ57Ir/dIjZjZiy/3cKatdrU0U+nOTvVworcy/ITi7uVHvn2qf4J+e1A2IpxOeRfThCmA/mmnQFWjrtFwzgZM7Hlfi3Kxqgv09jSzbra0oQ+dWwxsMYWN9lZo/vtMJIxc+BUv1FXJkHknihTJnK7xzecdKaMhUTugjC9iLhHsO9EL10TKL/b2jMUtmRgJCoeLWOmb2g4oedemufENxxiMCbiH8tvB1hkZsw89eYpHt51nO1vd4zpt4ORMVNXnseOw90Mh3TiyH0ccYeJZcrAiLhnT8CnFwRh8sg3LYKb/nsX//tPbyXVdiTHPTe8bXFZHnlOe3iyTiyRi2NHMlKUK9p3/8NrxozOS5aWJ9yf3aZYvbCYZ97u4KuPvM4R9yBXnFU5bt8bqgrCM1EjI/C6ijwKsrPCOfSxlEUMtCZTNCwS684jxymRuyBMB5K6EMHpIT8vHXKjtU6YChhJZI67hcNu42MX1XL3c4f4Ykc/SyMKd4VruSeI3MvDE5l8LCrLDbe/+7lDbKgvjVpcI5b7Pnlh+G7DphTzi8aPqBsq8/mT6QBFzjItdDnY/a9X4bAnPvbIyD2Zcr+RWPt0jXFXIQhC6pDI3SQU0ngDIU71ecPFs8YiMsc9kk+/o54ch507n4leMDrR7FSL0pha6QAPvHKMjn4fX9i0bMx+uBx2akpyqSnJZUFxzrgXJSAqMo+NwJ1ZtlH34cyyUWAOCE84cg/nuYu4C8J0IOJu4h0e8bsbW9zjtg/nuEdE7mBEtx/bWMsfXj8RlYMeruWeMFsmugSBNxDkrmcPsb6uNGpx6VQRWed9oimNVl+TrStjETQHi7MlW0YQpgX5pplEDmYmI+5tVo57jLgDfPoddeQ47Px4W1N4W/8YkXu4vozpuVtR+xevHDtqnyxWxow1O3UilJmzUismMDsVwBcw/r4SuQvC9CDibmLlpmfZFI0t3eG0xFO9Xr708F56B6NL6x7vGaQoxzHKjNNsPraxlsdfO0GLmRZplftN1D7XmYXLYeNXjUf58C8a+dHTB6csaoeRjJnKCfrmYNyZlE5wdiqANxACIEfEXRCmBRF3kyEzsrywrjTKd79960F+t6eNrW+1R7V/o60vyt6I5cPrF6G1se4pjJT7LUqQLQPwsY21zC9y4QuEOGd+If/87nPO+JjG4qZL6vjIxsUTft8H19TwqXfUTfh9Q+HIXf7lBGE6SCpbRil1DXAHYAd+obX+fszri4B7gWKzzde01k+kuK9TimXLXHFWJS8dctPY4sZuUzyyuxUwrJoPrq0BDIvlzbZebrl8yaj7qynJIc9pD/vuiRbHjmSqxTyW/++CRZN63zUr503qfcMh404oV2rLCMK0MO43TSllB34KXAW0AjuVUo9rrfdHNPsG8LDW+i6l1HLgCaB2Cvo7ZVhLz62sLqI8P5vGFjevtZ7GZlOsqSmi8fCID7/raA/BkB51chEYNcyXVhWEJzQlWhw7k/jUO+ro7Pfy8YtqZ7orgpARJHOPfCHQrLVu0Vr7gQeBzTFtNGAVPykCTqSui9OD17QNcp121teX8uzBTn6zq5UbLljIe1cv4Hj3EG2njUHUxhY3DrtizaKSMfe5rHKkkFifNxBXyz2TKHQ5+N61q8iTVZgEYVpIRmmqgeMRz1vNbZF8C/iIUqoVI2r/XEp6N41YtkyO086G+jJODwawKcU/Xr6U9WaVxR1mFk1jSzera4rHnW3ZUJVPl8dHz4DfKBqWoJa7IAjCVJCMuCdSo9gKVzcA/621rgHeDdyvlIrbt1LqZqXULqXUrs7Ozon3dgoJi7vDzkbTbrn+woXMK3Jx9rwCinIcNLa48fiGebOtd0xLxqIhYim8Pm/iujKCIAhTQTL3yK3AwojnNcTbLjcB1wBorV9WSr
mAciCqyIrWeguwBWDdunXxJRBnkEhbZmFpLr/8xAVcWGusQ2qzKdbXldLY0s2uI93j+u0WVkneg+399A0FEtZyFwRBmAqSidx3Ag1KqTqllBO4Hng8ps0xYBOAUuocwAXMrtB8HCJtGTCyZiL94Q31ZRzrHuR3e9oMv31x8bj7rC4eyZjp8yau5S4IgjAVjCvuWuth4LPAU8BbGFkx+5RS31ZKvc9s9mXg00qp14AHgI/rRMXJZzGWuI9W2MqK1P/w+glW1xQnldIXmTFjeO4SuQuCMD0kFUqaOetPxGz7t4jf9wMXp7Zr04s3EMTlsGGzJR7wtHz33qFAUpaMRUNlPs8e6ESp0XPcBUEQUk1m5uUlYNA/PGY0bvnukHg909FYZmbMuD2+hLXcBUEQpgIRd5NBf3DcuifXrJxHRUF2Un67RYNZ0z2kJXIXBGH6kFDSxBsIjpu3fu2aGq5dUzOh/TZE1J8Rz10QhOlCIneTQX+Q3ClYAs7KmIHEtdwFQRCmAhF3k2RsmcmglGKpme8utowgCNOFiLtJMrbMZLFmqsqAqiAI04WIu8lU2TIwMlNVIndBEKYLCSVNhvxBchxT8+d47+oFnDg9RF153pTsXxAEIRYRd5OhQJAc59TcyCwozuHfN6+ckn0LgiAkQmwZk/EmMQmCIMwlRNyBUEjjDYRk8WZBENIGEXfAOxxdEVIQBGGuI+LOSEXIqcqWEQRBmG5E3DEyZQCxZQRBSBtE3DEyZUBsGUEQ0gcRd8SWEQQh/RBxZ8SWcYktIwhCmpCR4t7aM8izB0bW7h4KDANInrsgCGlDRor7ludb+If7d2Mt8yq2jCAI6UZGivux7kF8wyH6fUbELtkygiCkGxkp7q09QwB0e/yAZMsIgpB+ZJy4a61p7RkEwD1giLvYMoIgpBsZJ+7uAT/eQAiAblPcw9kyWSLugiCkBxkn7pYlA+D2+ADDlnE5bNhsaqa6JQiCkFIyUNwHw7+P2DJS7lcQhPQiA8XdiNwddhVhy0i5X0EQ0ouMC1dbewYpynFQmJMVYcsMS6aMIAhpRVKRu1LqGqXUAaVUs1Lqawle/5FSaq/5OKiUOp36rqaG1p4hakpyKM3LjsqWkUwZQRDSiXEjd6WUHfgpcBXQCuxUSj2utd5vtdFa3xbR/nPA+VPQ15TQ2jPEkoo8hoOaU31ewMiWkboygiCkE8lE7hcCzVrrFq21H3gQ2DxG+xuAB1LRuVRj5bjXlORSmucc8dwDErkLgpBeJCPu1cDxiOet5rY4lFKLgTpg25l3LfV0mznuNSU5lOY7cXv8aK0Z9AdlQFUQhLQimQHVRMnfepS21wOPaK2DCXek1M3AzQCLFi1KqoOpxMqUqSnJZTio8QdDeHzDDPmDMqAqCEJakUzk3gosjHheA5wYpe31jGHJaK23aK3Xaa3XVVRUJN/LFDEi7jmU5jkBI5oXW0YQhHQjGXHfCTQopeqUUk4MAX88tpFS6iygBHg5tV1MHdYEpmrTlgHo8vgZ9A+LLSMIQloxrrhrrYeBzwJPAW8BD2ut9ymlvq2Uel9E0xuAB7VVJH0W0tozZOS4uxyUmZG72+PDGwiRIzNUBUFII5JSNK31E8ATMdv+Leb5t1LXranByJTJAaAsPxuAE6cNq0ZsGUEQ0omMKj9gTWACwpH7cdOHF1tGEIR0ImPE3chxH6KmJBcwFsPOddrDPrxkywiCkE5kjLhbWTHVxTnhbWX5TtrElhEEIQ3JGHGPTIO0KM3L5ni32DKCIKQfGSPuxyPSIC3K8pz0DgUAsWUEQUgvMkbcXz12GmeWjSUV+eFt1kQmQBbrEAQhrcgYcW9scbN2UUlU9cey/BFxF1tGEIR0IiPEvXcwwP6TfayvL43aXhYVuYu4C4KQPmSEuO880o3WsKG+LGp7aV52+Hep5y4IQjqREeLe2OLGmWXjvIXFUdslchcEIV3JDHE/7GbNouK46Fw8d0EQ0pW0F/feoQD7TvTFWT
Iwki2TnWXDZktUtl4QBGFukvbivvNwYr8doMz03MWSEQQh3Uh7cR/Nbwdj4lKu0y457oIgpB1pr2o7Dncn9NstSvOcZGel/TVOEIQMI61Vrc8bYN+J3oSWjEVZnlMid0EQ0o60VrUjXQOENCyfXzhqm79ZOQ9fIDSNvRIEQZh60lrcB/1BAPKyRz/MWy5fOl3dEQRBmDbS2pYZChjiLhUfBUHINNJb3M3IXVIdBUHINDJC3GX2qSAImUZai/ug2DKCIGQoaS3uQ/5hQBbiEAQh80hzcTdSHMWWEQQh00hrcR8MDOPMsmGXomCCIGQYaS3uQ/6gZMoIgpCRzDlxP+oe4I+vn0iq7ZA/KJaMIAgZSVLirpS6Ril1QCnVrJT62iht/k4ptV8ptU8p9T+p7eYIT755is/+z6v0eQPjth0MBCVTRhCEjGTcNBKllB34KXAV0ArsVEo9rrXeH9GmAfg6cLHWukcpVTlVHa4pyQWgrWeIwvmOMduKLSMIQqaSTOR+IdCstW7RWvuBB4HNMW0+DfxUa90DoLXuSG03R6gpyQGgtWdo3LZiywiCkKkkI+7VwPGI563mtkiWAcuUUi8qpRqVUtekqoOxjIj74LhtDVtGctwFQcg8klG+RHmEOsF+GoDLgRrgBaXUSq316agdKXUzcDPAokWLJtxZMBbXyHHYk4rcvf4g8wtdk/ocQRCEuUwykXsrsDDieQ0Qm67SCjymtQ5orQ8DBzDEPgqt9Rat9Tqt9bqKiopJdVgpRU1JTpKR+7AMqAqCkJEkI+47gQalVJ1SyglcDzwe0+ZR4AoApVQ5hk3TksqORmKIe5Keu4i7IAgZyLjirrUeBj4LPAW8BTystd6nlPq2Uup9ZrOnALdSaj+wHfhfWmv3VHW6piQ3aXHPlQFVQRAykKRGG7XWTwBPxGz7t4jfNfAl8zHl1JTk0DsUoM8boNCVOB1Say157oIgZCxzboYqROe6j4ZvOITWUu5XEITMZI6K+/i57rJQhyAImcycFPdqU9zbxsiYsRbqkBmqgiBkInNS3MvynLgctnEid2OhDpnEJAhCJjInxd3IdR87Y0YW6hAEIZOZk+IOZq776TFsmfASeyLugiBkHnNb3MeK3GVxbEEQMpg5LO65nB4M0D9KXXfJlhEEIZOZw+JuZsycThy9D/olW0YQhMxlDou7MZGptTuxuIstIwhCJjOHxX3suu5iywiCkMnMWXEfL9d9xJaRPHdBEDKPOSvu4+W6DwWCOLNs2G2J1hoRBEFIb+asuINhzRzrHs2WGRZLRhCEjGVOi/u6xSXsP9nHm229ca8N+oOSKSMIQsYyp8X9oxtrKXBlceczTXGvDUktd0EQMpg5Le5FOQ5uuqSOv+xvZ9+J6Oh9yB8UW0YQhIxlTos7wCcurksYvQ8FxJYRBCFzmfPiXpTj4JMX1/HUvujofdAflHK/giBkLHNe3AE+eUkdBdlZ/PLFI+Fthi2TFocnCIIwYdJC/YpyHKyoLuRI10B4m2HLSOQuCEJmkhbiDlBZ4KKj3xd+PugP4pIBVUEQMpS0Efeqwmza+7xorQFjEpMMqAqCkKmkjbhXFrjwDYfo8w6jtZZsGUEQMpr0EffCbAA6+rz4hkOENGLLCIKQsaSNuFcVugBo7/OFy/1K5C4IQqaSNuJeWWBG7v3e8EIdIu6CIGQqSYm7UuoapdQBpVSzUuprCV7/uFKqUym113x8KvVdHZvKiMjdquUutowgCJnKuIngSik78FPgKqAV2KmUelxrvT+m6UNa689OQR+TIj87izynnY5+L96ALNQhCEJmk0zkfiHQrLVu0Vr7gQeBzVPbrclRVeiiIyJyF1tGEIRMJRlxrwaORzxvNbfF8kGl1OtKqUeUUgtT0rsJUlGQTUe/l0H/MCC2jCAImUsy4p5onTod8/wPQK3WehWwFbg34Y6UulkptUsptauzs3NiPU2CqkIX7X2+CFtGxF0QhMwkGXFvBSIj8RrgRG
QDrbVba23N/f85sDbRjrTWW7TW67TW6yoqKibT3zGpLDBmqYotIwhCppOMuO8EGpRSdUopJ3A98HhkA6XU/Iin7wPeSl0Xk6eq0JileqrPCyCLdQiCkLGMm06itR5WSn0WeAqwA/dorfcppb4N7NJaPw58Xin1PmAY6AY+PoV9HhVrlurRLmPRbFlmTxCETCWpXEGt9RPAEzHb/i3i968DX09t1yZOZYGR637EbZT+lchdEIRMJW1mqIJRGRLgqHsQp91Glj2tDk8QBCFp0kr9rFmqp/q8YskIgpDRpJW4W7NUQTJlBEHIbNJK3GEkehe/XRCETCb9xN2sDim2jCAImUz6ibtE7oIgCOkn7lUSuQuCIKSfuFsTmWRAVRCETCbtxL1KbBlBEIT0E3drlmqOLNQhCEIGk37iLraMIAhC+om72DKCIAhJFg6bS+RnZ/G1d53NO8+unOmuCIIgzBhpJ+4An7lsyUx3QRAEYUZJO1tGEARBEHEXBEFIS0TcBUEQ0hARd0EQhDRExF0QBCENEXEXBEFIQ0TcBUEQ0hARd0EQhDREaa1n5oOV6gSOTvLt5UBXCrszG0n3Y5Tjm/uk+zHO1uNbrLWuGK/RjIn7maCU2qW1XjfT/ZhK0v0Y5fjmPul+jHP9+MSWEQRBSENE3AVBENKQuSruW2a6A9NAuh+jHN/cJ92PcU4f35z03AVBEISxmauRuyAIgjAGc07clVLXKKUOKKWalVJfm+n+TAal1EKl1Hal1FtKqX1KqS+Y20uVUk8rpZrMnyXmdqWUutM85teVUmtm9giSQyllV0q9qpT6o/m8Tim1wzy+h5RSTnN7tvm82Xy9dib7nSxKqWKl1CNKqbfNc7kxnc6hUuo28//zTaXUA0op11w/h0qpe5RSHUqpNyO2TficKaVuNNs3KaVunIljGY85Je5KKTvwU+BdwHLgBqXU8pnt1aQYBr6stT4H2ADcah7H14BntNYNwDPmczCOt8F83AzcNf1dnhRfAN6KeP5/gB+Zx9cD3GRuvwno0VovBX5ktpsL3AE8qbU+G1iNcaxpcQ6VUtXA54F1WuuVgB24nrl/Dv8buCZm24TOmVKqFPgmsB64EPimdUGYVWit58wD2Ag8FfH868DXZ7pfKTiux4CrgAPAfHPbfOCA+fv/A26IaB9uN1sfQA3GF+WdwB8BhTEhJCv2XAJPARvN37PMdmqmj2Gc4ysEDsf2M13OIVANHAdKzXPyR+Bv0uEcArXAm5M9Z8ANwP+L2B7VbrY85lTkzsg/nEWruW3OYt6+ng/sAKq01icBzJ/WQrBz8bhvB74KhMznZcBprfWw+TzyGMLHZ77ea7afzdQDncAvTevpF0qpPNLkHGqt24AfAMeAkxjnZDfpdQ4tJnrO5sS5nGvirhJsm7PpPkqpfOC3wBe11n1jNU2wbdYet1LqPUCH1np35OYETXUSr81WsoA1wF1a6/OBAUZu5xMxp47RtBk2A3XAAiAPw6aIZS6fw/EY7ZjmxLHONXFvBRZGPK8BTsxQX84IpZQDQ9h/rbX+nbm5XSk133x9PtBhbp9rx30x8D6l1BHgQQxr5nagWCllLcoeeQzh4zNfLwK6p7PDk6AVaNVa7zCfP4Ih9ulyDq8EDmutO7XWAeB3wEWk1zm0mOg5mxPncq6J+06gwRyxd2IM8Dw+w32aMEopBfwX8JbW+ocRLz0OWCPvN2J48db2j5mj9xuAXus2cjaitf661rpGa12LcY62aa0/DGwHrjObxR6fddzXme1nXSQUidb6FHBcKXWWuWkTsJ80OYcYdswGpVSu+f9qHV/anMMIJnrOngKuVkqVmHc4V5vbZhczbfpPYjDk3cBB4BDwLzPdn0kewyUYt3GvA3vNx7sxPMpngCbzZ6nZXmFkCR0C3sDIYJjx40jyWC8H/mj+Xg+8AjQDvwGyze0u83mz+Xr9TPc7yWM7D9hlnsdHgZJ0OofAvwNvA28C9wPZc/0cAg9gjC
EEMCLwmyZzzoBPmsfaDHxipo8r0UNmqAqCIKQhc82WEQRBEJJAxF0QBCENEXEXBEFIQ0TcBUEQ0hARd0EQhDRExF0QBCENEXEXBEFIQ0TcBUEQ0pD/H8XxIbt+srKcAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "tplt( lt, 'acc')\n", + "tplt( lt, 'val_acc')\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAD8CAYAAACMwORRAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzsvXeYZGWZ9/95KoeuzmF6pnumJ2dmgAFEJIoIroKRoKL4srCroqK7pp8JUVfFffV1XQzoIu4uCiziMhINJMmTc2By94TOsaq74vP745xTXdVd1V09U10d5v5cV1/Tp85Tp54zNfOtu77P/dy30lojCIIgTC9sEz0BQRAEIf+IuAuCIExDRNwFQRCmISLugiAI0xARd0EQhGmIiLsgCMI0RMRdEARhGiLiLgiCMA0RcRcEQZiGOCbqhSsrK3VDQ8NEvbwgCMKUZMOGDW1a66rRxk2YuDc0NLB+/fqJenlBEIQpiVLqcC7jxJYRBEGYhoi4C4IgTENE3AVBEKYhOXnuSqkrgR8DduBXWuvvDTn/I+BS89AHVGutS/M5UUEQpgfRaJSmpiYGBgYmeiqTGo/HQ11dHU6n86SeP6q4K6XswN3A24AmYJ1Saq3Weqc1Rmv92ZTxnwLOPKnZCIIw7WlqaiIQCNDQ0IBSaqKnMynRWtPe3k5TUxNz5849qWvkYsucC+zTWh/QWkeAB4BrRhh/A/C7k5qNIAjTnoGBASoqKkTYR0ApRUVFxSl9u8lF3GcBjSnHTeZjmSY0B5gLPJPl/K1KqfVKqfWtra1jnasgCNMEEfbROdW/o1zEPdMrZOvNdz3wsNY6numk1voerfUarfWaqqpRc/BPiv5InN+9foSuUGRcri8IgjAVyGVBtQmoTzmuA45lGXs98MlTnVSuaK0ZiCbwuuwAvHqgnS/9fiuH2kM0doT4wpVLCjUVQRCmEEVFRfT19U30NMaVXMR9HbBQKTUXOIoh4B8cOkgptRgoA17J6wyz0B2KcvNv1rH+cCdVATczSzxsaepmToWP+VV+ntvTKuIuCMJpy6i2jNY6BtwGPA3sAh7SWu9QSt2plLo6ZegNwANa62yWTd5o7wtzwy9fZWtTN/9w8TwuWVSF22nnHy+ez1OfuYj3nV3HzuM9tPRIqpUgCNnRWvP5z3+eFStWsHLlSh588EEAjh8/zkUXXcTq1atZsWIFf/vb34jH49x0003JsT/60Y8mePYjk1Oeu9b6CeCJIY99fcjxHfmbVnZOdA/woV+9ytGufn750TVcvGi4d3/JomruemoPz+1t5do19RmuIgjCZOCbf9zBzmM9eb3mspnFfONdy3Ma+8gjj7B582a2bNlCW1sb55xzDhdddBG//e1vefvb385XvvIV4vE4oVCIzZs3c/ToUbZv3w5AV1dXXuedb6bcDtX/Wd/Iie4BfvOxczMKO8DS2gA1xW6e3yMZOYIgZOfFF1/khhtuwG63U1NTw8UXX8y6des455xz+PWvf80dd9zBtm3bCAQCzJs3jwMHDvCpT32Kp556iuLi4ome/ohMWFXIk+WTly7gXatm0lDpzzpGKcXFi6p4cvsJYvEEDvuU+wwThNOCXCPs8SKbi3zRRRfxwgsv8Pjjj3PjjTfy+c9/no985CNs2bKFp59+mrvvvpuHHnqIe++9t8Azzp0pp3o2mxpR
2C0uXVxN70CMjUcm91cnQRAmjosuuogHH3yQeDxOa2srL7zwAueeey6HDx+murqaW265hZtvvpmNGzfS1tZGIpHgfe97H9/61rfYuHHjRE9/RKZc5J4rFyysxGFTPLenhXPnlmcdd8faHZw3t5yrVtYWcHaCIEwG3vOe9/DKK6+watUqlFLcddddzJgxg9/85jf84Ac/wOl0UlRUxH/+539y9OhRPvaxj5FIJAD47ne/O8GzHxlVgOSWjKxZs0aPd7OOa3/xCn0DMZ74zIUZz8fiCRZ/7Snec+Ys/vUDq8Z1LoIgGOzatYulS5dO9DSmBJn+rpRSG7TWa0Z77pSzZcbCJYurRkyJPN49QDyhCYZjBZ6ZIAjC+DKtxf2C+ZUArD/cmfH84fYQAMFIxmoJgiAIU5ZpLe5LagM47YotTZkXVY90mOIukbsgCNOMaS3uboedpbXFbG3sznhexF0QhOnKtBZ3gDPqSth2tJtEYvjCcaMl7hERd0EQphfTXtxX1ZXSF45xoG14BbjByF08d0EQphfTX9zrjVauWzJYM4fbgwD0iS0jCMI0Y9qL+/yqInwuO1uHLKp2h6L0DMQo9jiIxBJE44kJmqEgCJOZoqKirOcOHTrEihUrCjib3Jn24m63KVbMKmFLU3rkblkyS2qN4j8hsWYEQZhGTNvyA6msqivhN68cJhJL4HIYn2eWuC+rLeb1gx30RWKU+JwTOU1BOP148ktwYlt+rzljJVz1vaynv/jFLzJnzhw+8YlPAHDHHXeglOKFF16gs7OTaDTKt7/9ba655poxvezAwAAf//jHWb9+PQ6Hgx/+8Idceuml7Nixg4997GNEIhESiQS///3vmTlzJtdeey1NTU3E43G+9rWvcd11153SbQ/ltBD3M+pKicQOsre5lxWzSoBBcV9aGwAkHVIQTheuv/56br/99qS4P/TQQzz11FN89rOfpbi4mLa2Nt70pjdx9dVXj6lJ9d133w3Atm3b2L17N1dccQV79+7l5z//OZ/5zGf40Ic+RCQSIR6P88QTTzBz5kwef/xxALq7M6drnwqnhbivqjMXVZu6UsQ9SIXfRXXAA4i4C8KEMEKEPV6ceeaZtLS0cOzYMVpbWykrK6O2tpbPfvazvPDCC9hsNo4ePUpzczMzZszI+bovvvgin/rUpwBYsmQJc+bMYe/evZx//vl85zvfoampife+970sXLiQlStX8s///M988Ytf5J3vfCcXXpi5/tWpMO09d4D6ci9lPmfaZqYjHSFmV/jwmc21JR1SEE4f3v/+9/Pwww/z4IMPcv3113P//ffT2trKhg0b2Lx5MzU1NQwMjK1NZ7YijB/84AdZu3YtXq+Xt7/97TzzzDMsWrSIDRs2sHLlSr785S9z55135uO20jgtInelFCvrStPKEBzpCHHW7DL8buOvQNIhBeH04frrr+eWW26hra2N559/noceeojq6mqcTifPPvsshw8fHvM1L7roIu6//34uu+wy9u7dy5EjR1i8eDEHDhxg3rx5fPrTn+bAgQNs3bqVJUuWUF5ezoc//GGKioq477778n6POUXuSqkrlVJ7lFL7lFJfyjLmWqXUTqXUDqXUb/M7zVPnrNml7G3u5XB7kGg8wbGuAWaX+ygyxT0ku1QF4bRh+fLl9Pb2MmvWLGpra/nQhz7E+vXrWbNmDffffz9LliwZ8zU/8YlPEI/HWblyJddddx333XcfbrebBx98kBUrVrB69Wp2797NRz7yEbZt28a5557L6tWr+c53vsNXv/rVvN/jqPXclVJ2YC/wNqAJWAfcoLXemTJmIfAQcJnWulMpVa21bhnpuoWo555KS88AF/3gWd6xopbbL1/ERT94lrvefwaXLq7mnO/8hW9ds5wbz28o2HwE4XRF6rnnznjXcz8X2Ke1PqC1jgAPAENzhG4B7tZadwKMJuwTQXWxh4+e38AfNh/lz7uaAZhd7sPvNjz3PvHcBUGYRuTiuc8C
GlOOm4DzhoxZBKCUegmwA3dorZ/KywzzyD9ePJ/7XzvCvz69B4A5FT68Tjs2JdkygiBkZ9u2bdx4441pj7ndbl577bUJmtHo5CLumRI9h3o5DmAhcAlQB/xNKbVCa522518pdStwK8Ds2bPHPNlTpczv4pYL5/Gjv+zFZbdRE/CglMLvckhlSEEoIFrrMeWQTzQrV65k8+bNBX3NU22Bmost0wTUpxzXAccyjHlUax3VWh8E9mCIfRpa63u01mu01muqqqpOds6nxM0XzqXc76Ku3IvNZvzj8rntErkLQoHweDy0t7efsnhNZ7TWtLe34/F4TvoauUTu64CFSqm5wFHgeuCDQ8b8L3ADcJ9SqhLDpjlw0rMaR4rcDv79hjMJxwYLhfndDslzF4QCUVdXR1NTE62trRM9lUmNx+Ohrq7upJ8/qrhrrWNKqduApzH89Hu11juUUncC67XWa81zVyildgJx4PNa6/aTntU48+YFlWnHRW6xZQShUDidTubOnTvR05j25LSJSWv9BPDEkMe+nvK7Bj5n/kw5/C6H2DKCIEwrTovyA6Phd9slFVIQhGmFiDuW5y6RuyAI0wcRdwxxl/IDgiBMJ0TcMRZUpXCYIAjTCRF3wOeyMxBNEJM+qoIgTBNE3GGwMmRUFlUFQZgeiLhDsqa7LKoKgjBdEHGHlG5MIu6CIEwPRNwZtGUk110QhOmCiDuDtkxIIndBEKYJIu6kRu4i7oIgTA9E3Enx3GUjkyAI0wQRdwYjdyn7KwjCdEHEHUmFFARh+iHiDniddtQ49lG98487+eGf947LtQVBEDKRUz336Y7NpvA5x6/s7wtvtFJZ5BqXawuCIGRCIneT8awM2RWK0B+VujWCIBQOEXeT8aoMqbWmKxRlICKLtYIgFA4RdxOf2z4unntfOEYsoemXomSCIBQQEXcTv8tBcByi665QFEDEXRCEgpKTuCulrlRK7VFK7VNKfSnD+ZuUUq1Kqc3mz9/nf6rjS9E4tdrrDEUAxJYRBKGgjJoto5SyA3cDbwOagHVKqbVa651Dhj6otb5tHOZYEMarj2qnRO6CIEwAuUTu5wL7tNYHtNYR4AHgmvGdVuHxu8cnFbLLjNxjCU1UOj0JglAgchH3WUBjynGT+dhQ3qeU2qqUelgpVZ+X2RUQv2t8UiEtzx0kehcEoXDkIu4qw2N6yPEfgQat9RnAX4DfZLyQUrcqpdYrpda3traObabjjJHnHieRGHprp4bluYP47oIgFI5cxL0JSI3E64BjqQO01u1a67B5+Evg7EwX0lrfo7Veo7VeU1VVdTLzHTeSxcPyHL1L5C4IwkSQi7ivAxYqpeYqpVzA9cDa1AFKqdqUw6uBXfmbYmHwuY2yv6E8R9ddqZG77FIVBKFAjJoto7WOKaVuA54G7MC9WusdSqk7gfVa67XAp5VSVwMxoAO4aRznPC6kNuyoyeN1OyVyFwRhAsipcJjW+gngiSGPfT3l9y8DX87v1AqL3zU+ZX+7QhHcDhvhWIJ+8dwFQSgQskPVxLJl8l1fpjMUpbbEA8CARO6CIBQIEXeTomST7Px77jNMcRdbRhCEQiHibuIfhybZsXiCnoEYM0u8AGLLCIJQMETcTWpLPBR7HDy8oQmt85Pr3t1vLKbWlkrkLghCYRFxN/G5HHzubYt4cV8bf9rZPOr4g21BegeiI46xMmVqzchdPHdBEAqFiHsKH37THBbVFPGtx3aOKMSxeIKr//1Ffvbc/hGv191v5LhbC6piywiCUChE3FNw2G3c8a7lNHX288sXDmQdZ0TtMRo7+0e8XmfQiNwri9w47UpsGUEQCoaI+xDevKCSq1bM4O7n9tHeF844ZufxHgDaejOft7DqypT5XHicdhF3QRAKhoh7Bj52wVwGogm2NHVlPL/zmCHurVnE38KqK1Pqd+J12sVzFwShYIi4Z2DxjAAAu0/0ZjxvRe6to0TuXf0R7DZFwO3A67KL5y4IQsEQcc9A
idfJzBIPezKIu9Y6Gbl390cJx7ILdmcoSqnXiVIKr9gygiAUEBH3LCyaEcgo7i29YdqDEZaY0X1bX2TYGIuuUIRSnxPA9NylKqQgCIVBxD0Li2cE2N/aN6w1nmXJXLzIqEc/kjXTGYxS5nMBGJ57Fltm45FOPvDzl8WTFwQhb4i4Z2HJjADRuOZgWzDtccuSucgU95EyZrr6o5Ra4u7KbstsOtLFukOdHO0aObVSEAQhV0Tcs7C4phgYvqi683gP9eVe5lb6gZEzZlJtmZE8936z+1NnMLvFIwiCMBZE3LMwv9qP3abYO0Tcdx3rYVltMRVFRkQ+oi0TilCW6rlnsWWs7k8dIu6CIOQJEfcsuB125lb60yL3YDjGwfYgy2pLcDvslPqcWcV9IBpnIJpIsWVsWT11EXdBEPKNiPsILJ4RYE9zT/J494letIZlMw3LpqrInVXcrQ1MqQuq2W0ZU9xDk1vco/GEWEeCMEUQcR+BJTUBGjv6kzXerUyZpLgH3Fk9d6v0wFDPPVM54eAU8dx//dJB3vaj5/NWElkQhPFDxH0EFpm57G80G9bMzmM9yQ1OYIp7lsh9qLi7nXa0hnBseK67Fbm3T3JxP9gWoq0vkvdWhIIg5J+cxF0pdaVSao9Sap9S6ksjjHu/Ukorpdbkb4oTh7VRac+JXg61BXl86zHOnlOGUgowqj22ZYncuzPYMpC5prvluU/2yL3L/MCyql0KgjB5GVXclVJ24G7gKmAZcINSalmGcQHg08Br+Z7kRFFf5sPnsrPpSBf/8F8bsNkU37x6efJ8VcBNKBInmCGS7Rwq7i5D3DP57qHo1FhQteY32dcGBEHILXI/F9intT6gtY4ADwDXZBj3LeAuYCCP85tQbDbFwpoAD65v5I2WXn5yw5nUl/uS56uK3EDmdMhMnjtkbthh5blPtGiGIrFhO3JTse6pU8RdECY9uYj7LKAx5bjJfCyJUupMoF5r/Vge5zYpWFxTBMAXrlzChQur0s5VBUxxN62ZrlCE//unPXzmgU387vUjeJw2PKaoW39mityDYcuWmVi7470/fZkf/Xlv1vPWt5HJbh8JggCOHMaoDI8l0yWUUjbgR8BNo15IqVuBWwFmz56d2wwnmI9dMJdFNQFufsvcYeeS4m5G7g+tb+Qnz+yjvtxLXZmXD543eI+WLZPJc7cEvy8cIxyL43bY834foxGLJ9jb3Mv8qqKM57XWSVG3RF4QhMlLLuLeBNSnHNcBx1KOA8AK4DlzoXEGsFYpdbXWen3qhbTW9wD3AKxZs2ZK5NMtrS1maW1xxnNDxf3VAx3Mq/LzzD9dMmzsoC0z3PYIRWIUuR30hWN0BqPMKCm8uLf1RUho6MnS9LsvHCOWMN6ysUbu/ZE497xwgI9fMh+XQxK0BKEQ5PI/bR2wUCk1VynlAq4H1lontdbdWutKrXWD1roBeBUYJuzTkTKfC5syxD2e0Kw72MGb5lVkHOvNYsskEpqBaIK6Mi8A7cGRG4CMFyd6jKWSnoHMaY6pltFYPfefPb+fH/1lL797/cjJT1AQhDExqrhrrWPAbcDTwC7gIa31DqXUnUqpq8d7gpMZu01RYaZD7jzWQ284xnlzyzOO9bqMv+qhtowl9pa4T5TvfqLbEPfe/syvn7rYO1Zxj5mLtL1ZvhUIgpB/crFl0Fo/ATwx5LGvZxl7yalPa+pglSB49UA7QNbIPduCqpXjXldmZOFMVMZMS+8okbs5L5fdNuYPIMuKiWTYwCUIwvggBugpYpUgePVAO/Mq/dQUezKOy7aJKWSmQc4qNSL3jlGabo8XVuSezXO3fPaGSt+YI3dL3MMjpFkKgpBfRNxPkaqAmxPdA7x+qIPzskTtkLKJKZI5cq8t9aAUdExQJorluUdiiYwZPdYGpnmVRWMXd7tE7oJQaETcT5GqgJuW3jC9AzHeNC+z3w7gcYxsyxS5HZR6nROWQ97cM7j3rDeDNdMVimK3KeZU+OgMRsdU
PMxtRe4i7oJQMETcTxFrlypk99vB2O3qdtiGibsVyftcDsr9rgkrQXCiewCzZE5Ga6YjFKHU66Tc7yISTyQ/lHJBPHdBKDwi7qdIpZnrPpLfbuF1DW+SbXnuPpd9QsW9uSfMbLO0Qk+GjJnOYIQyv4syv1ErZyzztAqtibgLQuEQcT9FrMj9vBEsGYtMDTusCNjrslPmmxhx7wvH6AvHWFhtVMHMlDHTGYpQ7nMlC6F1jWFtIG5ufhJxF4TCIeJ+isyp8GG3KS5ZXD3qWEPc0wUulLRl7FQUuSYkFdLKlFlk1tHJlI/eGYxS6nNS7jcKoY1lnklxl2wZQSgYOeW5C9mZWerl1S+/NVmKYCQyNclO2jJOB2U+F53BCFrrpJVRCKzF1IWmuPf0D4/cO0IRzpxdmuwJO5aFX4ncBaHwSOSeB3IRdjA99ywLql7Tc48ldNaNRADtfWFu/c/1yU1H+cCK3AdtmfTIXWtNV8jw3MstcT+ZyF3EXRAKhoh7AfE4h2fLhKJxnHaFy2Gj3D96VPzsnlb+tLOZ53a35m1eVo77vCo/dpsaZsv0hWNE45oyn5NirxOlTi5yl01MglA4RNwLiDeDLdMfiSd3r1qZKCP1Ut3S2AXAjmPdeZtXc88AxR4HPpeDgMcxzJaxyg2U+VzYbcrIxx/LgqqWyF0QCo2IewHxOIfbMsFwDJ/LWPooz8HP3pwU9568zetE9wAzzKbfxR7nMFvGsmCsbxZlvrEt/A7aMrnnxguCcGqIuBeQjKmQ0Tg+szSBJZ7ZhHMgGmfX8R5sCnYd7yGRyE9J/OaegWSOfrHXMWyHakeyZaAp7n5Xsll2Lki2jCAUHhH3AuJ1DRf3/kgcn3uIuGeJ3Hcc6yGW0Fy2pJpgJM6h9mDyXCyeIHySkfGJngFmmOIecDuHbWKyvkmkRe5jqAyZ9NyjIu6CUChE3AtIJs89FInhcxq2jM9lx+2wZbVlLL/dat+Xas187dHtXPeLV8c8p1g8QWtveNCW8Toy2DLGsWUblfmcJxW5Z+ofKwjC+CDiXkA8TjvhWCLNTumPxJMVI5VSI5Yg2NzYxYxiD29ZUIXTrpLiHokleGzLcbY0ddEXzp5GmYn2oNFeL2nLeJzDbJnOYASbgoDHXBsYY5kEa0E1U7VJQRDGBxH3ApJskp1inwQjg547MGIJgi1NXayuL8XlsLGwOpDMmHntYDu94Rhaw84xLrRaOe5JW8aTwZYJRYyWgjZjY1Wpz0U4lhj2LSQbVuQejWui4rsLQkEQcS8gg02yB0UxNXIHspYg6AxGONweYlV9KQDLZxaz81gPWmv+tKM5WTN929GxpUhaOe6ptkwwEk+2xgNT3E2/HRhzCYLUbxNizQhCYRBxLyCZmmSHIjH8rsEqEGU+F0faQ2n11QE2Nxl+++oUcW8PRjjRM8CfdzZz2ZJqaordbB+juFuvU11s7LIt9hjCnSrIHcEIZT5n8ngsJQja+8I8uulo8jjXaF8QhFNDxL2AeFzDW+2Fhtgy151TTygS5+/+7UVe3t+WfHxLYxdKwcq6EgCWzzL+fOD1Rk70DHDF8hpWzioZe+TePYDDpqj0G+Ju+eqpG5m6QtFkNUgYzJrJpQTBvz+7j/5onNsuXQCIuAtCochJ3JVSVyql9iil9imlvpTh/D8qpbYppTYrpV5USi3L/1SnPoO2jGF5xBOacCyRZstcsKCStbddQInXwYd/9RrfeHQ7O451s7mxi0XVAYrchvgurS1GKbj3xYPYbYrLllSzYlYJ+1v7Rl1UbewI8e3HdvLlR7bx1I4TVAfcST+92GtE6KkZMx3BSFLQgWQUP9ou1caOEP/96mGuXVPPilnFAGNq8iEIwskzqrgrpezA3cBVwDLghgzi/Vut9Uqt9WrgLuCHeZ/pNGCoLWP9mRq5AyysCbD2trfwgbPr+d3rjfzdv73Ic3taWVVfkhxT5HbQUOGnNxzjvLnllPpcrJxVMuqiqtaa2x/czH0v
H+LPO08wEInzjpW1yfOWLWOJu9aazlAkacUAySh+NFvmh3/ei00pbr98EV7TehLPXRAKQy4lf88F9mmtDwAopR4ArgF2WgO01qlq4gfys3VymuF1GZ+llsCFzAjb6xr+NvjdDr7//jP40lVL+OPWY/x5ZzPvObMubcyymcUcbAtyxbIaAFaaVs22o92cOzdz85DHtx1nw+FOvvfelVx/7uxh54faMsFInGhcJxdRAUrM6H6kdMgdx7r5381HufWiecwo8XCkI2Tcu0TuglAQchH3WUBjynETcN7QQUqpTwKfA1zAZXmZ3TTDMyRbJtmow2nP+pwyv4uPnN/AR85vGHbuzPpSntx2nLctnwFAdbFnxEXVgWic7z25m6W1xXxgTX3GMSVDbBkrOk/13B12GyXe7BuZYvEEX35kG+U+F5+42PDarW8nErkLQmHIxXPP1DViWGSutb5baz0f+CLw1YwXUupWpdR6pdT61tb8laydKli2jLWgaom7351d3Efiw2+aw2OfupBZpd7kYyMtqt770kGaOvv52t8txW7L3AzEsmWsjUwdGcQdzI1MWTz3X790iK1N3dxx9XJKTH/e+mCzmpMIgjC+5BK5NwGpYV4dcGyE8Q8AP8t0Qmt9D3APwJo1a04768brGuq5Z7dlcsHjtLNsZnHaYytmlfDX3S0EwzH87sHrtveF+emz+7l8aQ1vXlCZ9ZpFSVvGEG4rl70sxZYBKPU5eWV/Ozf+x2v0hWOcPbuMWy+eRygc51//tIfLl9bwzjMGvXxfhkwhQRDGj1wi93XAQqXUXKWUC7geWJs6QCm1MOXw74A38jfF6cPQTUyp/VPzRXJR9Xj6ouqL+9roC8f49FsXjPh8u01R5B6sL3Og1ShOVl/uSxt3xbIZlHgd9IVjOO02fv3yIS78/rPceO9ruBw2vvOeFWmtAr3JyF3EXRAKwagho9Y6ppS6DXgasAP3aq13KKXuBNZrrdcCtymlLgeiQCfw0fGc9FTFMyRbJhg2W+yN4LmPleSialM35zQMLqruOdGLw6ZYMqM421OTFHsGy/5uP9pNTbGb6oAnbczHL5nPxy+Znzw+1Bbkp8/t4w+bjvLd956RrFVjMfRbiyAI40tOfoDW+gngiSGPfT3l98/keV7TErfDht2mknnoli2Tz8i9uthDdcA9zHffc6KX+VVFuByjf1kr9g7Wl9l+tJsVM0tGeQY0VPq56/2r+Jf3rMRhH/4abocNpSRbRhAKhexQLSBKKWaVemk00wIHF1RPznPPxhl1JWw1yxVY7GnuZdGMQE7PD3gMWyYUibG/tY8Vs0YXd4tMwg7GvfsylDwWBGF8EHEvMA2V/mSTDUvovHmM3AHOqCvlQFsw2ei6LxyjqbOfxTVFOT3fKvu763gvCc2YxH0kvC4gWkOYAAAgAElEQVQ7IbFlBKEg5DdkFEZlboWPDYc60FrnlOd+MpxRZyyqbj/aw/nzK9jb3AvA4hz8djBsmb0tvcmSwlbpgJOm8XVY+2nuS8Rx7gvAA7XgKgJ3Ebj84AoYf2Y8tn78xo/KnMIpCEI6Iu4FZm6ln2AkTmtfmFAkjstuy2plnCxn1BmVI7c2dRnifsIU95ox2DL9MbYf7abC70rWej9p7C6omE+4swlfrB86DkC4DyLmTzzXxh/KFPmizOI/6nFgyPMDYHeO/rKCMAURcS8wDZV+AA61hQhFYnm3ZMDYYFRX5mWruai6+0QvPpedujLvKM80MGyZKFubulk+qyQtpfGkmLkarr+fO+9+iRKvk//8P+emn49FTKEPDv4Z7k057jM/DLIc950Yfj5X7K4s3x6K5NuFMKURcS8wc5PiHhxW7jefpC6q7m3uZWFNIFn5cTSKvQ4S2liEfevS6rzNyeu0MZBpQdXhAkc5+DLXwxkziQREQ+liHwmmf1vIeJzygdLXMjm+XbiKjL8fQRgjIu4FZlapF4dNcbA9SP+4inspT2w7QUcwwp4TYxPpgFmC
QGtySoPMFZ/LQUvvwOgDTxWbzRBNdxFQk59ryrcLYYoh4l5gHHYbsyt8HGwNEo7F8Z1k6YHROMNs6vHcnhbag5GcF1NhsL4M5C9TBozNWlM2FXI8vl3E+nP4NjHB3y4WXwWzzsrPPQsFRcR9AphbYaRDlnid4+K5w6Ao/8/6JiD3xVQwbBkwdqrm6tPngtc1hcU939hsg1F0lm8Xu473cOcfd3LvTefk9u8kr98uzMdK6kTcpygi7hNAQ6Wfl/a3saC6iMoi97i8RrHHybwqP68caAdgcY4bmGDQllmRj8XUFHwuu5QfGAPrDnXwyoF2DrYFhxWIy8h4fLuQ1gxTFtnENAE0VPoZiCY41BYaN88dYJWZElnud1FZlPuiXLFZGXJlHi0ZMGwZKRyWO91mSeWRmqKMKzYb2Mbv36cwvoi4TwDzzIyZvnBs3Dx3GBTnxTWBMUXgM0u9nD2njCvMJiD5wuuyE44lSCQkGsyFbrO+T3swPMEzEaYiYstMAFauO+S3aNhQrJ6rY7FkwKhe+fuPvznv80ntIZvvejrTkaS4901Q5C5MaSRynwBqiz24zeqM47WgCrB8ZgkrZhVz6ZL85aqfCtJqb2xY4j5htowwpZHwaQKw2RQNFX72NPfic47fW+Bx2nnsUxeO2/XHytAessLIDNoyIu7C2JHIfYJoqDQ6G42nLTPZsNYXTrfIffeJHt71kxdp7R2bdz5oy4jnLowdEfcJwvLdx9OWmWx4XcY/t9MtY2b9oU62He3msa0jtR4eTo/YMsIpIOI+QcytMMTd7z6NxN20oE43W6alxyi58PjW42N6nnjuwqkg4j5BzKsyGmcUuU+fkrODfVRj4/o6z+xu5ld/OzCurzEWmnsMW2X94U5OdOdWWycWTxA0PwTbxJYRTgIR9wlizZwy7nrfGVy0qHKip1IwktkykURertfeF+bDv3qN4939aY//z/omfvbc/ry8Rj5o7h2g3G9sIntye27Re4/ZoLzc76JnIEY0np+/M+H0ISdxV0pdqZTao5Tap5T6Uobzn1NK7VRKbVVK/VUpNSf/U51e2GyKa8+px+04nWwZ415DkfxE7q8e6ODFfW1sPJzeL7YjGKE9GMm7IGqtT6qqZXNPmLNml7FkRiBna8ayZKwS0Z1izQhjZFRxV0rZgbuBq4BlwA1KqWVDhm0C1mitzwAeBu7K90SFqY9lywzkKVvGah/YGUoXvq7Q+Gz+ee1gB+f9y195zazXkystPQPUFLt5x8rarNbMe3/6Er97/UjyeKi4t8lGJmGM5BK5nwvs01of0FpHgAeAa1IHaK2f1VqHzMNXgbr8TlOYDgxG7nESCc1nHtjEM7ubT/p6SXEfEtVaYp/v2vG7jvegNfzihXQ//xuPbuer/7st43MisQTtwQg1xR7esbIWGG7NhGNxNh7pYsPhzuRjlrjPqzLEXRZVhbGSi7jPAhpTjpvMx7JxM/DkqUxKmJ6klh94ZncLj24+xv9uGlt6YCqWuHekRO5a62TkPta88tE40mHEL8/sbmFfi/HaGw538ptXDvPs7taMz2k1F0Nrit0sqC7KaM1kmm9S3M3IXerLCGMlF3HPVHEqY+UnpdSHgTXAD7Kcv1UptV4ptb61NfN/BmH6YrMp3A4b/ZE49750EDCi4ZMhHItzqN0QW0scwfhWEDG99nyLe2NHiJklRumIX/3tIFprvv34TgBO9AwQz1AQrdlMg6w2m4xfvKiKTY1dacXTLPuoJYO4z60sShuTTxIJzS9fOMB5//IX3jA/KIXpQy7i3gTUpxzXAcPCLaXU5cBXgKu11hn/V2mt79Far9Far6mqqjqZ+QpTHJ/LzqYjXby8v53KIhf7W/tOyoM/2BZMimmqZZH6e8sYxD0WT/DdJ3fxvSd3Zx1zpCPE8lklvPesOh7ZdJT7Xj7EpiNdnDW7lHgi82KrleNeEzDEvabYQzyhk+KdOufUDyNrA9Psch92m8q7LdMRjHDzb9bx
nSd20dwT5vFtY8vBFyY/uYj7OmChUmquUsoFXA+sTR2glDoT+AWGsLfkf5rCdMHncvD6oQ68Tjv/dMViEnrQXhkLe04Yz6kpdqctqKZG8blG7l2hCB/99ev84vkDWXeRaq050hFidrmPv79wLpFYgm/+cSfLaov55KULADjW1T/seVaOe02x0ZSlwqyrn2qzWLZSRzCc/MDq6Y/idtjwuuyU+Zx5rS8Tiyd4z09f4qV97XzrmuWsri/l+b3yTXq6Maq4a61jwG3A08Au4CGt9Q6l1J1KqavNYT8AioD/UUptVkqtzXI54TTH4zT+yb3v7FmcP68CgJ3Hxm7NvNHch92mWDOnPE3cU3/PZUH1aFc/7777JdYd7GRxTSAtok6ltS/MQDTB7HIf86uKuNxsOP7Vv1tKfblRJ+hY1/DXa+4ZwGlXlPkMUbc6b6Vmv3SYvnxCD4p+d3+UEq+xwa3C76Yjj577sa4BDreH+No7l3Lj+Q1cvKiKLY1ddIVk0XY6kVOeu9b6Ca31Iq31fK31d8zHvq61Xmv+frnWukZrvdr8uXrkKwqnK1bxsJvePJfZ5T78LntOvvv3ntzN3c/uSx7vbe6locJHTbGHzuCgIFviXlviSYvco/EE7/3pSzy3J/2L5e9eO0JjZz+/veU8rlwxg96BWEbvvNFcTJ1tCvk33rWcH1+/mjcvqKS2xLBcskXu1QEPNpuxdJWM3FPFPeXbRkvPoLgXm+Je7nfl1XNv7DTuZb65S/rixVUkNLy4ry1vryFMPLJDVSgoS2YEuHrVTBZUF2GzKZbUFrPr+Oi2zOPbjvGz5/Yn69K80dLHopoAZT4nfeEYkZixiGqlRS6sCaR57ofbQ2w80sUzu9PFfW9zL3Mr/axpKKfUZ4hpT4bo3cqUsaL0+nIf16w2ksYCHicBjyOjuLf0DlBdPNgnt8Jv/J5my6T8bmXXpEbu5UWuvHrujUPuZVVdKSVeJ8/vEWtmOiHiLhSUH3xgFT++fnXyeFltsZk/nr31ntaa1t4wfeEYf9p5goFonEPtQRbWBCgzt/VblkKnGQUvrC6itTecvO6htiAw6NVb7GvpY2G1EcFaYtqVSdzbDeGuK/NmnOOsUi9Hs9gy1mIqGFG4UkNsmWAkaVdZ3zZSxb3S78qr597YGcJuU8lvHHab4i0LK3nhjdYR3wdhaiHiLhSc1H6uS2uL6Q3HaOocHvVa9IVjDESNyPz3G4+yr6UPrY3esFbNFmtRsisUodjjoLbEQziWoDdslDo41G6I+xstfcnrGumUwaS4W5F7Jt/9SEeIGcWeZMORocws9Wa1ZWpSIne7TVHuc6UVA+sIRlhYbbRCzCTu5X433f3RvJVTaOrsp7bEg8M++N//4oVVNPeE2SMpkdMGEXdhQllaa4jajhEWVS3Bm1Ph48U3WpPe8KKaoqQgW7ZFZyhKud9FVcAQVMvDtsS9IxhJCuvBtiAJDfOHRu4ZFhYbO0NJvz0TM0s9HBtSwGwgGqe7P5rMcbeoKHKlNeDoCEaYWeoh4HZkFnfTp89XfZnGjhD1Zen3cqFZwO4FyZqZNoi4CxPKkhnF2NTIm5ks7/yWC+eR0PDz5/fjtCsaKv3JyN1aVO0MRSj1uagys1IssTzUZlgRAHtNa2afGcVbUXOJ17hWpsi9sSOU9KgzUVvipSsUTSuK1pJMgxwi7n53+oJqMEq5301VwE1rr5EO2TsQSy6oVvqt9Mk8iXtnP/Xl6fZSbYmXxTUBSYmcRoi4CxOK12WnodLPzhHE3RLo8+aWc+bsUrpCUeZW+nHabZSbKYadSc89QpnPmVzEtNIhD7YFOW9uOTCYV/9Gcx82NVi/xYqUh4r7QDTOiZ6BESP3WaWGWKamQzabr51qy4AZuZtCnUhoOkMRyv3OpLj3meV+S1KyZSD3XarP7G5m0VeeZPFXn2TVN//EJ3+7Me1eWnvDwyJ3gIsWVbLuYCcD0TjReIJP/nYju0+c3A5iYeIRcRcm
HGtRNRuWuFcF3LzvLKMm3aIaI9ou9aVbFp3BKGU+F1VFnuRzB6JxjnX3c05DOSVeJ3vNiH1fSx+zy31JH33QlkkX96Nd/WgNsysyL6aC4blDejqkVXpgaOReWeROWkM9A1HiCT0YufeFkx8uyTz3DBufRuLPO1twOWzcdEEDi2sCPLntOEFz7aGpMz1TJpVz51YQiSfYfrSbE90DPL71uNg0UxgRd2HCWVpbTFNn/4gbiJx2RYnXybvOmEnA7eDM2WUAuBw2ityOtAXVUp+LYq8Dl8NGa2+Yps4QWhvlcxfVFCVtmTdaellgWjLWtXwu+7B5HBmS456JmaXDc92Tu1MDQ8XdRe9AjHAsnlwrsCL3lp6B4eJupk/mmg656UgnZ80p48tXLeUfLjasLOubUWOHMb+htgzAqvoSADY3diX73HYEM78nwuRHxF2YcJbNLAbgxTcyb6Jp6QlTVeRGKUWJz8nfvngpHz1/sB9Mmd9JVyhKOBYnGIlT7neilKKqyLA5DrYZ4txQ6WdRTYC9zb1E4wkOtgVZWFOU9lqlXuewyH1oXngmaoo92FS6uLf0DOB22Cj2OtLGVhQNivWguLupDngIRuLJzlLFHuN5JV4ndpvKyZbpC8fY09zLWbNLAVg5yxDsrU3dxr2YkXtdBlumOuBhVqnXFHcj0pcmIVMXEXdhwjm3oZz5VX5uf3ATD647Mux8a1+YqhRro9TnSkvjK/cZm3wsUbasmupiw+awctznVhji3jMQY93BDqJxnUyDtCj2OodH7u0hPE5bcpE2E067jeqAJy3XvblngJpiT1rqJ0CF6aG39UaS3nu5bzDDZ1+rYRuVmJlANpvKub7MlsYutCb5zaa62MOMYg/bmoxuVU2d/bgc2e9ldX0pmxu7kpvF8plfP97csXYHr46xkcp0RsRdmHD8bgePfOIC3jSvgi/+fhv/8sSutPOtveERhbXU56IzFEkuqlp1XKqK3LT0hDnYHqTU56TE50x69Y+ZVRAXptgyxrWcdPenC5pVMGyoSA9lZqknrZ/r0Bx3CytybwuGk5FxeVGKuJtrApYtA7nXl9loNvxYXV+afGxlXclg5N4Roq7MmyyHMJRV9SU0dfYn9x0M7XI1WYnEEtz38iGe3nFioqcyaRBxFyYFJV4nv77pHK5dU8c9LxzgoBltgynugeziXu43xd30h8vMiNeK3A+3B2moMDJiFpk2zNPbDRGYX+1Pu1ap15XRcx/Jb7cYupGpuXdgWI47GJ47GNkvqZF79Qjinmt9mY1HOllYXZT23FV1JRxoC9IzEKWxc3iOeyqr642I/xUzAp4qHaC6+q0F9akx30Ig4i5MGhx2GzecOxsYFLhYPEF7cGRxL/O56AxGk5uPrJIEVUUeOoIR3mjuS/YirShyU2Fu568r8yYLmVmUDPHctdaj5rhbzCr1cqx7INmIo6UnPGwx1ZoDQHufEbl7nXa8LnvyHve39OG0q2TnKjAi+6EWSXPPAJ97cDMvm5u6tNZsauzizNmlaeNW1hnH249209gxPMc9lRWzirHbFC/vN645VcS923zPOkKyAGwh4i5MKuaZlQr3m75zRzCC1owi7kbxMCv1MGnLWLtUe8PMqRgUZ8uaGeq3g2XLpDfSCEbiI0a7FjNLvcmeqX3hGH3hWEZbxu+y43HaaDcXVK089jKfC7tNEYzEKfE602ygBVVFHGwL8vH/3sDRrn7+9kYr7/jx33hk01H+vz9sSy4Qd4WinGX67RbWourL+9rp7o+OeC8+l4NFNYFkpk93f5RYnsoejCdWPSCJ3AdxjD5EEApHiddJZZGb/Wbkbu1OrR5J3E1xtKwcqyRB6nOsyB0Ma+aVA+0syCDuxV4n4ViCgWgcj9Oe3JQ0K0vBsFRSS//+1aw+aX2QpKKUosJv5Lq3p4i73aao8Lto6Q0nd6dafOLS+Tjtin9/dh/P7mkhHEuwqDrAxy+Zz7cf38XDG5pwmovMZ81JF/dyv4u6
Mi9PmOsMo30LWV1fmrbvoDMUHfHDdTJgfduaKt80CoGIuzDpmF/l54Ap1FYJ3NE8d4ADbUG8TntyU1LqcyzPHWDRDCtyHy68qcXDPE47R00P3dqBOhLWRqb7Xj7EHzYd5QNn13HJ4sztJCuLDA/d2J3qSj5eXeympTec5pkDuB12brtsIe8+cxZ3PbWHYq+Dr7xjGR6njce2Huff/voGFyyoJOB2sKBq+IfWGXUlPLHNWGcY7VvI6voSfvf64HFnKDLpxT11h7JgILaMMOmYV1WUtGVaTXtg5GwZQwgPtAaTi6lAWh31VHE/b65Ru31NQ3qEC8N3qVrZLzNzEHfrA+APm46yqr6Ub717RdYMm4oiN+3BMO196eJu3Wexx5nxeXVlPv7thjP59rtX4nXZUUrx+bcv5nj3AL/f2MTq2aUZM2HOqBv04Ufy3GFwUdViPJpz5xvLcw9F4ifVk3c6IuIuTDrmV/npCkXpCEbGFLkf7epPWjQwuLOzzEyDtFhQHWDz169I+vuplA4pHnasqx+3w5b2oZGNUp8Tn8tOZZGbX3z47KzlgY25uWjrHR65W/c5NHIfiQsWVPLm+RVp+e1DOcP03QNux6jXXlBdhN81OPepEA13paSvToX5FgIRd2HSMT9lUbW1N0zA4xhRKK3iYTC4mApGOYFyv4uGSn+mp2VkaNnfY10DzCr1jprjDoaX/sNrV3P/35/HjJLhWTKpVBQZaZqhSDzdljGza8Yi7gBfuHIJLoeNi83SvUNZbop7XQ75+nabYmVdCT5T4KfCRqbUDCfx3Q3EcxcmHZa4HzDFfaTFVBjckWr8ni6K5zSUZfTWs18rvRvT0a7+nCwZiytXzMhpXGWRK9mr9VQjdzAWQXd88+3JRdWhlHidLK4JZFxEzsQXr1zCie4BPn7/ximRgZIq7p15rodz/2uH2XOilzuvWZHX6443OUXuSqkrlVJ7lFL7lFJfynD+IqXURqVUTCn1/vxPUzidmFXmxeWwsb81SEvvwKiLeVbxMEiP3AF+ceMa/vnti3N+bStLxeqjery7P5kFk08qU9YQ8iHuQFZht7jv/5zDnVcvz+laZ84u46qVtQQ8jikRCXf1R5K1eDrybMs8se04a7ccy+s1C8Go4q6UsgN3A1cBy4AblFLLhgw7AtwE/DbfExROP+w2xdwKfzJyr8qwEWgoZX6n+adrlJEjE3A7sCkjEozEErT0hscUueeKVcYX8ifuo1Fb4h3z30+5P7/NuceLrlCUueY3vkydtIYSDMdyXnht7OhPFqabSuQSuZ8L7NNaH9BaR4AHgGtSB2itD2mttwKTf7eDMCWYX+1nf2tw1LoyFpbvnsvC50jYbEZp4e7+KM09A2idWxrkWLEWeyFd3BfVBFhVXzpsl+lEYZV2mOx0haI0mBvVcvkwuvk36/jKH7aPOi4WTyTTYa2+AlOFXMR9FtCYctxkPiYI48a8yiIOtwcJRuI55ViX+gZ3eZ4qJV4nXf3RZJ2Y2tLxsGVSIveUOZd4nTz6yQtYmGHz00RQ7sutps1E0xWKUOF3U+J1jrpGoLVmx9Ee9rWM3gz8ePdAcm3E2rU7VchF3DMtreuTeTGl1K1KqfVKqfWtrdLhRcjO/Go/5v+pURdUYTD6HbqgejKU+IziYcfGkOM+VspSdqWOhwWTL6ZC5B6JJQhG4pT5nIaNNEp9ma5QlN5wLCexthq1ALT2DowwcvKRi7g3AfUpx3XASa0uaK3v0Vqv0VqvqarKvHNPEMCI3C1yidzL8hy5d4ciydIDM0vyL+5Ou41Sn5MynzNr+d3JgOW5a31S8VxBsPYkWH+fo0XulmC39oWTUfloY2GwFMZUIRdxXwcsVErNVUq5gOuBteM7LeF0x2paDbmKe3oz6VOh1PTcj3b1U+534XVlz7E/FSqL3Hn5MBpPyv0uwrFEsu3eZMRaQC3xuXJaALYEO57Qo/albewI4bAp7DaVLEw3VRhV3LXWMeA24Glg
F/CQ1nqHUupOpdTVAEqpc5RSTcAHgF8opXaM56SF6U/A40zaMbmI+5qGclbVl6aVHDhZLM/9eNf4pEFazK30j2mD1URg2UeTOWPG2pNQ5nMa5Z9HsZFSo/Hm7pHF/UhHiFll3mTjl6lETpuYtNZPAE8MeezrKb+vw7BrBCFvzK8qMqom5hDdnj+/gkc/eUFeXrfU56SnP0pTZ/+4iu+Prls9btfOF9bffUcwklNN+4kg2V7R60qzkbLtxD3SniLuPQOspCTrtRvNRi3d/dFpacsIwoSwpqGMRTWBgnvSJV4nCW2UEB6PNEiLIrcjuflqslJuZvXke2NQPrFsmVKfkzLTRuofIYf9SEco+b42j7JIesRs1FIdcGcV97a+MLf9dvLt5BVxFyYtt1++KG/R+FiwsldiCT2utsxUIBm5T+J0SCtyLzEXVGFkG+lIR4iz5pSh1MjpjT0DUTpDUWaX+6gKeGjJ4rn/aUczj209zrN7Wk7hLvKPiLswabHbFC5H4f+JpqYmjkca5FTCitwnczpkV38Eu00RcDuSC9TZ6stEYgmOd/czt9JPZZE7q2CDYckAzC73UVPspj0YIZqhK9UGsyn5lsauU72VvCLiLghDSC1EdrqLe8DtwGlXwyLhREKz/Wj3pEiR7ApFKTXbElrZUtk+jI529ZPQg4I9UgZMY4exz2F2uS9ZrbOtb3ikv/GIIe6bm7qzXuv3G5qS4wqFiLsgDCF1I9R4eu5TAaUUZT5XsoWhxV1P7+GdP3mR/3718ATNbJCu/miyXn/ZKOJ+JCUan1HsGdGWsSL3+jJfMnNraMZMe1+Yg21BitwOdh3ryVh/Jp7QfOV/t/HTZ/eN8c5ODRF3QRiCZcs4bGrSt5crBFevmsmT20/wX6aQ/3VXMz9/fj9+l53vPbk7WXtlougKRZJ2TGp2TyZSxb262EPLCAuqRzpCFHsclPicyRTboZH+xiOGFXPtmnoi8QS7jw8vaXCkI8RANMHOYz3Dzo0nIu6CMARL3GuKPdgn8e7RQvGlq5bw1iXVfOPR7fzXK4f43ENbWD6zmEdvu4CEhq/8YduE2jOWLQNGyWabIpm58vzeVt7y/WdoN+2Uxo4QboeN6oCbmoCHtr7MPjoYojzbLEZm2TJDM2Y2HO7EaVd8+E2zAdjSNNx33202Gz/WPVDQjBoRd0EYgsdpx+2wnfaWjIXDbuMnHzyT5TNL+NqjO0hozU8/dBYLqgN84crFPLenlT9sOjph8+sKDdoydpui1OdKpm4+uukoTZ39PLb1OGDkuNeX+7DZFDVmNJ6t2qOV4w5GoTelhov7xiOdLJ9ZwtxKP1UBN5szLKruPjEYze86XrjoXcRdEDJQFXBP2k07E4HP5eA/blrDpYur+LcbzmSO2XD8o+c3cPacMu5Yu6OgwpVKd3802fsWMOvLRNFa88IbbQA8Yn74HEkR7JpiIxo/kWFRNZHQNHX2J/8NOOw2KvzutOJh0XiCLY1dnDW7DKUUq+pKM2bM7D7RQ4W5FrBTxF0QJpZ7blzDF67MvYPT6UB1wMOvP3Yuly6uTj5msyn+33Wr8bsdfOhXr7HHjFJ3HOvm5vvW8cDrR3K69pH20LBF21yIxBL0hWNpdfytXaq7jvfS1hdmaW0xWxq7ONDalybulo+eKR2yuXeASDyRHGvcvzttAXbnsR7CsQRnzzGakq+uL2F/a5CegfQ0zN0nejlvXjk1xW4Rd0GYaJbNLE5GdsLI1Jf7+N0tb8JpV3zwl6/y+f/Zwrt+8iJ/3d3Ctx/flfS7s6G15ubfrONDv3yVSGxs/X5SK0JaWPVlXnjDKCv+/fetxKbg3pcO0heODYvcM2XMWCUK6stSxL3YnbYAa+W3nzXHaKyyqt74c1tKSmQwHONIR4glM4pZVltc0EVVEXdBEE6Zhko/v7vlTdhtikc2HeUj5zfwyCfeTCgS4yfPjJwCuPFIJ2+09HGse4D/3Tw27767f7AipEWZz4jc//ZG
K4trApxRV8oFCyp5aF0TQFLcy30unPb0ao/doShP7zjBf7x4MG0sQE3Ak5YKueFIJ7NKvdSaJaHPmGWIe6rvvre5F61h8YwAS2uL2dfSV7B2fZO7sIUgCFOGeVVFPPbpt9AfiSc9+evOqef+1w7zsQsako8N5XevN+J32akv9/Gz5/bzvrPqcs5SskoPpNoyZaYts+5gJx998xwA3r16Fn8z/XcrA8ZmU1QHBnPd32ju5Zq7XyIUieNx2rhy+Yy0dZfqYjdtZg14u02x8XAnaxrKk+dLfE7mVfrTfHdrMXXpjGKi8QSxhOaN5j5WzMperCxfSOQuCELeqA540kT89ssXYbcpfvD0HlWdB3MAAAoSSURBVMBYhOwLx5LnewaiPL71OFevnsmn37qQg21Bntx+fMTX+MvOZr7z+E601nSmVIS0KPc7iSU0kXiCCxcaTYGuXDEDr9Ooy5/Nanl4QxORWILf3nIeW75xBT+/8ey0D5nqgJuENjYubTrSyfHuAc6bOyjuYFgzmxu7kqmhe0704nfZqSvzsqy2GCjcoqqIuyAI40ZNsYe/f8s8Htt6nMv+9TmWfu0pVn3zTzy8wbBI1m4+Rn80zvXnzObK5TOYX+Xn7mf3Z82b7wpF+PzDW/jl3w6y/nBnWkVIC2tDk9th41xTfP1uB+9YWcucCl9a85WagMdshK55bOtx3rKwkjfPr8TtGN6gpSol1/3uZ/dT6nPynjPT20mfP7+Clt4wL+9vB4zUx0UzjMqmcyr8+Fz2gvnuIu6CIIwr/3DxPC5bUs2imgC3XjSPcxrK+MLDW1i75RgPrmtkaW0xZ9SVYLMpPn7JAnYd7+HpHc0Zr/Wvf9pDd3+UgMfBL57fn1xQLRmSLQNw3rwKPM5Bkf72u1fw8D++Oe16Rn2ZMJsauzja1c87z5iZ9T6svPgX3mjlL7ua+dib5+IfUrL5mtUzqS3x8P/+shetNXuae1kyw4jY7TbFkhmBgkXu4rkLgjCuBDxO7r3pnORxfyTOR3/9Orc/sImEhm9evTzZWOOa1TP5+fP7+cwDm7jr/WdwzerByHhbUzf3v3aEj57fQInXyY//+gZ2swVeIEVkrfoyFy2sTJuH12Uf1jKxuthDd3+Uhzc04bLbuGJ5Tdb7qDaza+5+Zh9+lz3p56fidtj5xCXz+dqjO3hk41G6QlGW1gaS55fNLObRzcdGbCaSLyRyFwShoHhddu696RxW15dS5Hbw7hQBd9ptPPQP57OqvpTPPLCZu57azZ4TvRxpD/G1R7dT4Xfz2bct4iPnz8HjtPH0juZkRUiLlbNKuP3yhXzg7PpR52KlQz6ysYmLF1dR7HFmHVtVZETuwUicD58/J616aCrXnlPPjGIPd/zR6Da6uCZF3GtL6B2I0dQ5/vV4JHIXBKHgFLkdPHDr+XSFImmWChi2yn/ffB5ff3Q7P31uPz99bn/y3P/9wKpk7Z8PnF3Pf716eNjznXYbt1++KKd5WFbLQDTBO8+oHXGsy2GjzOckFInz92+Zl3Wc22HnE5fO5+uPGuJu2TJgRO5gLKqO9w5oEXdBECYEl8OWtDoynfvue1fy/rPraO4JE4rEKPO5eOvSwd2xf3/hXO5/7XByAfVksCJ3j9PG5UuzWzIWVyybQV2Zd9Rqodeuqeenz+5HqfT1gMU1AeZV+bMWK8snOYm7UupK4MeAHfiV1vp7Q867gf8Ezgbageu01ofyO1VBEE4nlFJpeeRDmVPh57ZLF1DszW6ljIYl7pctqR62OJqJ77//jJyu63HauftDZ9IzEEt73Ouy88w/XTLmeZ4Mo96NUsoO3A28DWgC1iml1mqtd6YMuxno1FovUEpdD3wfuG48JiwIgmDxuStOrf5PscfB5962aMSF1JPl7DnZP5gKQS4LqucC+7TWB7TWEeAB4JohY64BfmP+/jDwVjXeS8GCIAiniFKKT791YZovPl3IRdxnAY0px03mYxnHaK1jQDdQMfRCSqlb
lVLrlVLrW1tbT27GgiAIwqjkIu6ZIvCh28dyGYPW+h6t9Rqt9Zqqqqpc5icIgiCcBLmIexOQmjBaBxzLNkYp5QBKgI58TFAQBEEYO7mI+zpgoVJqrlLKBVwPrB0yZi3wUfP39wPP6IlsqigIgnCaM2q2jNY6ppS6DXgaIxXyXq31DqXUncB6rfVa4D+A/1JK7cOI2K8fz0kLgiAII5NTnrvW+gngiSGPfT3l9wHgA/mdmiAIgnCySG0ZQRCEaYiIuyAIwjRETdS6p1KqFTh8kk+vBNryOJ3Jhtzf1Ebub2oz2e9vjtZ61FzyCRP3U0EptV5rvWai5zFeyP1NbeT+pjbT5f7ElhEEQZiGiLgLgiBMQ6aquN8z0RMYZ+T+pjZyf1ObaXF/U9JzFwRBEEZmqkbugiAIwghMOXFXSl2plNqjlNqnlPrSRM9nrCil6pVSzyqldimldiilPmM+Xq6U+rNS6g3zzzLzcaWU+jfzfrcqpc6a2DvIDaWUXSm1SSn1mHk8Vyn1mnl/D5p1ilBKuc3jfeb5homcdy4opUqVUg8rpXab7+P50+n9U0p91vy3uV0p9TullGcqv39KqXuVUi1Kqe0pj435/VJKfdQc/4ZS6qOZXmsyMaXEPaUr1FXAMuAGpdSyiZ3VmIkB/6S1Xgq8CfikeQ9fAv6qtV4I/NU8BuNeF5o/twI/K/yUT4rPALtSjr8P/Mi8v06M7l2Q0sUL+JE5brLzY+AprfUSYBXGfU6L908pNQv4NLBGa70Co56U1V1tqr5/9wFXDnlsTO+XUqoc+AZwHkYDo29YHwiTFq31lPkBzgeeTjn+MvDliZ7XKd7ToxgtDPcAteZjtcAe8/dfADekjE+Om6w/GGWh/wpcBjyGUe+/DXAMfR8xCtKdb/7uMMepib6HEe6tGDg4dI7T5f1jsPFOufl+PAa8faq/f0ADsP1k3y/gBuAXKY+njZuMP1Mqcie3rlBTBvMr7JnAa0CN1vo4gPmn1eZ9Kt7z/wO+AFgt3iuALm106YL0e8ipi9ckYh7QCvzatJ1+pZTyM03eP631UeBfgSPAcYz3YwPT5/2zGOv7NaXeR5hitgw5dnyaCiilioDfA7drrXtGGprhsUl7z0qpdwItWusNqQ9nGKpzODcZcQBnAT/TWp8JBBn8Sp+JKXV/ptVwDTAXmAn4MayKoUzV9280st3PlLvPqSbuuXSFmvQopZwYwn6/1voR8+FmpVSteb4WaDEfn2r3fAFwtVLqEEYz9cswIvlSs0sXpN/DVOvi1QQ0aa1fM48fxhD76fL+XQ4c1Fq3aq2jwCPAm5k+75/FWN+vqfY+Tjlxz6Ur1KRGKaUwmpvs+v/bu0OXCIIojuPfSSe2MxvkitV4wSAIF+5fEBT1rxCT/4D/hMFguWAxqF0MoiKie8liNRueYd7CgsF1y9w+fh9Y7nZ2wzze8riZ2WPM7LRxqbmb1R55Lr5u3/VV/DHwVQ8nF5GZHZnZqpmtkfNzY2Y7wC15ly74HV9vdvEys0/gI6W07k3bwAtB8keejhmnlJb9Wa3jC5G/hv/m6wqYpJSGPrqZeNviKj3p32FhZAq8AXPguHR/OvR/kzycewQe/JiS5ymvgXf/XPH7E/kNoTnwRH6LoXgcLWPdAi79+wi4AyrgAhh4+5KfV359VLrfLeLaAO49hzNgGCl/wAnwCjwDZ8Cgz/kDzsnrB9/kX+CHXfIFHHicFbBfOq6/Dv1DVUQkoL5Ny4iISAsq7iIiAam4i4gEpOIuIhKQiruISEAq7iIiAam4i4gEpOIuIhLQD3b3Ib4szY25AAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "tplt( lt, 'loss')\n", + "tplt( lt, 'val_loss')\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX0AAAD8CAYAAACb4nSYAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzsvXl8nGW5//++Z59ksu9N0yZNN7oXugBlFS0gCCJFUWQVARFEBb/qOUfZ9JwjoiKLoOcny0E8BUEQFQTEQqEsbUr3Qrc0bbZm3yaZfe7fH888T2aSmWTSZmtyv1+vvDrzzD3P3JO2n+d6Pvd1X5eQUqJQKBSKyYFprCegUCgUitFDib5CoVBMIpToKxQKxSRCib5CoVBMIpToKxQKxSRCib5CoVBMIpToKxQKxSRCib5CoVBMIpToKxQKxSTCMtYT6Etubq4sLS0d62koFArFccXmzZubpZR5g40bd6JfWlpKRUXFWE9DoVAojiuEEIeSGafsHYVCoZhEKNFXKBSKSYQSfYVCoZhEjDtPX6E4XggEAtTU1OD1esd6KopJhMPhYOrUqVit1qN6vxJ9heIoqampIS0tjdLSUoQQYz0dxSRASklLSws1NTWUlZUd1TmSsneEEOcJIfYIIfYLIX4Q5/XvCiF2CyG2CyHeFEJMj3otJITYGvl5+ahmqVCMQ7xeLzk5OUrwFaOGEIKcnJxjurscNNIXQpiBR4DPADXAJiHEy1LK3VHDtgDLpJQ9QohvAPcBX4q85pFSLjnqGSoU4xgl+IrR5lj/zSUT6a8A9kspK6WUfmAtcHH0ACnlOillT+TpB8DUY5rVCBAOS/6ytZamLt9YT0WhUCjGjGREvxiojnpeEzmWiK8Br0Y9dwghKoQQHwghPh/vDUKIGyJjKpqampKY0tAIhMJ87/nt3LZ2K795a/+wn1+hUCiOF5JZyI13LxG3m7oQ4qvAMuDMqMPTpJR1QogZwL+EEDuklAdiTibl74DfASxbtuyYO7XXtXv444eHmZnvYm5RGve/tod/ftyIy25hR03HsZ5eoVAcB+i7+3Nzc8d6KuOKZES/BiiJej4VqOs7SAjxaeDfgTOllIaHIqWsi/xZKYR4C1gKHOj7/uHCHwzzjWc+Ylt1e9Tc4N7PL6Cyyc3ajdUEQ2EsZrVFQaE43ggGg1gso5d0GAqFMJvNCZ8nYrTnORSSmdUmYJYQogyoBS4HvhI9QAixFPgtcJ6UsjHqeBbQI6X0CSFygVVoi7wjxv2v72FbdTsPfXkpswvS2FHbwbTsFFaUZfPilhqe2FDFgaZu5hSmjeQ0FJOMu/+6i911ncN6znlT0rnzc/MHHFNVVcV5553HaaedxgcffMDixYu59tprufPOO2lsbOSZZ55h/vz53HrrrezYsYNgMMhdd93FxRdfTFVVFVdeeSXd3d0APPzww5x66qm89dZb3HXXXeTm5rJz505OOukk/vCHPyRcQPzBD37Ayy+/jMViYfXq1dx///0cPHiQr3zlKwSDQc477zx+9atf4Xa7eeutt7j//vv529/+BsAtt9zCsmXLuOaaa7jnnnv461//isfj4dRTT+W3v/0tQgjOOussTj31VDZs2MBFF13EVVddxU033cThw4cBeOCBB1i1ahUtLS18+ctfpqmpiRUrViDlwKbBH/7wBx588E
H8fj8rV67kN7/5DWazGZfLxXe/+11ee+01fvGLX/DVr36V6667jtdff51bbrmFuXPnctNNN9HT00N5eTmPP/44WVlZ/eZ5++23D/WvfFQYNNyVUgaBW4DXgI+B56SUu4QQ9wghLooM+zngAv7UJzXzBKBCCLENWAf8d5+sn2Fl3Z5Gfre+kq+ePI3PLZ7CnMI01pw0lRVl2QAsLM4AYEetsngUE4f9+/dz2223sX37dj755BP++Mc/8u6773L//ffzn//5n/z0pz/lU5/6FJs2bWLdunV873vfo7u7m/z8fN544w0++ugjnn32Wb71rW8Z59yyZQsPPPAAu3fvprKykg0bNsT97NbWVl588UV27drF9u3b+Y//+A8AbrvtNr7xjW+wadMmCgsLk/oet9xyC5s2bWLnzp14PB7jwgDQ3t7O22+/ze23385tt93Gd77zHTZt2sQLL7zA9ddfD8Ddd9/NaaedxpYtW7jooouMi0I8Pv74Y5599lk2bNjA1q1bMZvNPPPMMwB0d3ezYMECPvzwQ0477TRA2xD17rvvcvnll3PVVVfxs5/9jO3bt7Nw4ULuvvvuuPMcryR1/yGlfAV4pc+xH0c9/nSC970HLDyWCSZLQ6eX25/bxtzCNP7jgnlxx5Tluki1mdlR086ak8ZdgpHiOGawiHwkKSsrY+FC7b/Z/PnzOeeccxBCsHDhQqqqqqipqeHll1/m/vvvB7T9BYcPH2bKlCnccssthujt3bvXOOeKFSuYOlX7P7JkyRKqqqoMAYwmPT0dh8PB9ddfzwUXXMCFF14IwIYNG3jhhRcAuPLKK/n+978/6PdYt24d9913Hz09PbS2tjJ//nw+97nPAfClL33JGPfPf/6T3bt7Y8fOzk66urpYv349f/7znwG44IILyMrKSvhZb775Jps3b2b58uUAeDwe8vPzATCbzVx66aUx4/XP7+jooL29nTPP1JYtr776ai677LJ+48Yz49N0OgpSbGbOmZvPjWfOwGGN77mZTYL5xRlsV5G+YgJht9uNxyaTyXhuMpkIBoOYzWZeeOEF5syZE/O+u+66i4KCArZt20Y4HMbhcMQ9p9lsJhgMxv1si8XCxo0befPNN1m7di0PP/ww//rXv4D4+eQWi4VwOGw81zcZeb1ebr75ZioqKigpKeGuu+6K2YCUmppqPA6Hw7z//vs4nc5+5082h11KydVXX81//dd/9XvN4XD08+2jP38gkh03lkyY1cw0h5WfX7aYmfkDe/ULizPYXddJMBQecJxCMVE499xzeeihhwyPe8uWLYAWtRYVFWEymXj66acJhUJDPrfb7aajo4PPfvazPPDAA2zduhWAVatWsXbtWgDDNgGYPn06u3fvxufz0dHRwZtvvgn0in9ubi5ut5vnn38+4WeuXr2ahx9+2Hiuf+YZZ5xhfNarr75KW1tbwnOcc845PP/88zQ2akuQra2tHDo0eDn6jIwMsrKyeOeddwB4+umnjaj/eGHCiH6yLJqagS8YZl+je6ynolCMCj/60Y8IBAIsWrSIBQsW8KMf/QiAm2++maeeeoqTTz6ZvXv3HlWU2tXVxYUXXsiiRYs488wz+dWvfgXAr3/9ax555BGWL19OR0fvnXVJSQlf/OIXWbRoEVdccQVLly4FIDMzk69//essXLiQz3/+84btEo8HH3yQiooKFi1axLx583jssccAuPPOO1m/fj0nnngir7/+OtOmTUt4jnnz5vGTn/yE1atXs2jRIj7zmc9QX1+f1Hd+6qmn+N73vseiRYvYunUrP/7xjwd/0zhCDLbCPdosW7ZMjmTnrMomN5/6xdvcd+kivri8ZPA3KBQJ+PjjjznhhBPGehrHBS6XC7dbBVrDRbx/e0KIzVLKZYO9d9JF+qU5qdomrQF8/cZOLxc+9A41bT0JxygUCsXxyIRZyE0Wk0mwoDh9wMXc7TUd7Kzt5JP6LqZmpYzi7BSK8csll1zCwYMHY4797Gc/49
xzzx30vWMV5be0tHDOOef0O/7mm2+Sk5MzBjMaeyad6AMsmprJk+9VEQiFscbZmdsYKcrmCQx9YUuhmKi8+OKLYz2FIZOTk2Ms9Co0Jp29A7CgOAN/MMy+hvjRR0OnlkmgRF+hUEw0JqXoz8p3AXCgKb7oN3ZFcoeV6CsUignGpBT9stxUhIDKpu64rzd2RuwdvxJ9hUIxsZiUou+wminOdCaM9Bu6lL2jUCgmJpNS9AFm5LmobE7k6auFXMXEw+VyjfUUjlsm0u9u8op+bioHm7r7lV8NhsK0uDXR9yp7R6GYsBxN2YmjRUoZU3NoKJ8/3POclCmbAOV5qXT7QzR0+ijM6C001dLtJxy5DqhIX5E0r/4AjuwY3nMWLoTz/zvhy9///veZPn06N998M6AVUBNCsH79etra2ggEAvzkJz/h4osvTngOnbfeeos777yTgoICtm7dyhe+8AUWLlzIr3/9azweDy+99BLl5eU0NTXFrWW/ceNGvv3tb+PxeHA6nTzxxBPMmTOHJ598kpdffpmenh4OHDjAJZdcwn33xW+pEQqF+NrXvkZFRQVCCK677jq+853vsHnzZq677jpSUlI47bTTePXVV9m5cydPPvkkFRUVRh2eCy+8kDvuuIOzzjrLKOvs8XhYs2aNUf64tLQ0pjb+8uXL+eY3v0lTUxMpKSn8z//8D3Pnzu3XD2Awfv7zn/Pcc8/h8/m45JJLuPvuu6mqquL888/n7LPP5v333+ell15i/vz5MbX6fT4fd9xxB8FgkOXLl/Poo49it9v7zfPyyy8fdA7JMmkj/fK8+Bk8+iIugCegirIpxi+XX345zz77rPH8ueee49prr+XFF1/ko48+Yt26ddx+++2DNhPR2bZtG7/+9a/ZsWMHTz/9NHv37mXjxo1cf/31PPTQQwAJa9nPnTuX9evXs2XLFu655x7+7d/+zTjv1q1befbZZ9mxYwfPPvss1dXVcT9/69at1NbWsnPnTnbs2MG1114LwLXXXsuDDz7I+++/n/Tv5qc//SkVFRVs376dt99+m+3btxuvRdfGv+GGG3jooYfYvHkz999/v3EBHUo/gNdff519+/axceNGtm7dyubNm1m/fj0Ae/bs4aqrrmLLli1Mnz49pla/3jxG/90Eg0EeffTRuPMcTiZtpD8jIvqVTW5Wzeztoann6AsBHn/8crIKRT8GiMhHiqVLl9LY2EhdXR1NTU1kZWVRVFTEd77zHdavX4/JZKK2tpaGhoakGpksX76coqIiAMrLy1m9ejUACxcuZN26dUDiWvYdHR1cffXV7Nu3DyEEgUDAGHPOOeeQkaE1MJo3bx6HDh2ipKR/3asZM2ZQWVnJrbfeygUXXMDq1av71a+/8sorefXVVwf9Ls899xy/+93vCAaD1NfXs3v3bhYtWgT01rx3u9289957MfXwfT4t6BtKP4DXX3+d119/3Sge53a72bdvH9OmTWP69OmcfPLJxtjoWv179uyhrKyM2bNnA1pt/kceeYRvf/vbMfMcbiat6Bek20m1mTnQJ21Tz9yZkuFU9o5i3LNmzRqef/55jhw5wuWXX84zzzxDU1MTmzdvxmq1UlpaGlOXfiAGq8sPiWvZ33rrrZx99tm8+OKLVFVVcdZZZ8U970C1+bOysti2bRuvvfYajzzyCM899xy//OUvE9bIT1Sb/+DBg9x///1s2rSJrKwsrrnmmri1+cPhMJmZmQl37A6lNv8Pf/hDbrzxxpjjVVVV/SqXRtfqH+wObKRq809ae0cIwYw8V1x7RwiYmuVUefqKcc/ll1/O2rVref7551mzZg0dHR3k5+djtVpZt25dUjXih0KiWvYdHR0UFxcD8OSTTx7VuZubmwmHw1x66aXce++9fPTRR2RmZpKRkcG7774LxNbmLy0tZevWrYTDYaqrq9m4cSOg3X2kpqaSkZFBQ0NDwjuD9PR0ysrK+NOf/gRoIrxt2zYgcT+AeJx77rk8/vjjRn
2h2tpao07/QMydO5eqqir2798PjF5t/kkr+gAz8lL7bdBq7PKSk2ojzWFVnr5i3DN//ny6urooLi6mqKiIK664goqKCpYtW8YzzzzD3Llzh/XzEtWy/3//7//xwx/+kFWrVh11tkltbS1nnXUWS5Ys4ZprrjG6Wj3xxBN885vf5JRTTom5w1i1apXRKvKOO+7gxBNPBGDx4sUsXbqU+fPnc91117Fq1aqEn/nMM8/w+9//nsWLFzN//nz+8pe/AIn7AcRj9erVfOUrX+GUU05h4cKFrFmzhq6urkG/r8Ph4IknnuCyyy5j4cKFmEwmbrrppkHfd6xMunr60fz6n/t44M29fHzPeUaLxa89uYn6Di8z813sqO1g3R1njcpcFMcfqp7+6FNVVcWFF17Izp07x3oqY4qqp3+UlOenIiUcbO6N9hu6vOSn23FazcreUSgUE45Ju5ALMCO3N23zhKJ0QNuNO78oA6fNrBZyFROOHTt2cOWVV8Ycs9vtfPjhh6M6j5UrVxqZMjpPP/00CxcuHPB9paWlYxblj5ff3bEyqUW/LFdbHdd9fX03bkG6HX9IKtFXDIqUMuksj/HAwoULx0V9+eNNKGH8/O6O1ZKf1PaO06YVXquMZPDou3Hz0h04rWb8wTCh8Pha81CMHxwOBy0tLcf8n1ChSBYpJS0tLTgcjsEHJ2BSR/qgZfDoufr6xqyCNLuxMcsbCJFqn/S/JkUcpk6dSk1NDU1NTWM9FcUkwuFwMHXq1KN+/6RXsyUlmfzmrQPUd3iMEgwF6Y6Y7llK9BXxsFqtlJWVjfU0FIohMantHYDLTiohLCXPbqo2duPmp9uNFE6VwaNQKCYSk170p+WkcMasPNZurKa+3YsQkOuy47RFRF8t5ioUignEpBd9gCtWTuNIp5cXPqohJ9WO1WzCqSJ9hUIxAVGiD3xqbj6F6Q7qO7zkp2nFoQzRV5G+QqGYQCQl+kKI84QQe4QQ+4UQP4jz+neFELuFENuFEG8KIaZHvXa1EGJf5Ofq4Zz8cGExm/jScq3Ua0G6JvoOZe8oFIoJyKCiL4QwA48A5wPzgC8LIeb1GbYFWCalXAQ8D9wXeW82cCewElgB3CmEyBq+6Q8fl68owSQwumjpkb5qmahQKCYSyUT6K4D9UspKKaUfWAvE9F+TUq6TUvZEnn4A6Emk5wJvSClbpZRtwBvA4L3HxoCiDCePffUkvn76DABSVKSvUCgmIMkkoBcD0f3NatAi90R8DdALWMd7b/FQJjiarJ7f211IefoKhWIikozoxyssEnffuRDiq8AyQO8EkNR7hRA3ADcATJs2LYkpjTyGp6/sHYVCMYFIxt6pAaIbWk4F6voOEkJ8Gvh34CIppW8o75VS/k5KuUxKuSwvLy/ZuY8ohqevIn2FQjGBSEb0NwGzhBBlQggbcDnwcvQAIcRS4Ldogh/dJ+w1YLUQIiuygLs6cmzcYzWbsJiEsncUCsWEYlB7R0oZFELcgibWZuBxKeUuIcQ9QIWU8mXg54AL+FOkzOxhKeVFUspWIcS9aBcOgHuklK0j8k1GAKfVTI+ydxQKxQQiqUpiUspXgFf6HPtx1ONPD/Dex4HHj3aCY4nDZlb2jkKhmFCoHbkDoFomKhSKiYYS/QFwWlXLRIVCMbFQoj8ADpsZTyA81tNQKBSKYUOJ/gA4rSZVhkGhUEwolOgPgLJ3FArFREOJ/gA4bUr0FQrFxEKJ/gA4rRaVvaNQKCYUSvQHwGkzqTx9hUIxoVCiPwBqR65CoZhoKNEfAH0hV8q4RUUVCoXiuEOJ/gDo5ZV9QZWrr1AoJgZK9AfAaKSiLB6FQjFBUKI/AKp7lkKhmGgo0R8Ap+qTq1AoJhhK9AfAoewdhUIxwVCiPwCqZaJCoZhoKNEfgJG0d3zBEOGwSgVVKBSjixL9ARip7B0pJWfe9xbPbDw8rOdVKBSKwVCiPwCOEcre6fAEONLp5XBL97CeV6FQKAZDif4AGPbOME
f6zW4fAF7VoEWhUIwySvQHIGWEIv1mt39EzqtQKBSDoUR/AEZqIVeP9JXoKxSK0UaJ/gDYLdqvZ7hbJjZ3+UbkvAqFQjEYSvQHQAgxIi0TW7qVvaNQKMYGJfqDMBItE3sXcpXoKxSK0UWJ/iA4rWY8/uHNsuldyFXZOwqFYnRRoj8IDuvwt0xUkb5CoRgrlOgPwkjaO6qQm0KhGG2U6A+CZu8M80KuytNXKBRjhBL9QXAMc/ZOjz9Ijz+E2SSUvaNQKEYdJfqDMNyRvh7lF6Y78AXDqtKmQqEYVZISfSHEeUKIPUKI/UKIH8R5/QwhxEdCiKAQYk2f10JCiK2Rn5eHa+KjxVA8/X990sDbe5sGHNMU8fNLsp0AeIMq2lcoFKOHZbABQggz8AjwGaAG2CSEeFlKuTtq2GHgGuCOOKfwSCmXDMNcx4ShbM76z1c+IcNp5czZeQnH6Ltxp2alAK14/CFSbIP+NSgUCsWwkEykvwLYL6WslFL6gbXAxdEDpJRVUsrtwIRLPHfakrN3fMEQB5u7aYvstk2Evhu3JCsFAG9wwv3KFArFOCYZ0S8GqqOe10SOJYtDCFEhhPhACPH5eAOEEDdExlQ0NQ1sj4w2ZbmpuH1BKqpaBxxX2dRNKCwNUU+EHukXZ2n2jkrbVCgUo0kyoi/iHBvK6uM0KeUy4CvAA0KI8n4nk/J3UsplUspleXmJrZGxYM1JU8l12fjVP/cOOG5vQxegNUgJhhJH7y3dftIcFjKcVkBt0FIoFKNLMqJfA5REPZ8K1CX7AVLKusiflcBbwNIhzG/MSbFZuOnMcjbsb2HjwcTR/p4jXcbjdk8g4bgmt488l723FaMSfYVCMYokI/qbgFlCiDIhhA24HEgqC0cIkSWEsEce5wKrgN0Dv2v8ccXK6eS67PzqjcTRvh7pAwP6+s1dPnJddpw27Vev7B2FQjGaDCr6UsogcAvwGvAx8JyUcpcQ4h4hxEUAQojlQoga4DLgt0KIXZG3nwBUCCG2AeuA/+6T9XNc4LSZ+cZZ5bxf2cIHlS1xx+xp6CIn1QYwoK/f0u0nx2Uz+u8qe0ehUIwmSeXpSylfkVLOllKWSyl/Gjn2Yynly5HHm6SUU6WUqVLKHCnl/Mjx96SUC6WUiyN//n7kvsrIcsXKaeS6bDz9waF+r3X7glS3elg5IxsYJNJ3a5H+SDVdVygUioFQO3KTxGE1s6Ismx01Hf1e29foBuCUGTkAtPbEF/1AKEx7T4Acl83w9FWkr1AoRhMl+kNg/pQMDrf20OmNXajdG1nEXRkR/USRfmvkeG70Qq7y9BUKxSiiRH8IzJuSDsDHdZ0xx/c0dOGwmijPc+GyWxJ6+k2RHH1tIVe3d9TmLIVCMXoo0R8C84s00d9dHyv6exu6mJWfhtkkyE61JYz0W4xI39bbdF3ZOwqFYhRRoj8E8tMd5Lrs7Oob6R/pYnZBGgBZqTZae+Ln6TdHRfpCiBHpyqVQKBQDoUR/iMybkh4j+u09fhq7fMwpdAGQnWIdINKPiH6aHRhaMTeFQqEYDpToD5H5U9LZ39iFP1IobW+DlrkTE+knEP1mtx+7xURqxM8fia5cCoVCMRBK9IfI/CnpBELS2IG7o1ZL4dRFP2cg0Y/sxhVCK2fkGIH+uwqFQjEQSvSHyDx9Mbeuk3BY8scPDzGvKJ2iDAegRfqeQKhfBC+lZEt1OzPyUo1jTqsZr8reUSgUo4gS/SFSmpNKqs3M7vpO3vykkQNN3dx45gwjes9O0UoxtPXZoPXJkS4ONndz/oIi45jDak64kNva7VeLvAqFYthRoj9ETCbBCUXp7Krr4HfrD1Cc6eSChb1CnhWpv9PX4nllRz0mAefOLzCODbSQu+ax9/jF63tG4BsoFIrJjBL9o2DelHQ+OtzOpqo2rj+9DIu599eYE0f0pZT8fUc9J8/IIcdlN447BljIrWv3sLO2M+5rxzObD7Wxrbp9rKehUExalOgfBfOnpBMKSz
KcVr64rCTmNT3Sj7Z39ja4qWzq5rNRdwSgVe+MZ+EEQ2G8gTBVLd0jMPux5dJH3+PiRzaM9TQUikmLEv2jYEFxBgBXnjydVHtsU3Pd04+O9P9uWDuFMWMdlvibs7p92rH6Dq9K6VQoFMOKZfAhir7MK0rnsa+exFlz+rd2zHBaMYlY0X91Rz0ryrLJS7PHjHUmSNl0+4PG46qWbk6IZAwpFArFsaIi/aNACMF5CwqNmvjRmEyCrJTeXP19DV3sa3T3s3Yg8UJuty9K9JvHxuLx+ENGgTiFQjFxUKI/AmSl2gxP/58fNwL9rR3QUzbDhMOxfea7vL2if3CMfP0H3tzLmsfeS/j69pp27n9NZRcpFMcbSvRHgOyoSP+dfU3MLUyjIN3Rb5xeXtkXjN2gNR4i/cqmbmraPEgp477+0pY6Hl63X605KBTHGUr0R4DsSCmGHn+Qiqo2zpjd3/sHbSEX+pdX1kU/zWHh4BiJfmOnl1BY0p1A1Bu6vIDW/vFoSHQxUSgUI4sS/RFAK7oW4MODrfhDYU6flRt3XG8jlVhhdUdEf8GUDA4294zsZBNwpFMT9Q5P/DLRjZ3HJvr+kCo/oVCMBUr0R4DsVCttPX7e2duMzWJieWl23HGJmqPror9wagbNbh9d3vjCO1KEwtJYxG1P0O/3iCH6iZvAD4TXr0RfoRgLlOiPANmpdkJhyas761lZlh03ywdI2CdXt3fmR9ozHmoZ3Wi/xe1DX1uOF+lLKWno9BljjwZVXVShGBuU6I8A2alWQNtcddrM+NYO9No7fT19ty+E1SyYU6iVa64cZV9fF3SAzjii3+EJGP0EjtbeUaKvUIwNSvRHgKzIrlyA02fFX8SFXnunb3nlbl8Ql93C9GytDPNoZ/A0RKwbiB/pR18Ujtbe6YnagKZQKEYPJfojQHak/k6uy87cSLQeD+cAnn6q3YLTZqYowzH6ot/VK/rtcfr9Rl8UjjbSV2WjFYqxQZVhGAF00T9tZg4mk0g4bqCFXFekpk9Zbuqob9Bq6PQhBJiFiBvp64u4Bel2Wo4y0veohVyFYkxQkf4IkJ/mYEVpdr8KnH0xPP04C7m66Jfmph5Vrn5tu4fads+Q3wdaOmauy06G0xpX9PV0zXlF6UOK9KN3HitPX6EYG5TojwA2i4nnbjqFUwdYxIXE9k53xN4BKMtJpb0nkDB1sssb4Bt/2Mz7B1qMYy1uH5c8soFvr91yVPNv6PRSkJ5Y9Bs6fWQ4rRRnOYck+sEo0VeevkIxNijRH0Mc1vg7crv6RPoAB5rccc/xj51HeHXnEW58uoLKJjdSSr73/HYau3xGH9+h0tDpoyDNQUZKItH3UpjuICfVTltPgGCSG60Ot/besShPX6EYG5TojyEOy0CRvvbakpJMUmxmHv7X/rilC/7uWf/5AAAgAElEQVS6vZ7CdAcWs4mvPVXBQ//az78+aWRxSSbd/lBSFo/HH4q5ODR0eslPdySO9Lt85KfbyY2Uiu7bGjIRT79/KOYzFQrF6JOU6AshzhNC7BFC7BdC/CDO62cIIT4SQgSFEGv6vHa1EGJf5Ofq4Zr4RMBkEtgtpjiiH8Jl13L989Ls3LF6Duv2NPHytrqYcS1uHxv2N/OFE4v57ZUnUdPWwy/f2Mun5ubzowtOAGBvQ9eAc/D4Q5x+3zoeW38AAH8wTEu3n8KBRL/DS0G6g9zIgnUyaZtd3gDPb67h/AVatVFPQC3kKhRjwaCiL4QwA48A5wPzgC8LIeb1GXYYuAb4Y5/3ZgN3AiuBFcCdQoisY5/2xMFpM8cs5IbDkm5/EJe9dxfv1aeWsrgkk3v+upu2qKj6H7uOEApLPrd4CstLs7n/ssWsLMvm52sWMTuSKrpnENF/ffcRmt0+3t3XDEBTxKNP5OmHwpImt4+CqEg/GV//xS21dPtD3HDGDAA8ytNXKMaEZCL9FcB+KWWllNIPrAUujh4gpaySUm4H+oZv5wJvSClbpZRtwBvAec
Mw7wmDwxLbSKUnEEJKYtowmk2C//7CQjo8Ae75227D5vnrtjpm5ruMvQAXLynm2RtPIcdlJ91hpTjTyd4jA4v+85trANhW3U4oLI0c/IJ0B5kR0Y+2flq6fYTCMuLp24xjAyGl5Kn3qlg8NYOl07ISNo9RKBQjTzKiXwxURz2viRxLhmN576RAa47ee63U6+707b17QlE6N589kxe31HL3X3dT3+Hhw4OtXLioCCHi7wWYXeBiT0P8BWCAIx1eNuxvZkZuKt3+EPsau4x0zPx0O+lOK1JqC8s6jZHduPnpjt5Iv2tge2fD/hYONHVz9amlxndWoq9QjA3JiH48RUk2JSSp9wohbhBCVAghKpqampI89cTA0SfqdUfV0u/Ldz49i6+fXsaT71Xxpd9+gJRw4aIpCc89uzCNA43uhNk1L22tJSzhR5/T3Loth9uNEgsFEU8fYuvvRN8JpNkt2MwmmgeJ9J96v4qcVJvRMtJpNavNWQrFGJGM6NcA0buMpgJ1CcYe1XullL+TUi6TUi7Ly0tcq2Yi4rSaYtIXjUjf1l/0hRD822dP4HvnzuFwaw/zitKZme9KeO45BWn4Q2Gq4uzolVLywuYaTpqexVmz88hKsbLlcBsNnV4sJkF2is0Q/WhfP3o3rhCCXJdtwEi/rt3Dmx838KXlJcYOZC3SV56+QjEWJCP6m4BZQogyIYQNuBx4OcnzvwasFkJkRRZwV0eOKSI4beaY9EW3N769oyOE4Jtnz+Txa5Zx35pFA557dkFkMfdIf4tnR20H+xrdfOHEYoQQLCnJNCL9/DQ7JpMgM1I4Lrr+jl6iIdelWTs5LruxkOsNhPj22i3squswxq/deBgJfHnFtN7vbDWrlE2FYowYVPSllEHgFjSx/hh4Tkq5SwhxjxDiIgAhxHIhRA1wGfBbIcSuyHtbgXvRLhybgHsixxQR+i7kDmTvRPOpuQUsKM4YcMzMfBcmET+D58UttdgsJsMeWjoti32NbvY3dpEf6ecbL9LXSzRYzdo/nVyXzVjI3XiwlZe21nH7c9sIhMIEQmHWbqrm7Dn5lGSnGOdQC7kKxdiRVME1KeUrwCt9jv046vEmNOsm3nsfBx4/hjlOaBw2c6y94x840h/Sua1mSnNS42bw7K7rZFFxhiHsS6dlArCtpoNz5xcA8UVfL9Ggk+Oy83G9dv4PKrVSEJ8c6eKJDQeZmpVCY5ePr57cG+WD9p07EpSVUCgUI4uqsjnGOK2x2Ttun3YBSLXH77Y1VOYUprEnjuhXt/Zw8owc4/nikkyEACm1RVpIJPo+ijIcxvNcl52Wbh9SSt6vbOHEaZlkp9r51Rv7KMtNpTjTyZmz82M+O8Vq5oiK9BWKMUGVYRhj+loduqfvGoZIHzRfv6qlO+Zuwh8MU9/pZWqU5ZLusDIzT1sU1kXfYTVhs5j6Rfq6/QOavRMISeo7vGyv6eCU8hzuvng+QsDu+k6+snIa5j7lpVXKpkIxdijRH2P6LuR2+4KYRG8FzmNlTmEaYQn7G3sXc+vaPUgJJVnOmLG6xaOLvhAisitXs2KiSzTo6Au6r+7UdgefMiOX4kwnPzh/LukOS9zy0g6VsqlQjBlK9McYR6T2jr7LVu+alWjD1VDpzeDptXgOt2qN1qdFRfqgLeYCMZ59dCmG6BINOjkuLcPnb9vrsJoFJ03XznHVKaVs+fFq8tJ6x+po2TsqZVOhGAuUpz/GOCKNVHzBMA6rOaaBynBQmpOCzWyKKbxW3aaJfkkf0T9/QSG76zoN4YZY0T8Uyfefktl7h6BH+lsOt7O8NMtoDAP0s3V0UiL2jpRy2C5uCoUiOVSkP8Y4jebomsXjjmqgMhxYzCbK810xaZvVrR6sZmHYODqZKTbu/fwCUqI2hmVGif7OWi3/PjpVVI/0AU6JWhgeCKfNTFiCP8k6/AqFYvhQoj/G9O2e5R7mSB9gbmFaTNpmdWsPxZnOhJF4NBlOq7E5a1tNB1OznEYPYI
DsFBt6sH5yeXKir+/M9SpfX6EYdZTojzG6HaIv5g63vQOar1/X4aXTq4l3dVtPP2snEelRkf72mnYWTY3dEGYxm8hKsWGzmDhxWnJVs/ULXY8qxaBQjDrK0x9j7JHuWT2G6IfiLn4eC3MKtVTMvUe6WFaaTXVrDwsixc8GI8NppcsbpNnto7rVw1dWTO83pihDK87mSCbj6A9ruKR6KyttFrKeKYDUdLCngz0NbC7tz3g/NlfvOLsLrCmg1gMUiiGjRH+M0TNhats9LCjOGHZPH6IyeBq6mFOYRltPgJKs5CL9zBRtg9aG/VqTlcVT+5d+ePDLS5MTfIDys2kOZbJ7fzX5dieOoAd6qsDXCb4u7SecxB2AMEUuBn0vEPqFIz3xRcQWNcaeBhbb4J+nUEwQlOiPMXML0zEJ+Li+k3PnF+L2BUkbZtEvznTislvYe6SL6hKtZ25JtnOQd2nou3LfiXTWmh+n3k95XuJKn/045ZtU5jRx68cbeeHTp3DS9OzY16WEoC9yAegEv7v3YuBzx14cjNc6tde8HdBRE/ueZKqAm21RF4R4F5B4F5GoO4/o46bh2V+hUIwUSvTHGKfNTGluKrvrOpFSRpqiD+9fixAi0lCly0jX7Jujnwhd9N/dpzVb0Z8fC4anH6/SphBgdWg/rmMssx0OQ6A7crHQLxRdUReRPj/RFwv3EWiJeh4cvME8oNlOA9lScS8ucS4itlRlXylGBCX644ATitLZXtOOLxgmGJbDLvqg7cz9x84jVEc2ZiVr7+gif6TTy8VLEjdsGQopfRavRwyTqVdUSW4NIyGhYNQFI/ruo+/dSJwLSfuh3rsRX2fS9lXI6sJvTsHpyjy2i4hleNeIFMc3SvTHAfOK0vn79nrqO7QGJYOVVT4aZhek8X8bq9lyuB2X3WJ49YMRPW7R1MxhmYujT5rqcYHZAs4s7edY0O0rfx+ryufudxF5d0clHe2tXFSapr3m7YTOutiLSjL2lck6yNqGvr7R5yJSdoZa75iAKNEfB8wrSgegokprNRCva9axMieymPv23iZKslOS3gmb7owW/YHr9yeLnqbqPZ5Ef7iItq9Scwcc+njVRt6ub2L1F86Lv1AeDkOgJ4F1Fb3e0dX/bsTdCL4DvccDPf3P/8NaJfoTECX644B5U3TRbwOGp5Z+X+YUaqLv9gX7FVobCN3eMQmYH5nnsTKgpz+MNHR6yXNpXcCOR9oj+yMaOr1Mz0ntP8BkikTnLobPvoq6ONjifKbiuEdtzhoH5KfZyU61sSkS6Q/35izQmp3kRkomJLsxC7R9BA6riVn5aTHlGY4Fw9MfwUi/vcfP6fet45Wd9SP2GSON3mjmSMT2G1F0+yqzBArmwbSVaiF5gqJEfxwghGBeUTqVzVpBM9cIePrQm68/lEhfG5/CyhnZgw9MErtF+2fnHcFIv7HLhz8Y5lBLHNviOEHfCa03o1cohgMl+uOEE4rSjMeuYeqa1Rdd9KflJB/pA6y94WR+eP4JwzYPIcSI98ntipScaHEfn20Zw2HZK/qjEekrJg1K9McJ86L88pHw9KHXky/LHcJmKjRrKLpk8nDgtJlH1NPv9GhpkW3joBdvty/Iqf/1Jv/c3ZBwjJTS6KkA0OULEo48VZG+YjhRoj9OOKFo5EX/80uLee7GUyjLHfsFupGO9PXici3dYy/6h1p6qOvw8urOI3FfD4clp/73v/hTRY1xrKOnt0WlivQVw4kS/XFCeZ4Lm1n76xiJlE0Aq9nEirLh8+aPBafNPKIpm50Ra6S12zdin5Esde3abt6NVS1xX+/wBKjv8LK7vjPmGGhrqSMV6Td2enn0rQOEwknk+ismDEr0xwlWs4lZBS5SbOak6twf7zitI2zvRBrMt3UHBhk58tR1aKJf3eqhvqN/OQfdgmp2916g2iN9iadlp4xYpP9fr37Cz/7xCdtq2kfk/IrxiRL9ccTSaZn9ullNVLQ+uaNh74x9pF/b3iv0Gw
+29nu9raf/orPeuGZOQRqNXb5hj8YPNLn5y9ZaADZH9ocoJgdK9McRPzj/BP7v6yeP9TRGhZG3d7RI3xsI0zPCTdg7egL8/t2DCT+nvt1rVDqNJ/rtkUg/+gKlb8yaW5hGKCxj7gKGg4f/tR+7xUx+mp2KQ/3npJi4qB254wiX3TIiG7PGI06rmfpRWMgFaO32D9vGsr7sqOng5j9uprrVQ6bTyqUnTe03pq7dQ0m2k1lWV9KRvr4xa06htsB/pMM7bHeBepT/9dNn0NTlY/2+JtWkfhKhIn3FmHC0KZstbl9Skbu+kAua6A83Ukqe+fAQlz76HsGQZr0kWnCta/cwJcPJirJs9jW6aekTteuRfmuP37BxOjwBUmxmowT2cC7m6lH+18+YwUmlWTS7/RxuPX43sSmGhhJ9xZjgsB6dvfOV//mQO/+ya9BxXd7eXsNDSdt8dUc9T71XNeAYbyDEHX/azr+/uJOTy3P4+7dOJzvVZmTpRBMMhWno8jEl08nKSObUpj4eur6QK2Xv4/aeABlOKwUZWlnk4VrM7egJ8JettVyxchq5LjsnTdeqhlYoX3/SoERfMSak2Ia+kCul5FBrN//6pJHwIAubnd4ApblalNyWpOj/Y2c93/zjRzyybn/CMXXtHr7wm/f485YabjtnFk9es5zsVBuF6Y64wqwvwk7JdLKwOBO7xdTP4mmPysnXLZ52jyb6ual2LCYxbJH+4dYewhKWRy5As/PTSHNYqDikRH+yoERfMSbom7Oid6EORo8/hDcQpqXbz56GLuP4x/WdPPTmvpixnZ6gUZkyGXvn3X3NfOv/tiLpzZGPx4Nv7qOy2c3jVy/nO5+ZbVTwLMpwGP0QotGj/ymZDmwWEydOy+qXrx8r+pr109ETIDPFiskkKEhwQTka9M5pUyP1l0wmwYnTsvhIif6kQYm+Ykxw2syEJfiC4aTfE73QqTdqB/jlG3v5xRt7cft6vf5Ob4DiTCdWsxjU3qlq7uaGpysoy03lhjNm4AuGE1pPG6taWVWey9lz82OOF2Y44kbjdRGxnpKpiezysmx213XGzLWtx096pMhec7ce6fvJdNp6zz1Mol9jiH5v/aVl07PY29g14MVOMXFISvSFEOcJIfYIIfYLIX4Q53W7EOLZyOsfCiFKI8dLhRAeIcTWyM9jwzt9xfGK3hRkKL5+dErjuxHRb+/x89aeRgCau3zGOf3BMBlOK1kptkHtnQ0Hmunxh/jNV09kenZq5Lz9BbDF7aOyqZtlpf13NRdlOGjt9vf7PnqkX5ShZd7MKUgjLDHaVoKWvTMz32V8Bmh3G3rXssL0+BeURLyzrwlfMP7vtabNQ7rDEtPr+KTSLKSEjw6raH8yMKjoCyHMwCPA+cA84MtCiHl9hn0NaJNSzgR+Bfws6rUDUsolkZ+bhmneiuOco6mpr0f6S0oy+bCyFX8wzCs7jhCIZM/ouexdkd246Q4L2am2mEj/0bcO8PPXPok5b3WrB6tZUJqTagitviM2Gt33Xlbav2ViYYYWyTf0Eee6dk1k0xzaeQvS7f3Gtff4Kc1NxWwSvZ5+ZCFXO7cW6Sdjhe050sWVv9/IL9/YG/f1mjZPTJQP2u/TbBJqk9YkIZlIfwWwX0pZKaX0A2uBi/uMuRh4KvL4eeAcoZJ+FQOgF5Xb2+BO+j26N3/R4il4AiG2HG7jpS21Rieu5ohg6jn6aQ4r2am2GE//pS21vPhRbcx5q9t6KM50YjYJMiNCGy/S33yoDZvZxMLi/m0j9Ui+r69f1+4xrB3AyLVv7Oy9a2nr8ZOTaotcoHx4AyF8wTAZUZG+JxAySksMxCdHtPo9T7xbFXM3oVPT1mP4+TopNgvzitLVJq1JQjKiXwxURz2viRyLO0ZKGQQ6gJzIa2VCiC1CiLeFEKcf43wVE4Sz5uRRlpvK7c9ti5vqGI/miL1z4aIiTAKerahmY1UrayIbovRIX8/RT3dqkb5u7wRDYQ42d1PX4Y
2xYWpae4xuYrrQxhP9TVWtLJqaEbdfbWFE9Pt673Xt3hjRz+8T6XsD2uJ0ZoqNnFQbzW6/8dnRnn68c8djf6Mbs0lgMsF//yP2jkZKSXVr/0gf4KTpWWyv6VDF1yYByYh+vIi977+MRGPqgWlSyqXAd4E/CiH6NVoVQtwghKgQQlQ0NTUlMSXF8U66w8r/XHUS3kCIG5/ejMcfYvOhVn7+2ifsqOmI+54Wt59Um5n8dAeLSzL5cyRiv3ZVKRDP3rHG2Ds1bR78IW3hODoKro6yPDJTNKHt6GPveAMhdtZ2cFIcawe0aBziRPodHqZk9u6ktVvMZKVYDY9ez8vPSrGR67LT4vYZC6qGp6+LfhK+/r4GN9NzUrjxjHL+vr2ezVHRe2u3H08gREl2/85pS0oy6fGH2BuVFaWYmCQj+jVASdTzqUBdojFCCAuQAbRKKX1SyhYAKeVm4AAwu+8HSCl/J6VcJqVclpeXN/RvoTgumZmfxgNfWsLOug5OvPcNLn30fR5Zd4Af/WVnXP+6tdtPdqTP72kzcwEtQp2R5yIrxdob6Xv1SF8T/Q5PgEAozP7GXiupKtJGsdsXpLXbbwhhIntnW3U7gZBk+fT4palT7RbSHRaORFXR7PEHae8JxET6oFk8DRF7R68CmpViJcelR/rahcDw9NP1SH/wO6J9jV3Myndx45kzKEi3c8/fPjZ+lzVt2vvjRfpLp2UCsOWwqrg50UlG9DcBs4QQZUIIG3A58HKfMS8DV0cerwH+JaWUQoi8yEIwQogZwCygcnimrpgIfHpeAXdfNJ+z5uTxwJeW8P3z5rK1up3NcfLGm90+clI1e+T0WVpw8PmlmtOY67LT3BXx9CPF1tIcFnJStYtEe0+AA01Roh/pR6znrZdEhDDFZsZqFkbBMx19EVffwRqPogxnTKRf1649Lo4j+o1d3si8tDlr9o4W6eufrYt+gSH6Axdd8wfDVLX0GE3sbzqznG3V7Uaf4F7R7x/pT8tOITvVxtbqNtp7/Lz5ceIuX4rjm0GrUEkpg0KIW4DXADPwuJRylxDiHqBCSvky8HvgaSHEfqAV7cIAcAZwjxAiCISAm6SUarVIEcNVp5Ry1SmlgBYdP/b2Af7nncp+qZEtbr+xYLq8NIunrlvBqnJt6SjXZe8f6TusZEVEv7Xbz/5GN3lpdgKhMFUtEdFv1YRQ9/SFEGQ4bf0i/YqqVmbmu4zzxaNvrn7vxqy+om83Flz1YmtZqVqk3+0PGd69bu/YLCZKsp2RTV2zEn5+VUs3obBkVoGW/nlquXY3VHGojdLc1Kgc/f6iL4RgSUkmWw63880/fsSG/S1s/o9Pk+OyJ/w8xfFJUnn6UspXpJSzpZTlUsqfRo79OCL4SCm9UsrLpJQzpZQrpJSVkeMvSCnnSykXSylPlFL+deS+imIikGKzcOXJ03l9dwMHI9G4Tmu3n5yIvSOE4MzZeVgi3cZy0+yGd9/lDWA2CVJsZrIjIt3S7eNAk5vyvFSm56Qa0a/u7ZdECWFmijXG0w+HJZsPtbFsgCgf+u/K7Zujr1OQ7qCpy0cwFO7j6WtzrYzckejrCwBfXjGNDftbjIsFgNsXZF+UB78vkgml5/zPyneR5rAYvn51Ww+ZKVYjfbQvS0sy2d/kZlu1tqYyHvoLK4YftSNXMe646tTpWE0mHn/3oHFMSklLt4/s1PiRZ06qzdic1ekJku6wIIQw7CA90i/Pc1GWk9Ib6bf1xFwcQPP1oyP9/U1uOr3BuJuyoinMcNDs9uGP7DKu6/BiEvQriVyQ7iAstUJw0f69PtfK5m7MJkFqVDP6r6yYhtNqNn4n4bDkpqc3c9HDG4zdvfsauxBCa70JvSUWdKtMy9HvH+XrLJmWiZQY52vqUqI/EVGirxh35Kc5+PzSKfxpc7WRbtnlCxIISSMa7ktemp0uXxBvIESnN0B6xA/PStX+3NugCffMfBfTc1Kpa/fgC4aobvVQkpUSU0s+MyVW9C
ubtAvEnIK0AeddlOFASgy/vq7dQ0G6A6s59r+ZfhFo6PTS3hPAaTXjsJqNu5gDjW4yndY+c7Jx6UnFvLS1jma3j2c+PMS7+5vxBEKs+0Tbkbyv0c207JSYlNJl07PY2+CmoyegiX5m/0VcncUlmUTvrhnuxi2K8YESfcW45IvLSvAGwmyq0qwJfadqTgLR1y8GzW4fnZ4AaZFaNlkRi2RTpLJleZ6L0tyUSCkEDzVtPf1SGDOctpg6NHpOvV7mOBH6rlzdk69p6+ln7UDvrtwjHV7aegJkRbz73Ih/XtfhNfYLRHPtqjL8wTD/9con/Ocrn3D6rFxyXXb+sfMIAPsb3MzKj70w6QvPHx1ui7sxK5p0h9W4SwAl+hMVJfqKccmsSFRdGfH19Zo0iewdXTCb3X46vUHSI7611Wwiw2llS7VmceiRPmgZPNWtPf1SGLVIv9faONLpxWIS5Cb4bJ3oXbnNbh8VVW1xLSEj0u/y0d7jN7z76AtaprO/6JfnuTh7Th4vfFSDxSy4b80iVs8vYN2eRty+IJXNbmMRV2fJNK3Ewuu7j+ANhI0F60QsLck0HivRn5go0VeMSzKcVvLS7ByI5Nbri7Q5CbJnDNHv8tHlDRiiD5CdasMbCJNiM1OU4aAsIvpbq9vp9of6CWGm00q3P2R48w0dXvLT7EYZ5URE75z980c1BMOSLy4r6Tcu12XHJKCx00tbj9+woFJsFqOkRPQibjQ3nlmOzWzi3osXUJTh5PwFhfT4Q/zhg0MEQpJZ+bGir5dY+Pv2eiB+5k40S6ZFib7y9CckSvQV45byvFQjt35QeydNE/2Wbp+2kOvszUbWF2nL81wIIchMsZLusPDOPm33d0kfIdRTJXWL50inl4I4Nk1f0uwWUm1m6jo8rN1UzfLSLCOTJhqzSZCXZjc8/WiB179fRpxIH+DkGTls+fFnjP0JJ8/IIcNp5f97R9v+0tfeAc3i0ev2xNuYFc3Skt4MJRXpT0yU6CvGLeV5Lg40dSOlpLVbt3fii75+B6DZO4GYtETd1y/P0yJ8IQSlualsr9VSE/tG+hl9SjEc6fQau2IHQghBYYaD13YeobKpmy8tn5ZwbEG6gyOdPi3Sj/Lv9bz4RKIPvcXqQLOvPn1CgVFsrjw/td/46A1lg0X6swtclOVq52gegd7CirFHib5i3FKe56LDE6Cl20+z20+aw4Ld0r/YGWj1+dPsFo50eOnxh2LsHf2CEB11T89JRa/0EM/egd5SDA0d3n5pl4koynBS1+ElzW7hswsLE47TumF56PAEjIsSQG5krplxFnITcf4C7XOmZjlJsfXfb6mLfnaqLeaCEQ+L2cS6O87iC0uLjRRYxcRCib5i3DIjEplXNnXT0u1P6Ofr5KbZjQ1dMfaOq9fe0SnL0YQ+K8VqNFDXyYyqtNnlDdDtDxl+/WDo4z63ZEpcAdYpSNfmGpax/r2+NhFvITcRp83KJdVm7ufn60zJdDIlwzFolB+NVgfIN6R2lorjg0HLMCgUY4Uu0gea3LR2+wYtCZDrshm7WWMWclPiR/rQP8qH3pLG7Z6Aka6ZjL0DvRk8ly/vv4AbTUGaw2j+EmvvRDz9IUT6DquZR644kby0xL+fH372BKzm5Ftc5Lrs+IJh3L5gwh2844Gv/28Fp5bncO2qsrGeynGDEn3FuKU404ndYuJAo5sWt3/QdMNcl51Nke5P6VGR8vkLC+n0BmIi/dJc7VwlcRY2e2vq+40iZ8naO19aXsKUTGfcRivRRJ8vK2YhV4/0B76r6ctZc/IHfP1zi6cM6XzRKbDjVfRDYcm6TxoJhsKDir6Ukr9ur+dTc/P73dlNNpS9oxi3mEyCGXkuDjS5aXb7E+7G1YnO7NE3Z4GWsXL76jkxKZd6pD81Tm35NLsFk9Cyd/QCavE2WcVjalYKX14xjcEax0VnA0VH9fp3TB+CvTMS6NlQ4zmDp9ntIxiWHIrTIawv+xvdfOv/tvDSltpBxw
5GY5eXG5+uGLT38nhFib5iXFOel8q+RjdtPf6EmTs6uVH2T/og0WlOqo0fnj83bh69ySRIj9TfMeydJEU/WfRduRAb6Z85O48bzpgx6J3CSGPscB7Hi7m1kYJ2Na0ewoN0/NJ7KejlpY+F9XubeW1XAxsONB/zucYCJfqKcU15nouaNg+hsDQKkiUiRvSdA9/CCyG48czyGMsnmkynlXZPgCMdXjKc1rgtEo+FgrRoe6f3ApWZYuPfPnsCNsvY/tfMc43/SL8+0q/AHwrT0DVwVzF9Z3dtkq05B2Jfo1bZdCj9nccTSvQV45ryqMXXRBuzdGJF/9jskYwUm+bpJ5mjP1QyU6zYLCZMYvVXG+gAABEqSURBVPC7krEgO9WGEL3N5scj9VGdxPRS2YnQN/kl2495IPQS1vuO09aSSvQV45oZub2bjQaL9PPS9Fr74BogXTIZMp1WOjwBGpPcjTtUhBAUpNvJcFoHLe8wFljMJrJSbOM60o+O2g8P4uvrlVKHQ/T1PsLHaz9hJfqKcY2eqw/JR/ouu+WYhVQvr6xF+iPTPaogzRHj5483clLHt+jXt3spzUnBbBIcHiDSl1IaqbwNnV4CofBRf2aPP0hNmweH1URVSw++YCjuOG8g/vHxgBJ9xbgmxWYxeswOtjlLT3ccDrsk02mltdtPU5dvROwdgEtOLObSk6aOyLmHA60F5fi1d+o6PJRkpzAl0zFgpN/SrVVenVuYRlj2lr4+GvQF4XPmFhAKy37d3QAOtXSz6K7X2XhwfHaGVaKvGPfo0f5A/WkBUm1mHFbTsKQ7ZqTYcPuChCUjYu8AXLFyOt88e+aInHs4yE2zj+tIv67dS3Gmk+nZqQOKvm7tnDZT6xk82GJuMNTbx6Ev+uLtZxcWxTyPZlNVG/5QmI0HWwb/EmOAEn3FuGfptCxKc1L6daDqixCCXJeddMexb76JLoMwUpH+eCfXZRu3KZu+YIhmt4+iDCcl2SmDiL4mzKfPzgMG9/X/9/1DXPbY+zH9iHX2NXRhM5s4e24eZpOIu5i7u057355xmt2jRF8x7rn1UzP527dOT2rsyrIclkQ1AjlaogueJbsbd6KR67LT7Q/h8Y8/f1q3aKZkOpiWnUJrt58ur1Yg71BLN89vrjHGVjZ3Y7OYWF6qFZ6rHSRXX3/v9kiD+Gj2NbqZkZdKis3C9JyUuIu5u+q09+2Jc9EYDyjRV4x7rGZT0lvnf/HFxfzwsycc82dGi/5wb8w6Xsgz2jf2imRjp5dfvrGX98Z4Y5Ju0UzJdDI9UjxPj/Yf+Oc+7vjTNqoifntlk5vSnBRSbBZyXbaY79OXj+s72V2vifXOuv6iv7ehy+jqNjs/zUjf1JFSsru+EyE0W0lvxDOeUKKvUMQhI1L7xmoWRsG2yYYuphc++C7ffXYr//biDk772ToefHMfX3uygp21/UURtAyXRFktfdnf2HVU2TT6xqwpmU6mRWoyVbf2EAiFefPjBgBe3lYHaOI7I9dljB9oV+6fP6rBYhLMLnCxqy42Uu/2aZk7ejXT2QUuqlq6YzJ1qls9dHmDnDIjh2BYGvsDxhNK9BWKOOiRfn6aY1zm0Y8GK2fk8OLNp3LJicW88XEDz1fUsGbZVF74xqlkp9r42lObYjZIHenwcu/fdnPSvf/k22u3Dnr++g4P5z3wDr98Y++Q56Z/blGGwyjEd6ilh40HW+n0BnHZLby8rY5AKMzh1h4jGaA405nQ0w+Gwry4pY6z5+azamYuu+s6CUWVd9AFfHakD/GsAi0bSF8ohl5r55JIZ7PxmMs/ucvNKRQJ0BdyJ6u1o7N0WhZLp2Xx4wvnEQiFjYqbv79mGWsefZ+rfr+RRVMzOdTSzfaaDkJSMjPPxas7j3CgyZ2wzAXA67saCIYlf3j/EN84q3xIqba17V5yUm04rGYcVjOZKVYOt/ZQ167l0H/707P4yd8/5rVdRwiGpTGPKZlO3trThJSyX1G8d/Y10+z2ce
mJU+n2BfEEqjjY7GZmpAWlnqlj2DuRP/c1djFvSjoAu+o6MZsE5y0o5Id/3sEnR7q4OOlvNTqoSF+hiIPernCyZu70xWE1x5RYnluYziNXnEhjl48N+5sxmwRXnjKdt+44iz9cvxKbxcTj7x4c8Jyv7TpCVoqVLl+QP354eEjzqe/wUJTZ+3czLZLB88buBs6YlccXTpyKxST49T/3AcRE+p5AiLZIVzQdKSXPb64hM8XKp+bmsyBS8G5nba/Fo2fuTI/cWZTlpmIxiZhoflddBzPzXKQ5rJTnudhzREX6CsVxgcVsojjTGbexuULjzNl5bP3xZ+KWkb5kSTEvfFTD7avnxK2O2tbt58ODrdx4xgy213Tw+LsHuXZVacJ2mH2pa/dQmtO7W7skO4U3djfgD4b5zmdmk51q4/RZuazb0wTAjKhIX39/dqqNX7y+h7Wbqmnr9hMMS646ZTo2i4nyvFTsFhM7azuMJvR7G7qYkZeKJZI6bLOYKM1NjcnV31XXaewHmFOYxuZDbUl9n9FERfoKRQL+eutp3Hx2+VhPY1yTqG/A104vwxsI88wHh+K+/uYnjYTCknPnF3LTmeU0dvmGVOu+vt1rCDjA9OwU/MEwJgHnnFAAwEVLtMYxuS6bceem7+6uadP6E/9ufSXFmU6+fsYM/uOCE7jtnFmAdtGfW5RuZPBIKdnb4DasHZ3ZBS7D+2/q8tHY5TOsnjmFadS2e+j0xt5VdPQEuOrxjazb05j09x1OlOgrFAnITrUlHXkqYpldkMaZs/N46v1DcTN5Xtt1hKIMB4umZrBqZg7zp6Tz2/WVMQun0Ww+1MbnHnqXvQ1ddHoDdPmCTOlj7wAsL8027iw+M68Qu8VkZO4AFGf1Rvp/3VaHLxjm3osX8P3z5nL96TNiWnIumJLOrtpOwmHJu/ubqW33sLIsO2Ze584vpLbdw2NvHzAWcedP0ayhObrn32cx99G3D7B+bxO3/nEL+xtjXwseQ12gZFGir1AoRoTrTy+j2e3jkkfe408V1UZqY48/yPq9TayeV4AQAiEEt5w9k8qmbh57+0C/87h9Qb797BZ21Hbw7y/uMDZXFWX0Rvq66K+eX2gcc9kt3HvxAm46a4ZxLCvFisNqorbdw58qqplbmMaC4vS4819QnEGXL8jh1h5+/toeijOdXLYstlbSRYuncOGiIn75xl7+b6O2LhEd6QN8EuXrH+nw8sSGg5w9Jw+H1cTX/3czHZ4Au+o6uOnpzdz27OBZT8eKEn2FQjEinDYzl5+vWUQwHP7/27v/2KrKO47j709/WJCipVALhWJhtDgtAbSUHy5uY9YyNug0s5ZoRqYLW6JR53SRbInOzGUmRpS4MM3mFs0GU0YcYdmYK8TNhanFuQ7lV/1dRFtjh8EZIuG7P87Telt621ta7D33fl/JSe95ztP2+fZpvvee55x7v9y2uZUlP93BfU8dYMsLhzh2/AT1CQl6WfXknuS5+43en3tz9x/30t71Edcsms7zr3exvjm6OJu4vLNgRjG31c+msU9SblxQztLzSnv2JTG1aCxPH+jk3+1HaKwpT7pEVR1esd/7l/20th/h5ksrTzrzk8Tdl89h8llj2P7Su5QXj+1ZSpo2YSyFBXm9LuY+0HyQE2bc1VDNhmsuor3rf9Sv+xtfWf8M/2h7j8+UFGI2cBWw4Uop6UtaJmm/pDZJt/dzvEDS78LxZyVVJBxbG9r3S6ofuaE759KZJK6sKWf7zZfw228tZH55EeubD/LDJ/dQdGY+tQlLJZL4yRVzKCsaw40bX+TIR9E6+M59HWx87k3WXDKTu1ZWU1tRzJ/2vAPQa3knPzeH6784K6Ui7mVFY2nrOEp+rnou0vananIheTliW+thZp1TyBUX9v+JqGePzWf9qnnk5qjniaI7pqrSwp5X+q92HuXxlre4euG5lBefyYKKYu7+2hxyBLfUVfHM7Uu5pa5q0PrKwzXo3TuScoGfAXVAO/C8pK
1m9nJCt+uALjObJakJuAe4StL5QBNwAVAG/FVSlZml34d5OOdOC0ksmTWJJbMm8WrnUR7d9QZVpeN77oLpdtaYfNY3zefKn++i7r6nOWHw/ofHmF06nlvqqsjJET++vJrlD/wdI3rj3KmYFtb1684vHbDuckFeLlWl43n58AfcelkVuQO8Se+ic4t59NraXmcfEC3xbHnhEI0P7eJQ10cU5OX0+mTVxgXlNC44uU7z6ZTKLZu1QJuZvQogaRPQACQm/QbgzvB4M/CgoqerBmCTmR0DXpPUFn7erpEZvnMuTmaWFHLnyguSHp8/fQLrrprHtta3KR53BhPHFdBUW96zrFJVOp7b6mez+42uAZPwQMrCtYArawZPtsvnTGbK2WN6LUUlc3G4VTNRw7yptHUcJUdR6c+mBeWUjD89RXlSlUrSnwq8lbDfDixM1sfMjks6AkwM7f/s870nnU9JWgOsAZg+fXqqY3fOZaAVc8tYMbcs6fFvf354t9GumFvGseMnuKSyZNC+NyytHNbvWjRzIk98Z8mwfsZIS2VNv7+n075XGpL1SeV7MbOHzazGzGpKSgafCOecO1UVk8Zxa/3sUz5TiLtUkn47kHgeNA14O1kfSXnA2cD7KX6vc865T0kqSf95oFLSDElnEF2Y3dqnz1ZgdXj8dWCHRfcdbQWawt09M4BK4LmRGbpzzrmhGnRNP6zR3wBsB3KBR8zsJUl3AS1mthX4JfBYuFD7PtETA6Hf40QXfY8D1/udO845N3p0ut8IMFQ1NTXW0tIy2sNwzrlYkbTbzGoG6+fvyHXOuSziSd8557KIJ33nnMsinvSdcy6LpN2FXEmdQP+VF1IzCXhvhIaTTjIxrkyMCTyuuMmUuM41s0Hf3Zp2SX+4JLWkcgU7bjIxrkyMCTyuuMnUuJLx5R3nnMsinvSdcy6LZGLSf3i0B3CaZGJcmRgTeFxxk6lx9Svj1vSdc84ll4mv9J1zziWRMUl/sDq+6UxSuaSdkvZKeknSTaG9WNJTkg6GrxNCuyStD7G2SrpwdCNITlKupH9J2hb2Z4Q6ygdDXeUzQnvSOsvpSFKRpM2S9oV5Wxz3+ZL03fD/t0fSRklj4jpfkh6R1CFpT0LbkOdH0urQ/6Ck1f39rrjJiKSfUMf3y8D5wKpQnzcujgPfM7PPAouA68P4bweazawSaA77EMVZGbY1wIZPf8gpuwnYm7B/D7AuxNRFVF8ZEuosA+tCv3T2APBnMzsPmEsUY2znS9JU4EagxsyqiT5Rt7vedRzn69fAsj5tQ5ofScXAHUSVAmuBO7qfKGLNzGK/AYuB7Qn7a4G1oz2uYcTzB6JC9PuBKaFtCrA/PH4IWJXQv6dfOm1ERXOagaXANqJKau8BeX3njeijuxeHx3mhn0Y7hiRxnQW81nd8cZ4vPil5Whz+/tuA+jjPF1AB7DnV+QFWAQ8ltPfqF9ctI17p038d35Nq8cZBOE2eDzwLlJrZYYDw9ZzQLS7x3g98HzgR9icC/zWz42E/cdy96iwD3XWW09FMoBP4VVi6+oWkccR4vszsEHAv8CZwmOjvv5vMmK9uQ52ftJ+3U5EpST+lWrzpTlIh8HvgZjP7YKCu/bSlVbySvgp0mNnuxOZ+uloKx9JNHnAhsMHM5gMf8slSQX/SPrawbNEAzADKgHFEyx59xXG+BjOsGt9xkylJP/a1eCXlEyX835jZltD8rqQp4fgUoCO0xyHei4GVkl4HNhEt8dwPFIU6ytB73MnqLKejdqDdzJ4N+5uJngTiPF+XAq+ZWaeZfQxsAZaQGfPVbajzE4d5G7JMSfqp1PFNW5JEVHJyr5ndl3AosfbwaqK1/u72b4S7DhYBR7pPW9OFma01s2lmVkE0HzvM7GpgJ1EdZTg5pv7qLKcdM3sHeEvS7ND0JaKSoLGdL6JlnUWSzgz/j90xxX6+Egx1frYDl0maEM6ELgtt8TbaFxVGagOWAweAV4AfjPZ4hjj2zxGdNr
YCL4ZtOdEaaTNwMHwtDv1FdLfSK8B/iO64GPU4BojvC8C28Hgm8BzQBjwBFIT2MWG/LRyfOdrjHiSmeUBLmLMngQlxny/gR8A+YA/wGFAQ1/kCNhJdm/iY6BX7dacyP8C1IcY24JujHddIbP6OXOecyyKZsrzjnHMuBZ70nXMui3jSd865LOJJ3znnsognfeecyyKe9J1zLot40nfOuSziSd8557LI/wHz6KCEYSaEJAAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "tplt( lt, 'mean_squared_error')\n", + "tplt( lt, 'val_mean_squared_error')\n", + "plt.legend()\n", + "plt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.4" + }, + "pycharm": { + "stem_cell": { + "cell_type": "raw", + "source": [], + "metadata": { + "collapsed": false + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/examples/tensorflow/notebooks/tf-sentiment-script-mode/sentiment-analysis.ipynb b/examples/tensorflow/notebooks/tf-sentiment-script-mode/sentiment-analysis.ipynb new file mode 100644 index 0000000000..91a3a18ad1 --- /dev/null +++ b/examples/tensorflow/notebooks/tf-sentiment-script-mode/sentiment-analysis.ipynb @@ -0,0 +1,974 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Sentiment Analysis with TensorFlow\n", + "\n", + "A Convolutional Neural Net (CNN) is sometimes used in text classification tasks such as sentiment analysis. We'll use a CNN built with TensorFlow to perform sentiment analysis in Amazon SageMaker on the IMDB dataset, which consists of movie reviews labeled as having positive or negative sentiment. Three aspects of Amazon SageMaker will be demonstrated:\n", + "\n", + "- How to use Script Mode with a prebuilt TensorFlow container, along with a training script similar to one you would use outside SageMaker. \n", + "- Local Mode training, which allows you to test your code on your notebook instance before creating a full scale training job.\n", + "- Batch Transform for offline, asynchronous predictions on large batches of data. 
\n", + "\n", + "# Prepare Dataset\n", + "\n", + "We'll begin by loading the reviews dataset, and padding the reviews so all reviews have the same length. Each review is represented as an array of numbers, where each number represents an indexed word. Training data for both Local Mode and Hosted Training must be saved as files, so we'll also save the transformed data to files." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using TensorFlow backend.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "25000 train sequences\n", + "25000 test sequences\n", + "x_train shape: (25000, 400)\n", + "x_test shape: (25000, 400)\n" + ] + } + ], + "source": [ + "import os\n", + "from keras.preprocessing import sequence\n", + "from keras.datasets import imdb\n", + "\n", + "max_features = 20000\n", + "maxlen = 400\n", + "\n", + "(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)\n", + "print(len(x_train), 'train sequences')\n", + "print(len(x_test), 'test sequences')\n", + "\n", + "x_train = sequence.pad_sequences(x_train, maxlen=maxlen)\n", + "x_test = sequence.pad_sequences(x_test, maxlen=maxlen)\n", + "print('x_train shape:', x_train.shape)\n", + "print('x_test shape:', x_test.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "data_dir = os.path.join(os.getcwd(), 'data')\n", + "os.makedirs(data_dir, exist_ok=True)\n", + "\n", + "train_dir = os.path.join(os.getcwd(), 'data/train')\n", + "os.makedirs(train_dir, exist_ok=True)\n", + "\n", + "test_dir = os.path.join(os.getcwd(), 'data/test')\n", + "os.makedirs(test_dir, exist_ok=True)\n", + "\n", + "csv_test_dir = os.path.join(os.getcwd(), 'data/csv-test')\n", + "os.makedirs(csv_test_dir, exist_ok=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + 
"source": [ + "import numpy as np\n", + "\n", + "np.save(os.path.join(train_dir, 'x_train.npy'), x_train)\n", + "np.save(os.path.join(train_dir, 'y_train.npy'), y_train)\n", + "np.save(os.path.join(test_dir, 'x_test.npy'), x_test)\n", + "np.save(os.path.join(test_dir, 'y_test.npy'), y_test)\n", + "np.savetxt(os.path.join(csv_test_dir, 'csv-test.csv'), np.array(x_test[:100], dtype=np.int32), fmt='%d', delimiter=\",\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Local Mode Training\n", + "\n", + "Amazon SageMaker’s Local Mode training feature is a convenient way to make sure your code is working as expected before moving on to full scale, hosted training. With Local Mode, you can run quick tests with just a sample of training data, and/or a small number of epochs (passes over the full training set), while avoiding the time and expense of attempting full scale hosted training using possibly buggy code. \n", + "\n", + "To train in Local Mode, it is necessary to have docker-compose or nvidia-docker-compose (for GPU) installed in the notebook instance. Running following script will install docker-compose or nvidia-docker-compose and configure the notebook environment for you." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/bin/bash: ./setup.sh: No such file or directory\r\n" + ] + } + ], + "source": [ + "!/bin/bash ./setup.sh" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The next step is to set up a TensorFlow Estimator for Local Mode training. A key parameters for the Estimator is the `train_instance_type`, which is the kind of hardware on which training will run. In the case of Local Mode, we simply set this parameter to `local_gpu` to invoke Local Mode training on the GPU, or to `local` if the instance has a CPU. 
Other parameters of note are the algorithm’s hyperparameters, which are passed in as a dictionary, and a Boolean parameter indicating that we are using Script Mode." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING: Logging before flag parsing goes to stderr.\n", + "W0729 09:01:18.666472 4639487424 session.py:1106] Couldn't call 'get_role' to get Role ARN from role name olg to get Role path.\n" + ] + }, + { + "ename": "ValueError", + "evalue": "The current AWS identity is not a role: arn:aws:iam::722321484884:user/olg, therefore it cannot be used as a SageMaker execution role", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[0mtrain_instance_count\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0mhyperparameters\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mhyperparameters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 14\u001b[0;31m \u001b[0mrole\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msagemaker\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_execution_role\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 15\u001b[0m \u001b[0mbase_job_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'tf-keras-sentiment'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0mframework_version\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'1.13.1'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + 
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/sagemaker/session.py\u001b[0m in \u001b[0;36mget_execution_role\u001b[0;34m(sagemaker_session)\u001b[0m\n\u001b[1;32m 1310\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0marn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1311\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'The current AWS identity is not a role: {}, therefore it cannot be used as a SageMaker execution role'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1312\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0marn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1313\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1314\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mValueError\u001b[0m: The current AWS identity is not a role: arn:aws:iam::722321484884:user/olg, therefore it cannot be used as a SageMaker execution role" + ] + } + ], + "source": [ + "import sagemaker\n", + "from sagemaker.tensorflow import TensorFlow\n", + "\n", + "model_dir = '/opt/ml/model'\n", + "train_instance_type = 'local'\n", + "tornasole_s3 = 's3://' + sagemaker.Session().default_bucket() + \"/tornasole-parameters/\"\n", + "hyperparameters = {'epochs': 1, 'batch_size': 128, \n", + " 'tornasole-save-interval': 100, 'tornasole_outdir' : tornasole_s3 }\n", + "local_estimator = TensorFlow(entry_point='sentiment_keras.py',\n", + " model_dir=model_dir,\n", + " train_instance_type=train_instance_type,\n", + " train_instance_count=1,\n", + " hyperparameters=hyperparameters,\n", + " role=sagemaker.get_execution_role(),\n", + " base_job_name='tf-keras-sentiment',\n", + " framework_version='1.13.1',\n", + " py_version='py3',\n", + " image_name='072677473360.dkr.ecr.us-east-1.amazonaws.com/tornasole-preprod-tf-1.13.1-cpu:latest',\n", + " 
script_mode=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we'll briefly train the model in Local Mode. Since this is just to make sure the code is working, we'll train for only one epoch. (Note that on a CPU-based notebook instance, this one epoch will take at least 3 or 4 minutes.) As you'll see from the logs below the cell when training is complete, even when trained for only one epoch, the accuracy of the model on training data is already at almost 80%. " + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating tmpsw39_nhj_algo-1-zwl3k_1 ... \n", + "\u001b[1BAttaching to tmpsw39_nhj_algo-1-zwl3k_12mdone\u001b[0m\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m 2019-07-16 15:20:30,596 sagemaker-containers INFO Imported framework sagemaker_tensorflow_container.training\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m 2019-07-16 15:20:30,603 sagemaker-containers INFO No GPUs detected (normal if no gpus installed)\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m 2019-07-16 15:20:30,917 sagemaker-containers INFO No GPUs detected (normal if no gpus installed)\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m 2019-07-16 15:20:30,939 sagemaker-containers INFO No GPUs detected (normal if no gpus installed)\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m 2019-07-16 15:20:30,961 sagemaker-containers INFO No GPUs detected (normal if no gpus installed)\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m 2019-07-16 15:20:30,975 sagemaker-containers INFO Invoking user script\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Training Env:\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m {\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"additional_framework_parameters\": {},\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"channel_input_dirs\": {\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"train\": 
\"/opt/ml/input/data/train\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"test\": \"/opt/ml/input/data/test\"\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m },\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"current_host\": \"algo-1-zwl3k\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"framework_module\": \"sagemaker_tensorflow_container.training:main\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"hosts\": [\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"algo-1-zwl3k\"\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m ],\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"hyperparameters\": {\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"epochs\": 1,\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"batch_size\": 128,\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"tornasole-save-interval\": 100,\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"tornasole_outdir\": \"s3://sagemaker-us-east-1-072677473360/tornasole-parameters/\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"model_dir\": \"/opt/ml/model\"\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m },\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"input_config_dir\": \"/opt/ml/input/config\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"input_data_config\": {\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"train\": {\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"TrainingInputMode\": \"File\"\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m },\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"test\": {\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"TrainingInputMode\": \"File\"\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m }\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m },\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"input_dir\": \"/opt/ml/input\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"is_master\": true,\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"job_name\": \"tf-keras-sentiment-2019-07-16-15-20-27-160\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"log_level\": 20,\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"master_hostname\": \"algo-1-zwl3k\",\n", + 
"\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"model_dir\": \"/opt/ml/model\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"module_dir\": \"s3://sagemaker-us-east-1-072677473360/tf-keras-sentiment-2019-07-16-15-20-27-160/source/sourcedir.tar.gz\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"module_name\": \"sentiment_keras\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"network_interface_name\": \"eth0\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"num_cpus\": 4,\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"num_gpus\": 0,\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"output_data_dir\": \"/opt/ml/output/data\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"output_dir\": \"/opt/ml/output\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"output_intermediate_dir\": \"/opt/ml/output/intermediate\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"resource_config\": {\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"current_host\": \"algo-1-zwl3k\",\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"hosts\": [\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"algo-1-zwl3k\"\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m ]\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m },\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \"user_entry_point\": \"sentiment_keras.py\"\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m }\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Environment variables:\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_HOSTS=[\"algo-1-zwl3k\"]\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_NETWORK_INTERFACE_NAME=eth0\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_HPS={\"batch_size\":128,\"epochs\":1,\"model_dir\":\"/opt/ml/model\",\"tornasole-save-interval\":100,\"tornasole_outdir\":\"s3://sagemaker-us-east-1-072677473360/tornasole-parameters/\"}\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_USER_ENTRY_POINT=sentiment_keras.py\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_FRAMEWORK_PARAMS={}\n", + "\u001b[36malgo-1-zwl3k_1 
|\u001b[0m SM_RESOURCE_CONFIG={\"current_host\":\"algo-1-zwl3k\",\"hosts\":[\"algo-1-zwl3k\"]}\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_INPUT_DATA_CONFIG={\"test\":{\"TrainingInputMode\":\"File\"},\"train\":{\"TrainingInputMode\":\"File\"}}\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_OUTPUT_DATA_DIR=/opt/ml/output/data\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_CHANNELS=[\"test\",\"train\"]\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_CURRENT_HOST=algo-1-zwl3k\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_MODULE_NAME=sentiment_keras\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_LOG_LEVEL=20\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_FRAMEWORK_MODULE=sagemaker_tensorflow_container.training:main\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_INPUT_DIR=/opt/ml/input\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_INPUT_CONFIG_DIR=/opt/ml/input/config\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_OUTPUT_DIR=/opt/ml/output\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_NUM_CPUS=4\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_NUM_GPUS=0\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_MODEL_DIR=/opt/ml/model\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_MODULE_DIR=s3://sagemaker-us-east-1-072677473360/tf-keras-sentiment-2019-07-16-15-20-27-160/source/sourcedir.tar.gz\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m 
SM_TRAINING_ENV={\"additional_framework_parameters\":{},\"channel_input_dirs\":{\"test\":\"/opt/ml/input/data/test\",\"train\":\"/opt/ml/input/data/train\"},\"current_host\":\"algo-1-zwl3k\",\"framework_module\":\"sagemaker_tensorflow_container.training:main\",\"hosts\":[\"algo-1-zwl3k\"],\"hyperparameters\":{\"batch_size\":128,\"epochs\":1,\"model_dir\":\"/opt/ml/model\",\"tornasole-save-interval\":100,\"tornasole_outdir\":\"s3://sagemaker-us-east-1-072677473360/tornasole-parameters/\"},\"input_config_dir\":\"/opt/ml/input/config\",\"input_data_config\":{\"test\":{\"TrainingInputMode\":\"File\"},\"train\":{\"TrainingInputMode\":\"File\"}},\"input_dir\":\"/opt/ml/input\",\"is_master\":true,\"job_name\":\"tf-keras-sentiment-2019-07-16-15-20-27-160\",\"log_level\":20,\"master_hostname\":\"algo-1-zwl3k\",\"model_dir\":\"/opt/ml/model\",\"module_dir\":\"s3://sagemaker-us-east-1-072677473360/tf-keras-sentiment-2019-07-16-15-20-27-160/source/sourcedir.tar.gz\",\"module_name\":\"sentiment_keras\",\"network_interface_name\":\"eth0\",\"num_cpus\":4,\"num_gpus\":0,\"output_data_dir\":\"/opt/ml/output/data\",\"output_dir\":\"/opt/ml/output\",\"output_intermediate_dir\":\"/opt/ml/output/intermediate\",\"resource_config\":{\"current_host\":\"algo-1-zwl3k\",\"hosts\":[\"algo-1-zwl3k\"]},\"user_entry_point\":\"sentiment_keras.py\"}\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_USER_ARGS=[\"--batch_size\",\"128\",\"--epochs\",\"1\",\"--model_dir\",\"/opt/ml/model\",\"--tornasole-save-interval\",\"100\",\"--tornasole_outdir\",\"s3://sagemaker-us-east-1-072677473360/tornasole-parameters/\"]\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_OUTPUT_INTERMEDIATE_DIR=/opt/ml/output/intermediate\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_CHANNEL_TRAIN=/opt/ml/input/data/train\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_CHANNEL_TEST=/opt/ml/input/data/test\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_HP_EPOCHS=1\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_HP_BATCH_SIZE=128\n", + 
"\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_HP_TORNASOLE-SAVE-INTERVAL=100\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_HP_TORNASOLE_OUTDIR=s3://sagemaker-us-east-1-072677473360/tornasole-parameters/\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m SM_HP_MODEL_DIR=/opt/ml/model\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m PYTHONPATH=/opt/ml/code:/usr/local/bin:/usr/lib/python36.zip:/usr/lib/python3.6:/usr/lib/python3.6/lib-dynload:/usr/local/lib/python3.6/dist-packages:/usr/lib/python3/dist-packages\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Invoking script with the following command:\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m /usr/local/bin/python sentiment_keras.py --batch_size 128 --epochs 1 --model_dir /opt/ml/model --tornasole-save-interval 100 --tornasole_outdir s3://sagemaker-us-east-1-072677473360/tornasole-parameters/\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m \n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Using TensorFlow backend.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m x train (25000, 400) y train (25000,)\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m [[ 0 0 0 ... 19 178 32]\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m [ 0 0 0 ... 16 145 95]\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m [ 0 0 0 ... 7 129 113]\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m ...\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m [595 13 258 ... 72 33 32]\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m [ 0 0 0 ... 28 126 110]\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m [ 0 0 0 ... 
7 43 50]]\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m [1 0 0 1 0 0 1 0 1 0]\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m x test (25000, 400) y test (25000,)\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Instructions for updating:\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Colocations handled automatically by placer.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Instructions for updating:\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Colocations handled automatically by placer.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Instructions for updating:\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Instructions for updating:\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Please use `rate` instead of `keep_prob`. 
Rate should be set to `rate = 1 - keep_prob`.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Instructions for updating:\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Use tf.cast instead.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Instructions for updating:\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Use tf.cast instead.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_grad.py:102: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Instructions for updating:\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Deprecated in favor of operator or tf.math.divide.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_grad.py:102: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Instructions for updating:\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Deprecated in favor of operator or tf.math.divide.\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Train on 25000 samples, validate on 25000 samples\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m Epoch 1/1\n", + "25000/25000 [==============================] - 390s 16ms/step - loss: 0.4266 - acc: 0.7852 - val_loss: 0.2631 - val_acc: 0.8911\n", + 
"\u001b[36malgo-1-zwl3k_1 |\u001b[0m 2019-07-16 15:27:04,144 sagemaker_tensorflow_container.training WARNING Your model will NOT be servable with SageMaker TensorFlow Serving container.The model artifact was not saved in the TensorFlow SavedModel directory structure:\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m https://www.tensorflow.org/guide/saved_model#structure_of_a_savedmodel_directory\n", + "\u001b[36malgo-1-zwl3k_1 |\u001b[0m 2019-07-16 15:27:04,144 sagemaker-containers INFO Reporting training SUCCESS\n", + "\u001b[36mtmpsw39_nhj_algo-1-zwl3k_1 exited with code 0\n", + "\u001b[0mAborting on container exit...\n", + "===== Job Complete =====\n" + ] + } + ], + "source": [ + "inputs = {'train': f'file://{train_dir}',\n", + " 'test': f'file://{test_dir}'}\n", + "\n", + "local_estimator.fit(inputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Hosted Training\n", + "\n", + "After we've confirmed our code seems to be working using Local Mode training, we can move on to use SageMaker's hosted training, which uses compute resources separate from your notebook instance. Hosted training spins up one or more instances (cluster) for training, and then tears the cluster down when training is complete. In general, hosted training is preferred for doing actual training, especially for large-scale, distributed training. Before starting hosted training, the data must be uploaded to S3. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'train': 's3://sagemaker-us-east-1-072677473360/sagemaker-us-east-1-072677473360/data/train', 'test': 's3://sagemaker-us-east-1-072677473360/sagemaker-us-east-1-072677473360/data/test'}\n" + ] + } + ], + "source": [ + "s3_prefix = sagemaker.Session().default_bucket()\n", + "\n", + "traindata_s3_prefix = '{}/data/train'.format(s3_prefix)\n", + "testdata_s3_prefix = '{}/data/test'.format(s3_prefix)\n", + "\n", + "train_s3 = sagemaker.Session().upload_data(path='./data/train/', key_prefix=traindata_s3_prefix)\n", + "test_s3 = sagemaker.Session().upload_data(path='./data/test/', key_prefix=testdata_s3_prefix)\n", + "\n", + "inputs = {'train':train_s3, 'test': test_s3}\n", + "print(inputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With the training data now in S3, we're ready to set up an Estimator object for hosted training. It is similar to the Local Mode Estimator, except the `train_instance_type` has been set to a ML instance type instead of a local type for Local Mode. Additionally, we've set the number of epochs to a number greater than one for actual training, as opposed to just testing the code." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "train_instance_type = 'ml.p3.2xlarge'\n", + "#hyperparameters = {'epochs': 10, 'batch_size': 128}\n", + "hyperparameters = {'epochs': 1, 'batch_size': 128, \n", + " 'tornasole-save-interval': 1, 'tornasole_outdir' : tornasole_s3 }\n", + "\n", + "estimator = TensorFlow(entry_point='sentiment_keras.py',\n", + " model_dir=model_dir,\n", + " train_instance_type=train_instance_type,\n", + " train_instance_count=1,\n", + " hyperparameters=hyperparameters,\n", + " role=sagemaker.get_execution_role(),\n", + " base_job_name='tf-keras-sentiment',\n", + " framework_version='1.13.1',\n", + " py_version='py3',\n", + " image_name='072677473360.dkr.ecr.us-east-1.amazonaws.com/tornasole-preprod-tf-1.13.1-cpu:latest',\n", + " script_mode=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With the change in training instance type and increase in epochs, we simply call `fit` to start the actual hosted training. At the end of hosted training, you'll see from the logs below the cell that accuracy on the training set has greatly increased, and accuracy on the validation set is around 90%. The model may be overfitting now (less able to generalize to data it has not yet seen), even though we are employing dropout as a regularization technique. In a production situation, further investigation would be necessary." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2019-07-16 00:36:04 Starting - Starting the training job...\n", + "2019-07-16 00:36:09 Starting - Launching requested ML instances......\n", + "2019-07-16 00:37:17 Starting - Preparing the instances for training......\n", + "2019-07-16 00:38:15 Downloading - Downloading input data......\n", + "2019-07-16 00:39:27 Training - Downloading the training image......\n", + "2019-07-16 00:40:17 Training - Training image download completed. Training in progress.\n", + "\u001b[31m2019-07-16 00:40:20,820 sagemaker-containers INFO Imported framework sagemaker_tensorflow_container.training\u001b[0m\n", + "\u001b[31m2019-07-16 00:40:21,423 sagemaker-containers INFO Invoking user script\n", + "\u001b[0m\n", + "\u001b[31mTraining Env:\n", + "\u001b[0m\n", + "\u001b[31m{\n", + " \"additional_framework_parameters\": {},\n", + " \"channel_input_dirs\": {\n", + " \"test\": \"/opt/ml/input/data/test\",\n", + " \"train\": \"/opt/ml/input/data/train\"\n", + " },\n", + " \"current_host\": \"algo-1\",\n", + " \"framework_module\": \"sagemaker_tensorflow_container.training:main\",\n", + " \"hosts\": [\n", + " \"algo-1\"\n", + " ],\n", + " \"hyperparameters\": {\n", + " \"batch_size\": 128,\n", + " \"tornasole_outdir\": \"s3://sagemaker-us-east-1-072677473360/tornasole-parameters/\",\n", + " \"model_dir\": \"/opt/ml/model\",\n", + " \"epochs\": 1,\n", + " \"tornasole-save-interval\": 1\n", + " },\n", + " \"input_config_dir\": \"/opt/ml/input/config\",\n", + " \"input_data_config\": {\n", + " \"test\": {\n", + " \"TrainingInputMode\": \"File\",\n", + " \"S3DistributionType\": \"FullyReplicated\",\n", + " \"RecordWrapperType\": \"None\"\n", + " },\n", + " \"train\": {\n", + " \"TrainingInputMode\": \"File\",\n", + " \"S3DistributionType\": \"FullyReplicated\",\n", + " \"RecordWrapperType\": \"None\"\n", + " }\n", + " },\n", + " 
\"input_dir\": \"/opt/ml/input\",\n", + " \"is_master\": true,\n", + " \"job_name\": \"tf-keras-sentiment-2019-07-16-00-36-04-131\",\n", + " \"log_level\": 20,\n", + " \"master_hostname\": \"algo-1\",\n", + " \"model_dir\": \"/opt/ml/model\",\n", + " \"module_dir\": \"s3://sagemaker-us-east-1-072677473360/tf-keras-sentiment-2019-07-16-00-36-04-131/source/sourcedir.tar.gz\",\n", + " \"module_name\": \"sentiment_keras\",\n", + " \"network_interface_name\": \"eth0\",\n", + " \"num_cpus\": 8,\n", + " \"num_gpus\": 1,\n", + " \"output_data_dir\": \"/opt/ml/output/data\",\n", + " \"output_dir\": \"/opt/ml/output\",\n", + " \"output_intermediate_dir\": \"/opt/ml/output/intermediate\",\n", + " \"resource_config\": {\n", + " \"current_host\": \"algo-1\",\n", + " \"hosts\": [\n", + " \"algo-1\"\n", + " ],\n", + " \"network_interface_name\": \"eth0\"\n", + " },\n", + " \"user_entry_point\": \"sentiment_keras.py\"\u001b[0m\n", + "\u001b[31m}\n", + "\u001b[0m\n", + "\u001b[31mEnvironment variables:\n", + "\u001b[0m\n", + "\u001b[31mSM_HOSTS=[\"algo-1\"]\u001b[0m\n", + "\u001b[31mSM_NETWORK_INTERFACE_NAME=eth0\u001b[0m\n", + "\u001b[31mSM_HPS={\"batch_size\":128,\"epochs\":1,\"model_dir\":\"/opt/ml/model\",\"tornasole-save-interval\":1,\"tornasole_outdir\":\"s3://sagemaker-us-east-1-072677473360/tornasole-parameters/\"}\u001b[0m\n", + "\u001b[31mSM_USER_ENTRY_POINT=sentiment_keras.py\u001b[0m\n", + "\u001b[31mSM_FRAMEWORK_PARAMS={}\u001b[0m\n", + "\u001b[31mSM_RESOURCE_CONFIG={\"current_host\":\"algo-1\",\"hosts\":[\"algo-1\"],\"network_interface_name\":\"eth0\"}\u001b[0m\n", + "\u001b[31mSM_INPUT_DATA_CONFIG={\"test\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"},\"train\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"}}\u001b[0m\n", + "\u001b[31mSM_OUTPUT_DATA_DIR=/opt/ml/output/data\u001b[0m\n", + "\u001b[31mSM_CHANNELS=[\"test\",\"train\"]\u001b[0m\n", + 
"\u001b[31mSM_CURRENT_HOST=algo-1\u001b[0m\n", + "\u001b[31mSM_MODULE_NAME=sentiment_keras\u001b[0m\n", + "\u001b[31mSM_LOG_LEVEL=20\u001b[0m\n", + "\u001b[31mSM_FRAMEWORK_MODULE=sagemaker_tensorflow_container.training:main\u001b[0m\n", + "\u001b[31mSM_INPUT_DIR=/opt/ml/input\u001b[0m\n", + "\u001b[31mSM_INPUT_CONFIG_DIR=/opt/ml/input/config\u001b[0m\n", + "\u001b[31mSM_OUTPUT_DIR=/opt/ml/output\u001b[0m\n", + "\u001b[31mSM_NUM_CPUS=8\u001b[0m\n", + "\u001b[31mSM_NUM_GPUS=1\u001b[0m\n", + "\u001b[31mSM_MODEL_DIR=/opt/ml/model\u001b[0m\n", + "\u001b[31mSM_MODULE_DIR=s3://sagemaker-us-east-1-072677473360/tf-keras-sentiment-2019-07-16-00-36-04-131/source/sourcedir.tar.gz\u001b[0m\n", + "\u001b[31mSM_TRAINING_ENV={\"additional_framework_parameters\":{},\"channel_input_dirs\":{\"test\":\"/opt/ml/input/data/test\",\"train\":\"/opt/ml/input/data/train\"},\"current_host\":\"algo-1\",\"framework_module\":\"sagemaker_tensorflow_container.training:main\",\"hosts\":[\"algo-1\"],\"hyperparameters\":{\"batch_size\":128,\"epochs\":1,\"model_dir\":\"/opt/ml/model\",\"tornasole-save-interval\":1,\"tornasole_outdir\":\"s3://sagemaker-us-east-1-072677473360/tornasole-parameters/\"},\"input_config_dir\":\"/opt/ml/input/config\",\"input_data_config\":{\"test\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"},\"train\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"}},\"input_dir\":\"/opt/ml/input\",\"is_master\":true,\"job_name\":\"tf-keras-sentiment-2019-07-16-00-36-04-131\",\"log_level\":20,\"master_hostname\":\"algo-1\",\"model_dir\":\"/opt/ml/model\",\"module_dir\":\"s3://sagemaker-us-east-1-072677473360/tf-keras-sentiment-2019-07-16-00-36-04-131/source/sourcedir.tar.gz\",\"module_name\":\"sentiment_keras\",\"network_interface_name\":\"eth0\",\"num_cpus\":8,\"num_gpus\":1,\"output_data_dir\":\"/opt/ml/output/data\",\"output_dir\":\"/opt/ml/output\",\"output_intermediat
e_dir\":\"/opt/ml/output/intermediate\",\"resource_config\":{\"current_host\":\"algo-1\",\"hosts\":[\"algo-1\"],\"network_interface_name\":\"eth0\"},\"user_entry_point\":\"sentiment_keras.py\"}\u001b[0m\n", + "\u001b[31mSM_USER_ARGS=[\"--batch_size\",\"128\",\"--epochs\",\"1\",\"--model_dir\",\"/opt/ml/model\",\"--tornasole-save-interval\",\"1\",\"--tornasole_outdir\",\"s3://sagemaker-us-east-1-072677473360/tornasole-parameters/\"]\u001b[0m\n", + "\u001b[31mSM_OUTPUT_INTERMEDIATE_DIR=/opt/ml/output/intermediate\u001b[0m\n", + "\u001b[31mSM_CHANNEL_TEST=/opt/ml/input/data/test\u001b[0m\n", + "\u001b[31mSM_CHANNEL_TRAIN=/opt/ml/input/data/train\u001b[0m\n", + "\u001b[31mSM_HP_BATCH_SIZE=128\u001b[0m\n", + "\u001b[31mSM_HP_TORNASOLE_OUTDIR=s3://sagemaker-us-east-1-072677473360/tornasole-parameters/\u001b[0m\n", + "\u001b[31mSM_HP_MODEL_DIR=/opt/ml/model\u001b[0m\n", + "\u001b[31mSM_HP_EPOCHS=1\u001b[0m\n", + "\u001b[31mSM_HP_TORNASOLE-SAVE-INTERVAL=1\u001b[0m\n", + "\u001b[31mPYTHONPATH=/opt/ml/code:/usr/local/bin:/usr/lib/python36.zip:/usr/lib/python3.6:/usr/lib/python3.6/lib-dynload:/usr/local/lib/python3.6/dist-packages:/usr/lib/python3/dist-packages\n", + "\u001b[0m\n", + "\u001b[31mInvoking script with the following command:\n", + "\u001b[0m\n", + "\u001b[31m/usr/local/bin/python sentiment_keras.py --batch_size 128 --epochs 1 --model_dir /opt/ml/model --tornasole-save-interval 1 --tornasole_outdir s3://sagemaker-us-east-1-072677473360/tornasole-parameters/\n", + "\n", + "\u001b[0m\n", + "\u001b[31mUsing TensorFlow backend.\u001b[0m\n", + "\u001b[31mx train (25000, 400) y train (25000,)\u001b[0m\n", + "\u001b[31m[[ 0 0 0 ... 19 178 32]\n", + " [ 0 0 0 ... 16 145 95]\n", + " [ 0 0 0 ... 7 129 113]\n", + " ...\n", + " [595 13 258 ... 72 33 32]\n", + " [ 0 0 0 ... 28 126 110]\n", + " [ 0 0 0 ... 
7 43 50]]\u001b[0m\n", + "\u001b[31m[1 0 0 1 0 0 1 0 1 0]\u001b[0m\n", + "\u001b[31mx test (25000, 400) y test (25000,)\u001b[0m\n", + "\u001b[31mWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\u001b[0m\n", + "\u001b[31mInstructions for updating:\u001b[0m\n", + "\u001b[31mColocations handled automatically by placer.\u001b[0m\n", + "\u001b[31mWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\u001b[0m\n", + "\u001b[31mInstructions for updating:\u001b[0m\n", + "\u001b[31mColocations handled automatically by placer.\u001b[0m\n", + "\u001b[31mWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\u001b[0m\n", + "\u001b[31mInstructions for updating:\u001b[0m\n", + "\u001b[31mPlease use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\u001b[0m\n", + "\u001b[31mWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\u001b[0m\n", + "\u001b[31mInstructions for updating:\u001b[0m\n", + "\u001b[31mPlease use `rate` instead of `keep_prob`. 
Rate should be set to `rate = 1 - keep_prob`.\u001b[0m\n", + "\u001b[31mWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\u001b[0m\n", + "\u001b[31mInstructions for updating:\u001b[0m\n", + "\u001b[31mUse tf.cast instead.\u001b[0m\n", + "\u001b[31mWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\u001b[0m\n", + "\u001b[31mInstructions for updating:\u001b[0m\n", + "\u001b[31mUse tf.cast instead.\u001b[0m\n", + "\u001b[31mWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_grad.py:102: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\u001b[0m\n", + "\u001b[31mInstructions for updating:\u001b[0m\n", + "\u001b[31mDeprecated in favor of operator or tf.math.divide.\u001b[0m\n", + "\u001b[31mWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_grad.py:102: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\u001b[0m\n", + "\u001b[31mInstructions for updating:\u001b[0m\n", + "\u001b[31mDeprecated in favor of operator or tf.math.divide.\u001b[0m\n", + "\u001b[31mTrain on 25000 samples, validate on 25000 samples\u001b[0m\n", + "\u001b[31mEpoch 1/1\u001b[0m\n", + "\u001b[31m 128/25000 [..............................] - ETA: 5:29 - loss: 0.6979 - acc: 0.4531\u001b[0m\n", + "\u001b[31m 256/25000 [..............................] - ETA: 5:21 - loss: 0.6944 - acc: 0.5000\n", + " 384/25000 [..............................] - ETA: 4:22 - loss: 0.7009 - acc: 0.4922\u001b[0m\n", + "\u001b[31m 512/25000 [..............................] 
- ETA: 3:52 - loss: 0.7005 - acc: 0.4922\u001b[0m\n", + "\u001b[31m 640/25000 [..............................] - ETA: 3:34 - loss: 0.6990 - acc: 0.4875\u001b[0m\n", + "\u001b[31m 768/25000 [..............................] - ETA: 3:22 - loss: 0.6979 - acc: 0.4935\u001b[0m\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[31m 896/25000 [>.............................] - ETA: 3:13 - loss: 0.6988 - acc: 0.4922\n", + " 1024/25000 [>.............................] - ETA: 3:06 - loss: 0.6980 - acc: 0.4961\u001b[0m\n", + "\u001b[31m 1152/25000 [>.............................] - ETA: 3:00 - loss: 0.6998 - acc: 0.4931\u001b[0m\n", + "\u001b[31m 1280/25000 [>.............................] - ETA: 2:56 - loss: 0.7008 - acc: 0.4883\n", + " 1408/25000 [>.............................] - ETA: 3:06 - loss: 0.7003 - acc: 0.4879\u001b[0m\n", + "\u001b[31m 1536/25000 [>.............................] - ETA: 3:01 - loss: 0.6993 - acc: 0.4961\u001b[0m\n", + "\u001b[31m 1664/25000 [>.............................] - ETA: 2:57 - loss: 0.6984 - acc: 0.5072\u001b[0m\n", + "\u001b[31m 1792/25000 [=>............................] - ETA: 2:53 - loss: 0.6981 - acc: 0.5056\u001b[0m\n", + "\u001b[31m 1920/25000 [=>............................] - ETA: 2:50 - loss: 0.6969 - acc: 0.5120\n", + " 2048/25000 [=>............................] - ETA: 2:48 - loss: 0.6969 - acc: 0.5112\u001b[0m\n", + "\u001b[31m 2176/25000 [=>............................] - ETA: 2:45 - loss: 0.6969 - acc: 0.5078\u001b[0m\n", + "\u001b[31m 2304/25000 [=>............................] - ETA: 2:43 - loss: 0.6966 - acc: 0.5074\u001b[0m\n", + "\u001b[31m 2432/25000 [=>............................] - ETA: 2:40 - loss: 0.6965 - acc: 0.5053\u001b[0m\n", + "\u001b[31m 2560/25000 [==>...........................] - ETA: 2:45 - loss: 0.6960 - acc: 0.5059\u001b[0m\n", + "\u001b[31m 2688/25000 [==>...........................] 
- ETA: 2:43 - loss: 0.6956 - acc: 0.5089\u001b[0m\n", + "\u001b[31m 2816/25000 [==>...........................] - ETA: 2:40 - loss: 0.6950 - acc: 0.5142\n", + " 2944/25000 [==>...........................] - ETA: 2:38 - loss: 0.6947 - acc: 0.5166\u001b[0m\n", + "\u001b[31m 3072/25000 [==>...........................] - ETA: 2:36 - loss: 0.6940 - acc: 0.5208\u001b[0m\n", + "\u001b[31m 3200/25000 [==>...........................] - ETA: 2:34 - loss: 0.6936 - acc: 0.5209\u001b[0m\n", + "\u001b[31m 3328/25000 [==>...........................] - ETA: 2:33 - loss: 0.6932 - acc: 0.5216\n", + " 3456/25000 [===>..........................] - ETA: 2:31 - loss: 0.6930 - acc: 0.5214\u001b[0m\n", + "\u001b[31m 3584/25000 [===>..........................] - ETA: 2:29 - loss: 0.6928 - acc: 0.5209\u001b[0m\n", + "\u001b[31m 3712/25000 [===>..........................] - ETA: 2:33 - loss: 0.6923 - acc: 0.5229\u001b[0m\n", + "\u001b[31m 3840/25000 [===>..........................] - ETA: 2:31 - loss: 0.6915 - acc: 0.5289\n", + " 3968/25000 [===>..........................] - ETA: 2:30 - loss: 0.6906 - acc: 0.5358\u001b[0m\n", + "\u001b[31m 4096/25000 [===>..........................] - ETA: 2:28 - loss: 0.6901 - acc: 0.5386\u001b[0m\n", + "\u001b[31m 4224/25000 [====>.........................] - ETA: 2:27 - loss: 0.6895 - acc: 0.5400\u001b[0m\n", + "\u001b[31m 4352/25000 [====>.........................] - ETA: 2:25 - loss: 0.6892 - acc: 0.5402\u001b[0m\n", + "\u001b[31m 4480/25000 [====>.........................] - ETA: 2:24 - loss: 0.6883 - acc: 0.5442\n", + " 4608/25000 [====>.........................] - ETA: 2:22 - loss: 0.6875 - acc: 0.5493\u001b[0m\n", + "\u001b[31m 4736/25000 [====>.........................] - ETA: 2:21 - loss: 0.6866 - acc: 0.5536\u001b[0m\n", + "\u001b[31m 4864/25000 [====>.........................] - ETA: 2:22 - loss: 0.6855 - acc: 0.5588\n", + " 4992/25000 [====>.........................] 
- ETA: 2:21 - loss: 0.6845 - acc: 0.5623\u001b[0m\n", + "\u001b[31m 5120/25000 [=====>........................] - ETA: 2:19 - loss: 0.6836 - acc: 0.5652\u001b[0m\n", + "\u001b[31m 5248/25000 [=====>........................] - ETA: 2:18 - loss: 0.6827 - acc: 0.5665\u001b[0m\n", + "\u001b[31m 5376/25000 [=====>........................] - ETA: 2:17 - loss: 0.6825 - acc: 0.5664\n", + " 5504/25000 [=====>........................] - ETA: 2:15 - loss: 0.6816 - acc: 0.5672\u001b[0m\n", + "\u001b[31m 5632/25000 [=====>........................] - ETA: 2:14 - loss: 0.6806 - acc: 0.5701\u001b[0m\n", + "\u001b[31m 5760/25000 [=====>........................] - ETA: 2:13 - loss: 0.6795 - acc: 0.5724\u001b[0m\n", + "\u001b[31m 5888/25000 [======>.......................] - ETA: 2:11 - loss: 0.6785 - acc: 0.5727\u001b[0m\n", + "\u001b[31m 6016/25000 [======>.......................] - ETA: 2:12 - loss: 0.6762 - acc: 0.5751\u001b[0m\n", + "\u001b[31m 6144/25000 [======>.......................] - ETA: 2:11 - loss: 0.6753 - acc: 0.5778\u001b[0m\n", + "\u001b[31m 6272/25000 [======>.......................] - ETA: 2:10 - loss: 0.6736 - acc: 0.5818\u001b[0m\n", + "\u001b[31m 6400/25000 [======>.......................] - ETA: 2:09 - loss: 0.6719 - acc: 0.5850\n", + " 6528/25000 [======>.......................] - ETA: 2:07 - loss: 0.6705 - acc: 0.5879\u001b[0m\n", + "\u001b[31m 6656/25000 [======>.......................] - ETA: 2:06 - loss: 0.6688 - acc: 0.5898\u001b[0m\n", + "\u001b[31m 6784/25000 [=======>......................] - ETA: 2:05 - loss: 0.6678 - acc: 0.5909\u001b[0m\n", + "\u001b[31m 6912/25000 [=======>......................] - ETA: 2:04 - loss: 0.6661 - acc: 0.5926\u001b[0m\n", + "\u001b[31m 7040/25000 [=======>......................] - ETA: 2:03 - loss: 0.6641 - acc: 0.5938\u001b[0m\n", + "\u001b[31m 7168/25000 [=======>......................] - ETA: 2:03 - loss: 0.6620 - acc: 0.5958\u001b[0m\n", + "\u001b[31m 7296/25000 [=======>......................] 
- ETA: 2:02 - loss: 0.6612 - acc: 0.5973\n", + " 7424/25000 [=======>......................] - ETA: 2:01 - loss: 0.6589 - acc: 0.6001\u001b[0m\n", + "\u001b[31m 7552/25000 [========>.....................] - ETA: 2:00 - loss: 0.6573 - acc: 0.6029\u001b[0m\n", + "\u001b[31m 7680/25000 [========>.....................] - ETA: 1:59 - loss: 0.6552 - acc: 0.6065\u001b[0m\n", + "\u001b[31m 7808/25000 [========>.....................] - ETA: 1:58 - loss: 0.6532 - acc: 0.6092\n", + " 7936/25000 [========>.....................] - ETA: 1:57 - loss: 0.6502 - acc: 0.6129\u001b[0m\n", + "\u001b[31m 8064/25000 [========>.....................] - ETA: 1:56 - loss: 0.6481 - acc: 0.6152\u001b[0m\n", + "\u001b[31m 8192/25000 [========>.....................] - ETA: 1:55 - loss: 0.6467 - acc: 0.6161\u001b[0m\n", + "\u001b[31m 8320/25000 [========>.....................] - ETA: 1:55 - loss: 0.6445 - acc: 0.6178\n", + " 8448/25000 [=========>....................] - ETA: 1:54 - loss: 0.6423 - acc: 0.6197\u001b[0m\n", + "\u001b[31m 8576/25000 [=========>....................] - ETA: 1:53 - loss: 0.6404 - acc: 0.6212\u001b[0m\n", + "\u001b[31m 8704/25000 [=========>....................] - ETA: 1:52 - loss: 0.6386 - acc: 0.6229\u001b[0m\n", + "\u001b[31m 8832/25000 [=========>....................] - ETA: 1:51 - loss: 0.6363 - acc: 0.6248\n", + " 8960/25000 [=========>....................] - ETA: 1:50 - loss: 0.6334 - acc: 0.6282\u001b[0m\n", + "\u001b[31m 9088/25000 [=========>....................] - ETA: 1:49 - loss: 0.6303 - acc: 0.6316\u001b[0m\n", + "\u001b[31m 9216/25000 [==========>...................] - ETA: 1:47 - loss: 0.6284 - acc: 0.6331\u001b[0m\n", + "\u001b[31m 9344/25000 [==========>...................] - ETA: 1:46 - loss: 0.6252 - acc: 0.6360\n", + " 9472/25000 [==========>...................] - ETA: 1:47 - loss: 0.6222 - acc: 0.6388\u001b[0m\n", + "\u001b[31m 9600/25000 [==========>...................] 
- ETA: 1:46 - loss: 0.6206 - acc: 0.6408\u001b[0m\n", + "\u001b[31m 9728/25000 [==========>...................] - ETA: 1:45 - loss: 0.6181 - acc: 0.6430\u001b[0m\n", + "\u001b[31m 9856/25000 [==========>...................] - ETA: 1:44 - loss: 0.6151 - acc: 0.6452\n", + " 9984/25000 [==========>...................] - ETA: 1:42 - loss: 0.6125 - acc: 0.6473\u001b[0m\n", + "\u001b[31m10112/25000 [===========>..................] - ETA: 1:41 - loss: 0.6102 - acc: 0.6494\u001b[0m\n", + "\u001b[31m10240/25000 [===========>..................] - ETA: 1:40 - loss: 0.6082 - acc: 0.6511\u001b[0m\n", + "\u001b[31m10368/25000 [===========>..................] - ETA: 1:39 - loss: 0.6056 - acc: 0.6529\u001b[0m\n", + "\u001b[31m10496/25000 [===========>..................] - ETA: 1:38 - loss: 0.6038 - acc: 0.6548\u001b[0m\n", + "\u001b[31m10624/25000 [===========>..................] - ETA: 1:47 - loss: 0.6021 - acc: 0.6558\u001b[0m\n", + "\u001b[31m10752/25000 [===========>..................] - ETA: 1:46 - loss: 0.6010 - acc: 0.6566\u001b[0m\n", + "\u001b[31m10880/25000 [============>.................] - ETA: 1:44 - loss: 0.5986 - acc: 0.6590\u001b[0m\n", + "\u001b[31m11008/25000 [============>.................] - ETA: 1:43 - loss: 0.5961 - acc: 0.6611\u001b[0m\n", + "\u001b[31m11136/25000 [============>.................] - ETA: 1:42 - loss: 0.5938 - acc: 0.6629\u001b[0m\n", + "\u001b[31m11264/25000 [============>.................] - ETA: 1:41 - loss: 0.5906 - acc: 0.6652\u001b[0m\n", + "\u001b[31m11392/25000 [============>.................] - ETA: 1:40 - loss: 0.5890 - acc: 0.6664\u001b[0m\n", + "\u001b[31m11520/25000 [============>.................] - ETA: 1:39 - loss: 0.5871 - acc: 0.6682\u001b[0m\n", + "\u001b[31m11648/25000 [============>.................] - ETA: 1:38 - loss: 0.5853 - acc: 0.6696\u001b[0m\n", + "\u001b[31m11776/25000 [=============>................] - ETA: 1:37 - loss: 0.5827 - acc: 0.6713\u001b[0m\n", + "\u001b[31m11904/25000 [=============>................] 
- ETA: 1:36 - loss: 0.5808 - acc: 0.6728\u001b[0m\n", + "\u001b[31m12032/25000 [=============>................] - ETA: 1:35 - loss: 0.5778 - acc: 0.6750\u001b[0m\n", + "\u001b[31m12160/25000 [=============>................] - ETA: 1:34 - loss: 0.5753 - acc: 0.6766\u001b[0m\n", + "\u001b[31m12288/25000 [=============>................] - ETA: 1:33 - loss: 0.5731 - acc: 0.6785\u001b[0m\n", + "\u001b[31m12416/25000 [=============>................] - ETA: 1:32 - loss: 0.5699 - acc: 0.6807\u001b[0m\n", + "\u001b[31m12544/25000 [==============>...............] - ETA: 1:31 - loss: 0.5679 - acc: 0.6818\u001b[0m\n", + "\u001b[31m12672/25000 [==============>...............] - ETA: 1:29 - loss: 0.5656 - acc: 0.6836\u001b[0m\n", + "\u001b[31m12800/25000 [==============>...............] - ETA: 1:28 - loss: 0.5631 - acc: 0.6855\u001b[0m\n", + "\u001b[31m12928/25000 [==============>...............] - ETA: 1:28 - loss: 0.5616 - acc: 0.6863\u001b[0m\n", + "\u001b[31m13056/25000 [==============>...............] - ETA: 1:27 - loss: 0.5592 - acc: 0.6882\u001b[0m\n", + "\u001b[31m13184/25000 [==============>...............] - ETA: 1:26 - loss: 0.5569 - acc: 0.6899\u001b[0m\n", + "\u001b[31m13312/25000 [==============>...............] - ETA: 1:25 - loss: 0.5541 - acc: 0.6919\u001b[0m\n", + "\u001b[31m13440/25000 [===============>..............] - ETA: 1:24 - loss: 0.5518 - acc: 0.6936\u001b[0m\n", + "\u001b[31m13568/25000 [===============>..............] - ETA: 1:23 - loss: 0.5508 - acc: 0.6944\u001b[0m\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[31m13696/25000 [===============>..............] - ETA: 1:22 - loss: 0.5482 - acc: 0.6962\u001b[0m\n", + "\u001b[31m13824/25000 [===============>..............] - ETA: 1:21 - loss: 0.5464 - acc: 0.6970\u001b[0m\n", + "\u001b[31m13952/25000 [===============>..............] - ETA: 1:20 - loss: 0.5452 - acc: 0.6978\u001b[0m\n", + "\u001b[31m14080/25000 [===============>..............] 
- ETA: 1:20 - loss: 0.5433 - acc: 0.6993\u001b[0m\n", + "\u001b[31m14208/25000 [================>.............] - ETA: 1:18 - loss: 0.5414 - acc: 0.7010\u001b[0m\n", + "\u001b[31m14336/25000 [================>.............] - ETA: 1:17 - loss: 0.5393 - acc: 0.7026\u001b[0m\n", + "\u001b[31m14464/25000 [================>.............] - ETA: 1:16 - loss: 0.5380 - acc: 0.7038\u001b[0m\n", + "\u001b[31m14592/25000 [================>.............] - ETA: 1:15 - loss: 0.5368 - acc: 0.7047\u001b[0m\n", + "\u001b[31m14720/25000 [================>.............] - ETA: 1:14 - loss: 0.5347 - acc: 0.7064\u001b[0m\n", + "\u001b[31m14848/25000 [================>.............] - ETA: 1:13 - loss: 0.5331 - acc: 0.7076\u001b[0m\n", + "\u001b[31m14976/25000 [================>.............] - ETA: 1:12 - loss: 0.5311 - acc: 0.7091\u001b[0m\n", + "\u001b[31m15104/25000 [=================>............] - ETA: 1:11 - loss: 0.5295 - acc: 0.7103\u001b[0m\n", + "\u001b[31m15232/25000 [=================>............] - ETA: 1:11 - loss: 0.5276 - acc: 0.7119\u001b[0m\n", + "\u001b[31m15360/25000 [=================>............] - ETA: 1:10 - loss: 0.5262 - acc: 0.7130\u001b[0m\n", + "\u001b[31m15488/25000 [=================>............] - ETA: 1:09 - loss: 0.5244 - acc: 0.7143\u001b[0m\n", + "\u001b[31m15616/25000 [=================>............] - ETA: 1:08 - loss: 0.5229 - acc: 0.7154\u001b[0m\n", + "\u001b[31m15744/25000 [=================>............] - ETA: 1:07 - loss: 0.5214 - acc: 0.7167\u001b[0m\n", + "\u001b[31m15872/25000 [==================>...........] - ETA: 1:06 - loss: 0.5203 - acc: 0.7176\u001b[0m\n", + "\u001b[31m16000/25000 [==================>...........] - ETA: 1:05 - loss: 0.5199 - acc: 0.7180\u001b[0m\n", + "\u001b[31m16128/25000 [==================>...........] - ETA: 1:04 - loss: 0.5182 - acc: 0.7193\u001b[0m\n", + "\u001b[31m16256/25000 [==================>...........] 
- ETA: 1:03 - loss: 0.5165 - acc: 0.7204\u001b[0m\n", + "\u001b[31m16384/25000 [==================>...........] - ETA: 1:02 - loss: 0.5148 - acc: 0.7215\u001b[0m\n", + "\u001b[31m16512/25000 [==================>...........] - ETA: 1:01 - loss: 0.5137 - acc: 0.7224\u001b[0m\n", + "\u001b[31m16640/25000 [==================>...........] - ETA: 1:00 - loss: 0.5131 - acc: 0.7231\u001b[0m\n", + "\u001b[31m16768/25000 [===================>..........] - ETA: 59s - loss: 0.5115 - acc: 0.7245 \u001b[0m\n", + "\u001b[31m16896/25000 [===================>..........] - ETA: 58s - loss: 0.5100 - acc: 0.7253\u001b[0m\n", + "\u001b[31m17024/25000 [===================>..........] - ETA: 57s - loss: 0.5084 - acc: 0.7263\u001b[0m\n", + "\u001b[31m17152/25000 [===================>..........] - ETA: 56s - loss: 0.5066 - acc: 0.7275\u001b[0m\n", + "\u001b[31m17280/25000 [===================>..........] - ETA: 55s - loss: 0.5051 - acc: 0.7284\u001b[0m\n", + "\u001b[31m17408/25000 [===================>..........] - ETA: 54s - loss: 0.5039 - acc: 0.7293\u001b[0m\n", + "\u001b[31m17536/25000 [====================>.........] - ETA: 54s - loss: 0.5020 - acc: 0.7304\u001b[0m\n", + "\u001b[31m17664/25000 [====================>.........] - ETA: 53s - loss: 0.5007 - acc: 0.7312\u001b[0m\n", + "\u001b[31m17792/25000 [====================>.........] - ETA: 52s - loss: 0.4996 - acc: 0.7321\u001b[0m\n", + "\u001b[31m17920/25000 [====================>.........] - ETA: 51s - loss: 0.4980 - acc: 0.7333\u001b[0m\n", + "\u001b[31m18048/25000 [====================>.........] - ETA: 50s - loss: 0.4969 - acc: 0.7340\u001b[0m\n", + "\u001b[31m18176/25000 [====================>.........] - ETA: 49s - loss: 0.4952 - acc: 0.7351\u001b[0m\n", + "\u001b[31m18304/25000 [====================>.........] - ETA: 48s - loss: 0.4939 - acc: 0.7360\u001b[0m\n", + "\u001b[31m18432/25000 [=====================>........] 
- ETA: 47s - loss: 0.4935 - acc: 0.7368\u001b[0m\n", + "\u001b[31m18560/25000 [=====================>........] - ETA: 46s - loss: 0.4916 - acc: 0.7379\u001b[0m\n", + "\u001b[31m18688/25000 [=====================>........] - ETA: 45s - loss: 0.4902 - acc: 0.7388\u001b[0m\n", + "\u001b[31m18816/25000 [=====================>........] - ETA: 44s - loss: 0.4889 - acc: 0.7396\u001b[0m\n", + "\u001b[31m18944/25000 [=====================>........] - ETA: 43s - loss: 0.4881 - acc: 0.7401\u001b[0m\n", + "\u001b[31m19072/25000 [=====================>........] - ETA: 42s - loss: 0.4864 - acc: 0.7412\u001b[0m\n", + "\u001b[31m19200/25000 [======================>.......] - ETA: 41s - loss: 0.4855 - acc: 0.7419\u001b[0m\n", + "\u001b[31m19328/25000 [======================>.......] - ETA: 40s - loss: 0.4839 - acc: 0.7429\u001b[0m\n", + "\u001b[31m19456/25000 [======================>.......] - ETA: 39s - loss: 0.4825 - acc: 0.7438\u001b[0m\n", + "\u001b[31m19584/25000 [======================>.......] - ETA: 38s - loss: 0.4815 - acc: 0.7446\u001b[0m\n", + "\u001b[31m19712/25000 [======================>.......] - ETA: 37s - loss: 0.4803 - acc: 0.7455\u001b[0m\n", + "\u001b[31m19840/25000 [======================>.......] - ETA: 36s - loss: 0.4792 - acc: 0.7463\u001b[0m\n", + "\u001b[31m19968/25000 [======================>.......] - ETA: 36s - loss: 0.4777 - acc: 0.7475\u001b[0m\n", + "\u001b[31m20096/25000 [=======================>......] - ETA: 35s - loss: 0.4773 - acc: 0.7479\u001b[0m\n", + "\u001b[31m20224/25000 [=======================>......] - ETA: 34s - loss: 0.4757 - acc: 0.7490\u001b[0m\n", + "\u001b[31m20352/25000 [=======================>......] - ETA: 33s - loss: 0.4742 - acc: 0.7500\u001b[0m\n", + "\u001b[31m20480/25000 [=======================>......] - ETA: 32s - loss: 0.4730 - acc: 0.7506\u001b[0m\n", + "\u001b[31m20608/25000 [=======================>......] - ETA: 31s - loss: 0.4729 - acc: 0.7511\u001b[0m\n", + "\u001b[31m20736/25000 [=======================>......] 
- ETA: 30s - loss: 0.4719 - acc: 0.7517\u001b[0m\n", + "\u001b[31m20864/25000 [========================>.....] - ETA: 29s - loss: 0.4710 - acc: 0.7523\u001b[0m\n", + "\u001b[31m20992/25000 [========================>.....] - ETA: 29s - loss: 0.4697 - acc: 0.7534\u001b[0m\n", + "\u001b[31m21120/25000 [========================>.....] - ETA: 28s - loss: 0.4686 - acc: 0.7541\u001b[0m\n", + "\u001b[31m21248/25000 [========================>.....] - ETA: 27s - loss: 0.4673 - acc: 0.7548\u001b[0m\n", + "\u001b[31m21376/25000 [========================>.....] - ETA: 26s - loss: 0.4664 - acc: 0.7554\u001b[0m\n", + "\u001b[31m21504/25000 [========================>.....] - ETA: 25s - loss: 0.4654 - acc: 0.7560\u001b[0m\n", + "\u001b[31m21632/25000 [========================>.....] - ETA: 24s - loss: 0.4644 - acc: 0.7571\u001b[0m\n", + "\u001b[31m21760/25000 [=========================>....] - ETA: 23s - loss: 0.4632 - acc: 0.7579\u001b[0m\n", + "\u001b[31m21888/25000 [=========================>....] - ETA: 22s - loss: 0.4619 - acc: 0.7589\u001b[0m\n", + "\u001b[31m22016/25000 [=========================>....] - ETA: 21s - loss: 0.4608 - acc: 0.7597\u001b[0m\n", + "\u001b[31m22144/25000 [=========================>....] - ETA: 21s - loss: 0.4598 - acc: 0.7603\u001b[0m\n", + "\u001b[31m22272/25000 [=========================>....] - ETA: 20s - loss: 0.4589 - acc: 0.7608\u001b[0m\n", + "\u001b[31m22400/25000 [=========================>....] - ETA: 19s - loss: 0.4581 - acc: 0.7615\u001b[0m\n", + "\u001b[31m22528/25000 [==========================>...] - ETA: 18s - loss: 0.4567 - acc: 0.7625\u001b[0m\n", + "\u001b[31m22656/25000 [==========================>...] - ETA: 17s - loss: 0.4562 - acc: 0.7629\u001b[0m\n", + "\u001b[31m22784/25000 [==========================>...] - ETA: 16s - loss: 0.4553 - acc: 0.7636\u001b[0m\n", + "\u001b[31m22912/25000 [==========================>...] - ETA: 15s - loss: 0.4540 - acc: 0.7645\u001b[0m\n", + "\u001b[31m23040/25000 [==========================>...] 
- ETA: 14s - loss: 0.4530 - acc: 0.7653\u001b[0m\n", + "\u001b[31m23168/25000 [==========================>...] - ETA: 13s - loss: 0.4524 - acc: 0.7659\u001b[0m\n", + "\u001b[31m23296/25000 [==========================>...] - ETA: 12s - loss: 0.4512 - acc: 0.7667\u001b[0m\n", + "\u001b[31m23424/25000 [===========================>..] - ETA: 11s - loss: 0.4500 - acc: 0.7672\u001b[0m\n", + "\u001b[31m23552/25000 [===========================>..] - ETA: 10s - loss: 0.4489 - acc: 0.7678\u001b[0m\n", + "\u001b[31m23680/25000 [===========================>..] - ETA: 9s - loss: 0.4477 - acc: 0.7686 \u001b[0m\n", + "\u001b[31m23808/25000 [===========================>..] - ETA: 8s - loss: 0.4464 - acc: 0.7695\u001b[0m\n", + "\u001b[31m23936/25000 [===========================>..] - ETA: 7s - loss: 0.4454 - acc: 0.7703\u001b[0m\n", + "\u001b[31m24064/25000 [===========================>..] - ETA: 6s - loss: 0.4448 - acc: 0.7707\u001b[0m\n", + "\u001b[31m24192/25000 [============================>.] - ETA: 5s - loss: 0.4441 - acc: 0.7710\u001b[0m\n", + "\u001b[31m24320/25000 [============================>.] - ETA: 5s - loss: 0.4437 - acc: 0.7714\u001b[0m\n", + "\u001b[31m24448/25000 [============================>.] - ETA: 4s - loss: 0.4430 - acc: 0.7719\u001b[0m\n", + "\u001b[31m24576/25000 [============================>.] - ETA: 3s - loss: 0.4420 - acc: 0.7726\u001b[0m\n", + "\u001b[31m24704/25000 [============================>.] - ETA: 2s - loss: 0.4411 - acc: 0.7733\u001b[0m\n", + "\u001b[31m24832/25000 [============================>.] - ETA: 1s - loss: 0.4403 - acc: 0.7739\u001b[0m\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[31m24960/25000 [============================>.] 
- ETA: 0s - loss: 0.4393 - acc: 0.7745\u001b[0m\n", + "\u001b[31m25000/25000 [==============================] - 233s 9ms/step - loss: 0.4387 - acc: 0.7748 - val_loss: 0.2636 - val_acc: 0.8898\u001b[0m\n", + "\u001b[31m2019-07-16 00:44:17,528 sagemaker_tensorflow_container.training WARNING Your model will NOT be servable with SageMaker TensorFlow Serving container.The model artifact was not saved in the TensorFlow SavedModel directory structure:\u001b[0m\n", + "\u001b[31mhttps://www.tensorflow.org/guide/saved_model#structure_of_a_savedmodel_directory\u001b[0m\n", + "\u001b[31m2019-07-16 00:44:17,528 sagemaker-containers INFO Reporting training SUCCESS\u001b[0m\n", + "\n", + "2019-07-16 00:45:32 Uploading - Uploading generated training model\n", + "2019-07-16 00:45:32 Completed - Training job completed\n", + "Billable seconds: 437\n" + ] + } + ], + "source": [ + "estimator.fit(inputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Batch Prediction\n", + "\n", + "\n", + "If our use case requires individual predictions in near real-time, SageMaker hosted endpoints can be created. Hosted endpoints also can be used for pseudo-batch prediction, but the process is more involved than simply using SageMaker's Batch Transform feature, which is designed for large-scale, asynchronous batch inference.\n", + "\n", + "To use Batch Transform, first we must upload to Amazon S3 some test data in CSV format to be transformed." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "csvtestdata_s3_prefix = '{}/data/csv-test'.format(s3_prefix)\n", + "csvtest_s3 = sagemaker.Session().upload_data(path='./data/csv-test/', key_prefix=csvtestdata_s3_prefix)\n", + "print(csvtest_s3)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A Transformer object must be set up to describe the Batch Transform job, including the amount and type of inference hardware to be used. 
Then the actual transform job itself is started with a call to the `transform` method of the Transformer." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "transformer = estimator.transformer(instance_count=1, instance_type='ml.m5.xlarge')\n", + "transformer.transform(csvtest_s3, content_type='text/csv')\n", + "print('Waiting for transform job: ' + transformer.latest_transform_job.job_name)\n", + "transformer.wait()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now download the batch predictions from S3 to the local filesystem on the notebook instance; the predictions are contained in a file with a .out extension, and are embedded in JSON. Next we'll load the JSON and examine the predictions, which are confidence scores from 0.0 to 1.0 where numbers close to 1.0 indicate positive sentiment, while numbers close to 0.0 indicate negative sentiment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "\n", + "batch_output = transformer.output_path\n", + "!mkdir -p batch_data/output\n", + "!aws s3 cp --recursive $batch_output/ batch_data/output/\n", + "\n", + "with open('batch_data/output/csv-test.csv.out', 'r') as f:\n", + " jstr = json.load(f)\n", + " results = [float('%.3f'%(item)) for sublist in jstr['predictions'] for item in sublist]\n", + " print(results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's look at the text of some actual reviews to see the predictions in action. First, we have to convert the integers representing the words back to the words themselves by using a reversed dictionary. Next we can decode the reviews, taking into account that the first 3 indices were reserved for \"padding\", \"start of sequence\", and \"unknown\", and removing a string of unknown tokens from the start of the review." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import re\n", + "\n", + "regex = re.compile(r'^[\\?\\s]+')\n", + "\n", + "word_index = imdb.get_word_index()\n", + "reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n", + "first_decoded_review = ' '.join([reverse_word_index.get(i - 3, '?') for i in x_test[3]])\n", + "regex.sub('', first_decoded_review)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Overall, this review looks fairly negative. Let's compare the actual label with the prediction:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_sentiment(score):\n", + " return 'positive' if score > 0.5 else 'negative' \n", + "\n", + "print('Labeled sentiment for this review is {}, predicted sentiment is {}'.format(get_sentiment(y_test[3]), \n", + " get_sentiment(results[3])))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Our negative sentiment prediction agrees with the label for this review. Let's now examine another review:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "second_decoded_review = ' '.join([reverse_word_index.get(i - 3, '?') for i in x_test[10]])\n", + "regex.sub('', second_decoded_review)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('Labeled sentiment for this review is {}, predicted sentiment is {}'.format(get_sentiment(y_test[10]), \n", + " get_sentiment(results[10])))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Again, the prediction agreed with the label for the test data. 
Note that there is no need to clean up any Batch Transform resources: after the transform job is complete, the cluster used to make inferences is torn down.\n", + "\n", + "Now that we've reviewed some sample predictions as a sanity check, we're finished. Of course, in a typical production situation, the data science project lifecycle is iterative, with repeated cycles of refining the model using a tool such as Amazon SageMaker's Automatic Model Tuning feature, and gathering more data. " + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/tensorflow/notebooks/tf-sentiment-script-mode/sentiment_keras.py b/examples/tensorflow/notebooks/tf-sentiment-script-mode/sentiment_keras.py new file mode 100644 index 0000000000..81c99fe253 --- /dev/null +++ b/examples/tensorflow/notebooks/tf-sentiment-script-mode/sentiment_keras.py @@ -0,0 +1,107 @@ +import argparse +import numpy as np +import os + +import keras + +from keras.models import Sequential +from keras.layers import Dense, Dropout, Activation +from keras.layers import Embedding +from keras.layers import Conv1D, GlobalMaxPooling1D + +from tornasole.tensorflow.keras import TornasoleHook +from tornasole import SaveConfig + + +max_features = 20000 +maxlen = 400 +embedding_dims = 300 +filters = 250 +kernel_size = 3 +hidden_dims = 250 + + +def parse_args(): + + parser = argparse.ArgumentParser() + + # hyperparameters sent by the client are passed as command-line arguments to the script + parser.add_argument('--epochs', type=int, default=5) + parser.add_argument('--batch_size', type=int, default=64) + + 
parser.add_argument('--tornasole_outdir', type=str, required=True) + parser.add_argument('--tornasole_save_interval', type=int, default=10) + + # data directories + parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN')) + parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST')) + + # model directory: we will use the default set by SageMaker, /opt/ml/model + parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR')) + + return parser.parse_known_args() + + +def get_train_data(train_dir): + + x_train = np.load(os.path.join(train_dir, 'x_train.npy')) + y_train = np.load(os.path.join(train_dir, 'y_train.npy')) + print('x train', x_train.shape,'y train', y_train.shape) + print( x_train[:10]) + print( y_train[:10]) + + return x_train, y_train + + +def get_test_data(test_dir): + + x_test = np.load(os.path.join(test_dir, 'x_test.npy')) + y_test = np.load(os.path.join(test_dir, 'y_test.npy')) + print('x test', x_test.shape,'y test', y_test.shape) + + return x_test, y_test + + +def get_model(): + + embedding_layer = keras.layers.Embedding(max_features, + embedding_dims, + input_length=maxlen) + + sequence_input = keras.Input(shape=(maxlen,), dtype='int32') + embedded_sequences = embedding_layer(sequence_input) + x = keras.layers.Dropout(0.2)(embedded_sequences) + x = keras.layers.Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1)(x) + x = keras.layers.MaxPooling1D()(x) + x = keras.layers.GlobalMaxPooling1D()(x) + x = keras.layers.Dense(hidden_dims, activation='relu')(x) + x = keras.layers.Dropout(0.2)(x) + preds = keras.layers.Dense(1, activation='sigmoid')(x) + + return keras.Model(sequence_input, preds) + + +if __name__ == "__main__": + + args, _ = parse_args() + + hook = TornasoleHook(out_dir=args.tornasole_outdir, + save_config=SaveConfig(save_interval=args.tornasole_save_interval)) + + x_train, y_train = get_train_data(args.train) + x_test, y_test = 
get_test_data(args.test) + + model = get_model() + + model.compile(loss='binary_crossentropy', + optimizer='adam', + metrics=['accuracy', 'mean_squared_error']) + + model.fit(x_train, y_train, + batch_size=args.batch_size, + epochs=args.epochs, + validation_data=(x_test, y_test), + callbacks=[hook] + ) + + model.save(os.path.join(args.model_dir,'sentiment_keras.h5')) diff --git a/examples/tensorflow/training_scripts/mnist/README.md b/examples/tensorflow/training_scripts/mnist/README.md new file mode 100644 index 0000000000..deb55cce18 --- /dev/null +++ b/examples/tensorflow/training_scripts/mnist/README.md @@ -0,0 +1,77 @@ +# MNIST Example +We provide an example script `mnist.py` which is a Tornasole-enabled TensorFlow training script. +It uses the Estimator interface of TensorFlow. +In this document we highlight how you can set training and evaluation modes for Tornasole. +This will allow you to distinguish between training and evaluate steps and analyze them independently. + +## Integrating Tornasole +Below we call out the changes for Tornasole in the above script and describe them + +**Importing TornasoleTF** +``` +import tornasole_tf as ts +``` +**Saving gradients** + +We need to wrap our optimizer with TornasoleOptimizer, and use this optimizer to minimize loss. +This will also enable us to access the gradients during analysis without having to identify which tensors out of the saved ones are the gradients. +``` +opt = TornasoleOptimizer(opt) +optimizer_op = optimizer.minimize(loss, global_step=increment_global_step_op) + +ts.TornasoleHook(..., include_collections=[..,'gradients'], ...) +``` +**Setting save interval** + +You can set different save intervals for different modes. +This can be done by passing a dictionary as save_config to the hook. +This dictionary should have the mode as key and a SaveConfig object as value. 
+``` +ts.TornasoleHook(..., + save_config={ts.modes.TRAIN: ts.SaveConfig(args.tornasole_train_frequency), + ts.modes.EVAL: ts.SaveConfig(args.tornasole_eval_frequency)}..) +``` +**Setting the right mode** + +Notice the calls to `hook.set_mode` at various places in the code. +``` +hook.set_mode(ts.modes.TRAIN) +``` + +``` +hook.set_mode(ts.modes.EVAL) +``` +**Passing the hook** + +We need to pass this hook to a monitored session and use this session for running the job. +``` +hook = ts.TornasoleHook(...) +mnist_classifier.train(..., hooks=[hook]) +``` + +``` +mnist_classifier.evaluate(..., hooks=[hook]) +``` +## Running the example +### Environment +Ensure you are in a python environment which has TensorFlow, TornasoleTF and TornasoleCore installed. If you followed the recommended instructions of using Amazon Deep Learning AMI, then you might want to activate the tensorflow_p36 environment as follows. +``` +source activate tensorflow_p36 +``` +### Tornasole Path +We recommend saving tornasole outputs on S3 by passing the +flag `--tornasole_path` in the format `s3://bucket_name/prefix`. +The commands below will be shown with local path however so you can +run them immediately without having to setup S3 permissions. + +### Command +``` +python mnist.py --tornasole_path ~/ts_outputs/mnist +``` + +### Analysis +Refer [this page](docs/analysis/README.md) for more details on analysis. + +### More +Please refer to [Tornasole Tensorflow page](docs/tensorflow/README.md). 
+ diff --git a/examples/tensorflow/training_scripts/mnist/mnist.py b/examples/tensorflow/training_scripts/mnist/mnist.py new file mode 100644 index 0000000000..b2aedb43fc --- /dev/null +++ b/examples/tensorflow/training_scripts/mnist/mnist.py @@ -0,0 +1,123 @@ +import argparse +import numpy as np +import tensorflow as tf +import tornasole.tensorflow as ts + +parser = argparse.ArgumentParser() +parser.add_argument('--tornasole_path', type=str) +parser.add_argument('--tornasole_train_frequency', type=int, + help="How often to save TS data", default=50) +parser.add_argument('--tornasole_eval_frequency', type=int, + help="How often to save TS data", default=10) +args = parser.parse_args() + +def cnn_model_fn(features, labels, mode): + """Model function for CNN.""" + # Input Layer + input_layer = tf.reshape(features["x"], [-1, 28, 28, 1]) + + # Convolutional Layer #1 + conv1 = tf.layers.conv2d( + inputs=input_layer, + filters=32, + kernel_size=[5, 5], + padding="same", + activation=tf.nn.relu) + + # Pooling Layer #1 + pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) + + # Convolutional Layer #2 and Pooling Layer #2 + conv2 = tf.layers.conv2d( + inputs=pool1, + filters=64, + kernel_size=[5, 5], + padding="same", + activation=tf.nn.relu) + pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) + + # Dense Layer + pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64]) + dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu) + dropout = tf.layers.dropout( + inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN) + + # Logits Layer + logits = tf.layers.dense(inputs=dropout, units=10) + + predictions = { + # Generate predictions (for PREDICT and EVAL mode) + "classes": tf.argmax(input=logits, axis=1), + # Add `softmax_tensor` to the graph. It is used for PREDICT and by the + # `logging_hook`. 
+ "probabilities": tf.nn.softmax(logits, name="softmax_tensor") + } + + if mode == tf.estimator.ModeKeys.PREDICT: + return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) + + # Calculate Loss (for both TRAIN and EVAL modes) + loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits) + + # Configure the Training Op (for TRAIN mode) + if mode == tf.estimator.ModeKeys.TRAIN: + optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001) + optimizer = ts.TornasoleOptimizer(optimizer) + train_op = optimizer.minimize( + loss=loss, + global_step=tf.train.get_global_step()) + return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) + + # Add evaluation metrics (for EVAL mode) + eval_metric_ops = { + "accuracy": tf.metrics.accuracy( + labels=labels, predictions=predictions["classes"]) + } + return tf.estimator.EstimatorSpec( + mode=mode, loss=loss, eval_metric_ops=eval_metric_ops) + +# Load training and eval data +((train_data, train_labels), + (eval_data, eval_labels)) = tf.keras.datasets.mnist.load_data() + +train_data = train_data / np.float32(255) +train_labels = train_labels.astype(np.int32) # not required + +eval_data = eval_data / np.float32(255) +eval_labels = eval_labels.astype(np.int32) # not required + +mnist_classifier = tf.estimator.Estimator( + model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model") + +train_input_fn = tf.estimator.inputs.numpy_input_fn( + x={"x": train_data}, + y=train_labels, + batch_size=100, + num_epochs=None, + shuffle=True) + +eval_input_fn = tf.estimator.inputs.numpy_input_fn( + x={"x": eval_data}, + y=eval_labels, + num_epochs=1, + shuffle=False) + +hook = ts.TornasoleHook(out_dir=args.tornasole_path, + save_config={ts.modes.TRAIN: ts.SaveConfig(args.tornasole_train_frequency), + ts.modes.EVAL: ts.SaveConfig(args.tornasole_eval_frequency)}) + +hook.set_mode(ts.modes.TRAIN) +# train one step and display the probabilties +mnist_classifier.train( + input_fn=train_input_fn, + 
steps=1000, + hooks=[hook]) + +hook.set_mode(ts.modes.EVAL) +mnist_classifier.evaluate(input_fn=eval_input_fn, hooks=[hook]) + +hook.set_mode(ts.modes.TRAIN) +mnist_classifier.train( + input_fn=train_input_fn, + steps=1000, + hooks=[hook]) diff --git a/examples/tensorflow/training_scripts/resnet50/README.md b/examples/tensorflow/training_scripts/resnet50/README.md new file mode 100644 index 0000000000..03d00eadeb --- /dev/null +++ b/examples/tensorflow/training_scripts/resnet50/README.md @@ -0,0 +1,161 @@ +# ResNet50 Imagenet Example +We provide an example script `train_imagenet_resnet_hvd.py` which is a Tornasole-enabled TensorFlow training script for ResNet50/ImageNet. +**Please note that this script needs a GPU**. +It uses the Estimator interface of TensorFlow. +Here we show different scenarios of how to use Tornasole to +save different tensors during training for analysis. +Below are listed the changes we made to integrate these different +behaviors of Tornasole as well as example commands for you to try. + +## Integrating Tornasole +Below we call out the changes for Tornasole in the above script and describe them + +**Importing TornasoleTF** +``` +import tornasole_tf as ts +``` +**Saving weights** +``` +include_collections.append('weights') +``` +**Saving gradients** + +We need to wrap our optimizer with TornasoleOptimizer, and use this optimizer to minimize loss. +This will also enable us to access the gradients during analysis without having to identify which tensors out of the saved ones are the gradients. +``` +opt = TornasoleOptimizer(opt) + +include_collections.append('gradients') +ts.TornasoleHook(..., include_collections=include_collections, ...) +``` +**Saving relu activations by variable** +``` +x = tf.nn.relu(x + shortcut) +ts.add_to_collection('relu_activations', x) +... +include_collections.append('relu_activations') +ts.TornasoleHook(..., include_collections=include_collections, ...) 
+``` +**Saving relu activations as reductions** +``` + +x = tf.nn.relu(x + shortcut) +ts.add_to_collection('relu_activations', x) +... +rnc = ts.ReductionConfig(reductions=reductions, abs_reductions=abs_reductions) +... +ts.TornasoleHook(..., reduction_config=rnc, ...) +``` +**Saving by regex** +``` +ts.get_collection('default').include(FLAGS.tornasole_include) +include_collections.append('default') +ts.TornasoleHook(..., include_collections=include_collections, ...) +``` +**Setting save interval** +``` +ts.TornasoleHook(...,save_config=ts.SaveConfig(save_interval=FLAGS.tornasole_step_interval)...) +``` +**Setting the right mode** + +You will see in the code that the appropriate mode has been set before the train or evaluate function calls. +For example, the line: +``` +hook.set_mode(ts.modes.TRAIN) +``` + +**Adding the hook** +``` +training_hooks = [] +... +training_hooks.append(hook) +classifier.train( + input_fn=lambda: make_dataset(...), + max_steps=nstep, + hooks=training_hooks) +``` +## Running the example +### Environment +Ensure you are in a python environment which has TensorFlow, TornasoleTF and TornasoleCore installed. If you followed the recommended instructions of using Amazon Deep Learning AMI, then you might want to activate the tensorflow_p36 environment as follows. +``` +source activate tensorflow_p36 +``` +### Run with synthetic or real data +By default the following commands run with synthetic data. If you have ImageNet data prepared in tfrecord format, + you can pass the path to that with the flag --data_dir, such as the following: + +```python train_imagenet_resnet_hvd.py --data_dir ~/data/tf-imagenet/ ...``` + +This flag can be appended to any of the following commands +to make the job use real data. +### Tornasole Path +We recommend saving tornasole outputs on S3 by passing +the flag `--tornasole_path` in the format `s3://bucket_name/prefix`. 
+The commands below will be shown with local path however +so you can run them immediately without having to setup S3 permissions. + +### Example commands +#### Saving weights and gradients with Tornasole +``` +python train_imagenet_resnet_hvd.py --clear_log --enable_tornasole \ + --tornasole_save_weights --tornasole_save_gradients \ + --tornasole_step_interval 10 \ + --tornasole_path ~/ts_outputs/default +``` +#### Simulating gradients which 'vanish' +We simulate the scenario of gradients being really small (vanishing) by initializing weights with a small constant. +``` +python train_imagenet_resnet_hvd.py --clear_log --enable_tornasole \ + --tornasole_save_weights --tornasole_save_gradients \ + --tornasole_step_interval 10 \ + --constant_initializer 0.01 \ + --tornasole_path ~/ts_outputs/vanishing +``` + +You can monitor the exploding tensors by doing the following +``` +python -m tornasole.rules.rule_invoker --trial-dir ~/ts_outputs/vanishing --rule-name VanishingGradient +``` +#### Saving activations of RELU layers in full +``` +python train_imagenet_resnet_hvd.py --clear_log --enable_tornasole \ + --tornasole_save_relu_activations \ + --tornasole_step_interval 10 \ + --tornasole_path ~/ts_outputs/full_relu_activations +``` +#### Saving activations of RELU layers as reductions +``` +python train_imagenet_resnet_hvd.py --clear_log --enable_tornasole \ + --tornasole_save_relu_activations \ + --tornasole_relu_reductions min max mean variance \ + --tornasole_relu_reductions_abs mean variance \ + --tornasole_step_interval 10 \ + --tornasole_path ~/ts_outputs/reductions_relu_activations +``` +#### Saving weights every step +If you want to compute and track the ratio of weights and updates, +you can do that by saving weights every step as follows +``` +python train_imagenet_resnet_hvd.py --clear_log --enable_tornasole \ + --tornasole_save_weights \ + --tornasole_step_interval 1 \ + --tornasole_path ~/ts_outputs/weights +``` + +You can invoke the rule to +monitor the 
ratio of weights to updates every step. +A quick way to invoke the rule is like this: +``` +python -m tornasole.rules.rule_invoker --trial-dir ~/ts_outputs/weights --rule-name WeightUpdateRatio +``` +If you want to customize the thresholds, refer to the example in `analysis`: +[`examples/analysis/scripts/weight_update_ratio.py`](examples/analysis/scripts/weight_update_ratio.py) +and the [Rule API](docs/analysis/README.md#rules-api) + +#### Running with tornasole disabled +``` +python train_imagenet_resnet_hvd.py --clear_log +``` +### More +Please refer to [Tornasole Tensorflow page](docs/tensorflow/README.md) and the various flags in the script to customize the behavior further. +Refer [this page](docs/analysis/README.md) for more details on analysis. \ No newline at end of file diff --git a/examples/tensorflow/training_scripts/resnet50/train_imagenet_resnet_hvd.py b/examples/tensorflow/training_scripts/resnet50/train_imagenet_resnet_hvd.py new file mode 100644 index 0000000000..2d404f9510 --- /dev/null +++ b/examples/tensorflow/training_scripts/resnet50/train_imagenet_resnet_hvd.py @@ -0,0 +1,1211 @@ +#!/usr/bin/env python +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of NVIDIA CORPORATION nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import print_function + +try: + from builtins import range +except ImportError: + pass +import tensorflow as tf +import numpy as np +from tensorflow.contrib.image.python.ops import distort_image_ops +from tensorflow.python.ops import data_flow_ops +from tensorflow.contrib.data.python.ops import interleave_ops +from tensorflow.contrib.data.python.ops import batching +import horovod.tensorflow as hvd +import os +import sys +import time +import argparse +import random +import shutil +import logging +import math +import re +from glob import glob +from operator import itemgetter +from tensorflow.python.util import nest +import tornasole.tensorflow as ts + + +def rank0log(logger, *args, **kwargs): + if hvd.rank() == 0: + if logger: + logger.info(''.join([str(x) for x in list(args)])) + else: + print(*args, **kwargs) + +class LayerBuilder(object): + def __init__(self, activation=None, data_format='channels_last', + training=False, use_batch_norm=False, batch_norm_config=None, + conv_initializer=None, adv_bn_init=False): + self.activation = activation + self.data_format = data_format + self.training = training + self.use_batch_norm = use_batch_norm + self.batch_norm_config = batch_norm_config 
+ self.conv_initializer = conv_initializer + self.adv_bn_init = adv_bn_init + if self.batch_norm_config is None: + self.batch_norm_config = { + 'decay': 0.9, + 'epsilon': 1e-4, + 'scale': True, + 'zero_debias_moving_mean': False, + } + + def _conv2d(self, inputs, activation, *args, **kwargs): + x = tf.layers.conv2d( + inputs, data_format=self.data_format, + use_bias=not self.use_batch_norm, + kernel_initializer=self.conv_initializer, + activation=None if self.use_batch_norm else activation, + *args, **kwargs) + if self.use_batch_norm: + x = self.batch_norm(x) + x = activation(x) if activation is not None else x + return x + + def conv2d_linear_last_bn(self, inputs, *args, **kwargs): + x = tf.layers.conv2d( + inputs, data_format=self.data_format, + use_bias=False, + kernel_initializer=self.conv_initializer, + activation=None, *args, **kwargs) + param_initializers = { + 'moving_mean': tf.zeros_initializer(), + 'moving_variance': tf.ones_initializer(), + 'beta': tf.zeros_initializer(), + } + if self.adv_bn_init: + param_initializers['gamma'] = tf.zeros_initializer() + else: + param_initializers['gamma'] = tf.ones_initializer() + x = self.batch_norm(x, param_initializers=param_initializers) + return x + + def conv2d_linear(self, inputs, *args, **kwargs): + return self._conv2d(inputs, None, *args, **kwargs) + + def conv2d(self, inputs, *args, **kwargs): + return self._conv2d(inputs, self.activation, *args, **kwargs) + + def pad2d(self, inputs, begin, end=None): + if end is None: + end = begin + try: + _ = begin[1] + except TypeError: + begin = [begin, begin] + try: + _ = end[1] + except TypeError: + end = [end, end] + if self.data_format == 'channels_last': + padding = [[0, 0], [begin[0], end[0]], [begin[1], end[1]], [0, 0]] + else: + padding = [[0, 0], [0, 0], [begin[0], end[0]], [begin[1], end[1]]] + return tf.pad(inputs, padding) + + def max_pooling2d(self, inputs, *args, **kwargs): + return tf.layers.max_pooling2d( + inputs, data_format=self.data_format, *args, 
**kwargs) + + def average_pooling2d(self, inputs, *args, **kwargs): + return tf.layers.average_pooling2d( + inputs, data_format=self.data_format, *args, **kwargs) + + def dense_linear(self, inputs, units, **kwargs): + return tf.layers.dense(inputs, units, activation=None) + + def dense(self, inputs, units, **kwargs): + return tf.layers.dense(inputs, units, activation=self.activation) + + def activate(self, inputs, activation=None): + activation = activation or self.activation + return activation(inputs) if activation is not None else inputs + + def batch_norm(self, inputs, **kwargs): + all_kwargs = dict(self.batch_norm_config) + all_kwargs.update(kwargs) + data_format = 'NHWC' if self.data_format == 'channels_last' else 'NCHW' + return tf.contrib.layers.batch_norm( + inputs, is_training=self.training, data_format=data_format, + fused=True, **all_kwargs) + + def spatial_average2d(self, inputs): + shape = inputs.get_shape().as_list() + if self.data_format == 'channels_last': + n, h, w, c = shape + else: + n, c, h, w = shape + n = -1 if n is None else n + x = tf.layers.average_pooling2d(inputs, (h, w), (1, 1), + data_format=self.data_format) + return tf.reshape(x, [n, c]) + + def flatten2d(self, inputs): + x = inputs + if self.data_format != 'channel_last': + # Note: This ensures the output order matches that of NHWC networks + x = tf.transpose(x, [0, 2, 3, 1]) + input_shape = x.get_shape().as_list() + num_inputs = 1 + for dim in input_shape[1:]: + num_inputs *= dim + return tf.reshape(x, [-1, num_inputs], name='flatten') + + def residual2d(self, inputs, network, units=None, scale=1.0, activate=False): + outputs = network(inputs) + c_axis = -1 if self.data_format == 'channels_last' else 1 + h_axis = 1 if self.data_format == 'channels_last' else 2 + w_axis = h_axis + 1 + ishape, oshape = [y.get_shape().as_list() for y in [inputs, outputs]] + ichans, ochans = ishape[c_axis], oshape[c_axis] + strides = ((ishape[h_axis] - 1) // oshape[h_axis] + 1, + (ishape[w_axis] - 1) 
// oshape[w_axis] + 1) + with tf.name_scope('residual'): + if (ochans != ichans or strides[0] != 1 or strides[1] != 1): + inputs = self.conv2d_linear(inputs, units, 1, strides, 'SAME') + x = inputs + scale * outputs + if activate: + x = self.activate(x) + return x + + +def resnet_bottleneck_v1(builder, inputs, depth, depth_bottleneck, stride, + basic=False): + num_inputs = inputs.get_shape().as_list()[1] + x = inputs + with tf.name_scope('resnet_v1'): + if depth == num_inputs: + if stride == 1: + shortcut = x + else: + shortcut = builder.max_pooling2d(x, 1, stride) + else: + shortcut = builder.conv2d_linear(x, depth, 1, stride, 'SAME') + if basic: + x = builder.pad2d(x, 1) + x = builder.conv2d(x, depth_bottleneck, 3, stride, 'VALID') + x = builder.conv2d_linear(x, depth, 3, 1, 'SAME') + else: + x = builder.conv2d(x, depth_bottleneck, 1, 1, 'SAME') + x = builder.conv2d(x, depth_bottleneck, 3, stride, 'SAME') + # x = builder.conv2d_linear(x, depth, 1, 1, 'SAME') + x = builder.conv2d_linear_last_bn(x, depth, 1, 1, 'SAME') + x = tf.nn.relu(x + shortcut) + ts.add_to_collection('relu_activations', x) + return x + + +def inference_resnet_v1_impl(builder, inputs, layer_counts, basic=False): + x = inputs + x = builder.pad2d(x, 3) + x = builder.conv2d(x, 64, 7, 2, 'VALID') + x = builder.max_pooling2d(x, 3, 2, 'SAME') + for i in range(layer_counts[0]): + x = resnet_bottleneck_v1(builder, x, 256, 64, 1, basic) + for i in range(layer_counts[1]): + x = resnet_bottleneck_v1(builder, x, 512, 128, 2 if i == 0 else 1, basic) + for i in range(layer_counts[2]): + x = resnet_bottleneck_v1(builder, x, 1024, 256, 2 if i == 0 else 1, basic) + for i in range(layer_counts[3]): + x = resnet_bottleneck_v1(builder, x, 2048, 512, 2 if i == 0 else 1, basic) + return builder.spatial_average2d(x) + + +def inference_resnet_v1(inputs, nlayer, data_format='channels_last', + training=False, conv_initializer=None, adv_bn_init=False): + """Deep Residual Networks family of models + 
https://arxiv.org/abs/1512.03385 + """ + builder = LayerBuilder(tf.nn.relu, data_format, training, use_batch_norm=True, + conv_initializer=conv_initializer, adv_bn_init=adv_bn_init) + if nlayer == 18: + return inference_resnet_v1_impl(builder, inputs, [2, 2, 2, 2], basic=True) + elif nlayer == 34: + return inference_resnet_v1_impl(builder, inputs, [3, 4, 6, 3], basic=True) + elif nlayer == 50: + return inference_resnet_v1_impl(builder, inputs, [3, 4, 6, 3]) + elif nlayer == 101: + return inference_resnet_v1_impl(builder, inputs, [3, 4, 23, 3]) + elif nlayer == 152: + return inference_resnet_v1_impl(builder, inputs, [3, 8, 36, 3]) + else: + raise ValueError("Invalid nlayer (%i); must be one of: 18,34,50,101,152" % + nlayer) + + +def get_model_func(model_name): + if model_name.startswith('resnet'): + nlayer = int(model_name[len('resnet'):]) + return lambda images, *args, **kwargs: \ + inference_resnet_v1(images, nlayer, *args, **kwargs) + else: + raise ValueError("Invalid model type: %s" % model_name) + + +def deserialize_image_record(record): + feature_map = { + 'image/encoded': tf.FixedLenFeature([], tf.string, ''), + 'image/class/label': tf.FixedLenFeature([1], tf.int64, -1), + 'image/class/text': tf.FixedLenFeature([], tf.string, ''), + 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), + 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), + 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), + 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32) + } + with tf.name_scope('deserialize_image_record'): + obj = tf.parse_single_example(record, feature_map) + imgdata = obj['image/encoded'] + label = tf.cast(obj['image/class/label'], tf.int32) + bbox = tf.stack([obj['image/object/bbox/%s' % x].values + for x in ['ymin', 'xmin', 'ymax', 'xmax']]) + bbox = tf.transpose(tf.expand_dims(bbox, 0), [0, 2, 1]) + text = obj['image/class/text'] + return imgdata, label, bbox, text + + +def decode_jpeg(imgdata, channels=3): + return 
tf.image.decode_jpeg(imgdata, channels=channels, + fancy_upscaling=False, + dct_method='INTEGER_FAST') + + +def crop_and_resize_image(image, original_bbox, height, width, + distort=False, nsummary=10): + with tf.name_scope('crop_and_resize'): + # Evaluation is done on a center-crop of this ratio + eval_crop_ratio = 0.8 + if distort: + initial_shape = [int(round(height / eval_crop_ratio)), + int(round(width / eval_crop_ratio)), + 3] + bbox_begin, bbox_size, bbox = \ + tf.image.sample_distorted_bounding_box( + initial_shape, + bounding_boxes=tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]), + # tf.zeros(shape=[1,0,4]), # No bounding boxes + min_object_covered=0.1, + aspect_ratio_range=[3. / 4., 4. / 3.], + area_range=[0.08, 1.0], + max_attempts=100, + seed=11 * hvd.rank(), # Need to set for deterministic results + use_image_if_no_bounding_boxes=True) + bbox = bbox[0, 0] # Remove batch, box_idx dims + else: + # Central crop + ratio_y = ratio_x = eval_crop_ratio + bbox = tf.constant([0.5 * (1 - ratio_y), 0.5 * (1 - ratio_x), + 0.5 * (1 + ratio_y), 0.5 * (1 + ratio_x)]) + image = tf.image.crop_and_resize( + image[None, :, :, :], bbox[None, :], [0], [height, width])[0] + return image + + +def parse_and_preprocess_image_record(record, counter, height, width, + brightness, contrast, saturation, hue, + distort=False, nsummary=10, increased_aug=False): + imgdata, label, bbox, text = deserialize_image_record(record) + label -= 1 # Change to 0-based (don't use background class) + with tf.name_scope('preprocess_train'): + try: + image = decode_jpeg(imgdata, channels=3) + except: + image = tf.image.decode_png(imgdata, channels=3) + image = crop_and_resize_image(image, bbox, height, width, distort) + if distort: + image = tf.image.random_flip_left_right(image) + if increased_aug: + image = tf.image.random_brightness(image, max_delta=brightness) + image = distort_image_ops.random_hsv_in_yiq(image, + lower_saturation=saturation, + upper_saturation=2.0 - 
saturation, + max_delta_hue=hue * math.pi) + image = tf.image.random_contrast(image, lower=contrast, upper=2.0 - contrast) + tf.summary.image('distorted_color_image', tf.expand_dims(image, 0)) + image = tf.clip_by_value(image, 0., 255.) + image = tf.cast(image, tf.uint8) + return image, label + +def make_dataset(filenames, take_count, batch_size, height, width, + brightness, contrast, saturation, hue, + training=False, num_threads=10, nsummary=10, shard=False, synthetic=False, + increased_aug=False): + if synthetic and training: + input_shape = [height, width, 3] + input_element = nest.map_structure(lambda s: tf.constant(0.5, tf.float32, s), tf.TensorShape(input_shape)) + label_element = nest.map_structure(lambda s: tf.constant(1, tf.int32, s), tf.TensorShape([1])) + element = (input_element, label_element) + ds = tf.data.Dataset.from_tensors(element).repeat() + else: + shuffle_buffer_size = 10000 + num_readers = 1 + if hvd.size() > len(filenames): + assert (hvd.size() % len(filenames)) == 0 + filenames = filenames * (hvd.size() / len(filenames)) + + ds = tf.data.Dataset.from_tensor_slices(filenames) + if shard: + # split the dataset into parts for each GPU + ds = ds.shard(hvd.size(), hvd.rank()) + + if not training: + ds = ds.take(take_count) # make sure all ranks have the same amount + + if training: + ds = ds.shuffle(1000, seed=7 * (1 + hvd.rank())) + + ds = ds.interleave( + tf.data.TFRecordDataset, cycle_length=num_readers, block_length=1) + counter = tf.data.Dataset.range(sys.maxsize) + ds = tf.data.Dataset.zip((ds, counter)) + preproc_func = lambda record, counter_: parse_and_preprocess_image_record( + record, counter_, height, width, brightness, contrast, saturation, hue, + distort=training, nsummary=nsummary if training else 0, increased_aug=increased_aug) + ds = ds.map(preproc_func, num_parallel_calls=num_threads) + if training: + ds = ds.apply(tf.data.experimental.shuffle_and_repeat(shuffle_buffer_size, seed=5*(1+hvd.rank()))) + ds = ds.batch(batch_size) 
+ return ds + + +def stage(tensors): + """Stages the given tensors in a StagingArea for asynchronous put/get. + """ + stage_area = data_flow_ops.StagingArea( + dtypes=[tensor.dtype for tensor in tensors], + shapes=[tensor.get_shape() for tensor in tensors]) + put_op = stage_area.put(tensors) + get_tensors = stage_area.get() + tf.add_to_collection('STAGING_AREA_PUTS', put_op) + return put_op, get_tensors + + +class PrefillStagingAreasHook(tf.train.SessionRunHook): + def after_create_session(self, session, coord): + enqueue_ops = tf.get_collection('STAGING_AREA_PUTS') + for i in range(len(enqueue_ops)): + session.run(enqueue_ops[:i + 1]) + + +class LogSessionRunHook(tf.train.SessionRunHook): + def __init__(self, global_batch_size, num_records, display_every=10, logger=None): + self.global_batch_size = global_batch_size + self.num_records = num_records + self.display_every = display_every + self.logger = logger + + def after_create_session(self, session, coord): + rank0log(self.logger, ' Step Epoch Speed Loss FinLoss LR') + self.elapsed_secs = 0. + self.count = 0 + + def before_run(self, run_context): + self.t0 = time.time() + return tf.train.SessionRunArgs( + fetches=[tf.train.get_global_step(), + 'loss:0', 'total_loss:0', 'learning_rate:0']) + + def after_run(self, run_context, run_values): + self.elapsed_secs += time.time() - self.t0 + self.count += 1 + global_step, loss, total_loss, lr = run_values.results + if global_step == 1 or global_step % self.display_every == 0: + dt = self.elapsed_secs / self.count + img_per_sec = self.global_batch_size / dt + epoch = global_step * self.global_batch_size / self.num_records + self.logger.info('%6i %5.1f %7.1f %6.3f %6.3f %7.5f' % + (global_step, epoch, img_per_sec, loss, total_loss, lr)) + self.elapsed_secs = 0. 
+ self.count = 0 + + +def _fp32_trainvar_getter(getter, name, shape=None, dtype=None, + trainable=True, regularizer=None, + *args, **kwargs): + storage_dtype = tf.float32 if trainable else dtype + variable = getter(name, shape, dtype=storage_dtype, + trainable=trainable, + regularizer=regularizer if trainable and 'BatchNorm' not in name and 'batchnorm' not in name and 'batch_norm' not in name and 'Batch_Norm' not in name else None, + *args, **kwargs) + if trainable and dtype != tf.float32: + cast_name = name + '/fp16_cast' + try: + cast_variable = tf.get_default_graph().get_tensor_by_name( + cast_name + ':0') + except KeyError: + cast_variable = tf.cast(variable, dtype, name=cast_name) + cast_variable._ref = variable._ref + variable = cast_variable + return variable + + +def fp32_trainable_vars(name='fp32_vars', *args, **kwargs): + """A varible scope with custom variable getter to convert fp16 trainable + variables with fp32 storage followed by fp16 cast. + """ + return tf.variable_scope( + name, custom_getter=_fp32_trainvar_getter, *args, **kwargs) + + +class MixedPrecisionOptimizer(tf.train.Optimizer): + """An optimizer that updates trainable variables in fp32.""" + + def __init__(self, optimizer, + scale=None, + name="MixedPrecisionOptimizer", + use_locking=False): + super(MixedPrecisionOptimizer, self).__init__( + name=name, use_locking=use_locking) + self._optimizer = optimizer + self._scale = float(scale) if scale is not None else 1.0 + + def compute_gradients(self, loss, var_list=None, *args, **kwargs): + if var_list is None: + var_list = ( + tf.trainable_variables() + + tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES)) + + replaced_list = var_list + + if self._scale != 1.0: + loss = tf.scalar_mul(self._scale, loss) + gradvar = self._optimizer.compute_gradients(loss, replaced_list, *args, **kwargs) + + final_gradvar = [] + # with tf.device('/cpu:0'): + # tf.summary.merge([tf.summary.histogram("ts/gradient/%s" % g[1].name, g[0]) for g in gradvar]) 
+ # tf.summary.merge([tf.summary.histogram("ts/weights/%s" % g[1].name, g[1]) for g in gradvar]) + # with tf.variable_scope("tsgrad") as vs: + # pass + # grads = [] + + for orig_var, (grad, var) in zip(var_list, gradvar): + if var is not orig_var: + grad = tf.cast(grad, orig_var.dtype) + # with tf.variable_scope(vs, auxiliary_name_scope=False) as vs1: + # with tf.name_scope(vs1.original_name_scope): + # grad = tf.identity(grad, name='_'.join(grad.name.split('/'))[:-2]) + # grads.append(grad) + if self._scale != 1.0: + grad = tf.scalar_mul(1. / self._scale, grad) + final_gradvar.append((grad, orig_var)) + # save_tensornames_to_file('grads_wrt_weights.txt', grads) + return final_gradvar + + def apply_gradients(self, *args, **kwargs): + return self._optimizer.apply_gradients(*args, **kwargs) + +class LarcOptimizer(tf.train.Optimizer): + """ LARC implementation + ------------------- + Parameters: + - optimizer: initial optimizer that you wanna apply + example: tf.train.MomentumOptimizer + - learning_rate: initial learning_rate from initial optimizer + - clip: if True apply LARC otherwise LARS + - epsilon: default value is weights or grads are 0. 
+ - name + - use_locking + """ + + def __init__(self, optimizer, learning_rate, eta, clip=True, epsilon=1., + name="LarcOptimizer", use_locking=False): + super(LarcOptimizer, self).__init__( + name=name, use_locking=use_locking) + self._optimizer = optimizer + self._learning_rate = learning_rate + self._eta = float(eta) + self._clip = clip + self._epsilon = float(epsilon) + + def compute_gradients(self, *args, **kwargs): + return self._optimizer.compute_gradients(*args, **kwargs) + + def apply_gradients(self, gradvars, *args, **kwargs): + v_list = [tf.norm(tensor=v, ord=2) for _, v in gradvars] + g_list = [tf.norm(tensor=g, ord=2) if g is not None else 0.0 + for g, _ in gradvars] + v_norms = tf.stack(v_list) + g_norms = tf.stack(g_list) + zeds = tf.zeros_like(v_norms) + # assign epsilon if weights or grads = 0, to avoid division by zero + # also prevent biases to get stuck at initialization (0.) + cond = tf.logical_and( + tf.not_equal(v_norms, zeds), + tf.not_equal(g_norms, zeds)) + true_vals = tf.scalar_mul(self._eta, tf.div(v_norms, g_norms)) + # true_vals = tf.scalar_mul(tf.cast(self._eta, tf.float32), tf.div(tf.cast(v_norms, tf.float32), tf.cast(g_norms, tf.float32))) + false_vals = tf.fill(tf.shape(v_norms), self._epsilon) + larc_local_lr = tf.where(cond, true_vals, false_vals) + if self._clip: + ones = tf.ones_like(v_norms) + lr = tf.fill(tf.shape(v_norms), self._learning_rate) + # We need gradients to compute local learning rate, + # so compute_gradients from initial optimizer have to called + # for which learning rate is already fixed + # We then have to scale the gradients instead of the learning rate. 
+ larc_local_lr = tf.minimum(tf.div(larc_local_lr, lr), ones) + gradvars = [(tf.multiply(larc_local_lr[i], g), v) + if g is not None else (None, v) + for i, (g, v) in enumerate(gradvars)] + return self._optimizer.apply_gradients(gradvars, *args, **kwargs) + + +def get_with_default(obj, key, default_value): + return obj[key] if key in obj and obj[key] is not None else default_value + + +def get_lr(lr, steps, lr_steps, warmup_it, decay_steps, global_step, lr_decay_mode, + cdr_first_decay_ratio, cdr_t_mul, cdr_m_mul, cdr_alpha, lc_periods, lc_alpha, lc_beta): + if lr_decay_mode == 'steps': + learning_rate = tf.train.piecewise_constant(global_step, + steps, lr_steps) + elif lr_decay_mode == 'poly' or lr_decay_mode == 'poly_cycle': + cycle = lr_decay_mode == 'poly_cycle' + learning_rate = tf.train.polynomial_decay(lr, + global_step - warmup_it, + decay_steps=decay_steps - warmup_it, + end_learning_rate=0.00001, + power=2, + cycle=cycle) + elif lr_decay_mode == 'cosine_decay_restarts': + learning_rate = tf.train.cosine_decay_restarts(lr, + global_step - warmup_it, + (decay_steps - warmup_it) * cdr_first_decay_ratio, + t_mul=cdr_t_mul, + m_mul=cdr_m_mul, + alpha=cdr_alpha) + elif lr_decay_mode == 'cosine': + learning_rate = tf.train.cosine_decay(lr, + global_step - warmup_it, + decay_steps=decay_steps - warmup_it, + alpha=0.0) + elif lr_decay_mode == 'linear_cosine': + learning_rate = tf.train.linear_cosine_decay(lr, + global_step - warmup_it, + decay_steps=decay_steps - warmup_it, + num_periods=lc_periods,#0.47, + alpha=lc_alpha,#0.0, + beta=lc_beta)#0.00001) + else: + raise ValueError('Invalid type of lr_decay_mode') + return learning_rate + + +def warmup_decay(warmup_lr, global_step, warmup_steps, warmup_end_lr): + from tensorflow.python.ops import math_ops + p = tf.cast(global_step, tf.float32) / tf.cast(warmup_steps, tf.float32) + diff = math_ops.subtract(warmup_end_lr, warmup_lr) + res = math_ops.add(warmup_lr, math_ops.multiply(diff, p)) + return res + + +def 
cnn_model_function(features, labels, mode, params): + labels = tf.reshape(labels, (-1,)) # Squash unnecessary unary dim + lr = params['lr'] + lr_steps = params['lr_steps'] + steps = params['steps'] + use_larc = params['use_larc'] + leta = params['leta'] + lr_decay_mode = params['lr_decay_mode'] + decay_steps = params['decay_steps'] + cdr_first_decay_ratio = params['cdr_first_decay_ratio'] + cdr_t_mul = params['cdr_t_mul'] + cdr_m_mul = params['cdr_m_mul'] + cdr_alpha = params['cdr_alpha'] + lc_periods = params['lc_periods'] + lc_alpha = params['lc_alpha'] + lc_beta = params['lc_beta'] + + model_name = params['model'] + num_classes = params['n_classes'] + model_dtype = get_with_default(params, 'dtype', tf.float32) + model_format = get_with_default(params, 'format', 'channels_first') + device = get_with_default(params, 'device', '/gpu:0') + model_func = get_model_func(model_name) + inputs = features # TODO: Should be using feature columns? + is_training = (mode == tf.estimator.ModeKeys.TRAIN) + momentum = params['mom'] + weight_decay = params['wdecay'] + warmup_lr = params['warmup_lr'] + warmup_it = params['warmup_it'] + loss_scale = params['loss_scale'] + + adv_bn_init = params['adv_bn_init'] + conv_init = params['conv_init'] + + if mode == tf.estimator.ModeKeys.TRAIN: + with tf.device('/cpu:0'): + preload_op, (inputs, labels) = stage([inputs, labels]) + ts.add_to_collection('inputs', inputs) + + with tf.device(device): + if mode == tf.estimator.ModeKeys.TRAIN: + gpucopy_op, (inputs, labels) = stage([inputs, labels]) + inputs = tf.cast(inputs, model_dtype) + imagenet_mean = np.array([121, 115, 100], dtype=np.float32) + imagenet_std = np.array([70, 68, 71], dtype=np.float32) + inputs = tf.subtract(inputs, imagenet_mean) + inputs = tf.multiply(inputs, 1. 
/ imagenet_std) + if model_format == 'channels_first': + inputs = tf.transpose(inputs, [0, 3, 1, 2]) + with fp32_trainable_vars( + regularizer=tf.contrib.layers.l2_regularizer(weight_decay)): + top_layer = model_func( + inputs, data_format=model_format, training=is_training, + conv_initializer=conv_init, adv_bn_init=adv_bn_init) + logits = tf.layers.dense(top_layer, num_classes, + kernel_initializer=tf.random_normal_initializer(stddev=0.01)) + predicted_classes = tf.argmax(logits, axis=1, output_type=tf.int32) + logits = tf.cast(logits, tf.float32) + if mode == tf.estimator.ModeKeys.PREDICT: + probabilities = tf.softmax(logits) + predictions = { + 'class_ids': predicted_classes[:, None], + 'probabilities': probabilities, + 'logits': logits + } + return tf.estimator.EstimatorSpec(mode, predictions=predictions) + loss = tf.losses.sparse_softmax_cross_entropy( + logits=logits, labels=labels) + loss = tf.identity(loss, name='loss') # For access by logger (TODO: Better way to access it?) + + if mode == tf.estimator.ModeKeys.EVAL: + with tf.device(None): # Allow fallback to CPU if no GPU support for these ops + accuracy = tf.metrics.accuracy( + labels=labels, predictions=predicted_classes) + top5acc = tf.metrics.mean( + tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)) + newaccuracy = (hvd.allreduce(accuracy[0]), accuracy[1]) + newtop5acc = (hvd.allreduce(top5acc[0]), top5acc[1]) + metrics = {'val-top1acc': newaccuracy, 'val-top5acc': newtop5acc} + return tf.estimator.EstimatorSpec( + mode, loss=loss, eval_metric_ops=metrics) + + assert (mode == tf.estimator.ModeKeys.TRAIN) + reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) + total_loss = tf.add_n([loss] + reg_losses, name='total_loss') + + batch_size = tf.shape(inputs)[0] + + global_step = tf.train.get_global_step() + + with tf.device('/cpu:0'): # Allow fallback to CPU if no GPU support for these ops + learning_rate = tf.cond(global_step < warmup_it, + lambda: warmup_decay(warmup_lr, global_step, 
warmup_it, + lr), + lambda: get_lr(lr, steps, lr_steps, warmup_it, decay_steps, global_step, + lr_decay_mode, + cdr_first_decay_ratio, cdr_t_mul, cdr_m_mul, cdr_alpha, + lc_periods, lc_alpha, lc_beta)) + learning_rate = tf.identity(learning_rate, 'learning_rate') + tf.summary.scalar('learning_rate', learning_rate) + + opt = tf.train.MomentumOptimizer( + learning_rate, momentum, use_nesterov=True) + opt = hvd.DistributedOptimizer(opt) + if use_larc: + opt = LarcOptimizer(opt, learning_rate, leta, clip=True) + + opt = MixedPrecisionOptimizer(opt, scale=loss_scale) + opt = ts.TornasoleOptimizer(opt) + + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) or [] + with tf.control_dependencies(update_ops): + gate_gradients = (tf.train.Optimizer.GATE_NONE) + train_op = opt.minimize( + total_loss, global_step=tf.train.get_global_step(), + gate_gradients=gate_gradients) + train_op = tf.group(preload_op, gpucopy_op, train_op) # , update_ops) + + return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op) + + +def get_num_records(filenames): + def count_records(tf_record_filename): + count = 0 + for _ in tf.python_io.tf_record_iterator(tf_record_filename): + count += 1 + return count + + nfile = len(filenames) + return (count_records(filenames[0]) * (nfile - 1) + + count_records(filenames[-1])) + + +def add_bool_argument(cmdline, shortname, longname=None, default=False, help=None): + if longname is None: + shortname, longname = None, shortname + elif default == True: + raise ValueError("""Boolean arguments that are True by default should not have short names.""") + name = longname[2:] + feature_parser = cmdline.add_mutually_exclusive_group(required=False) + if shortname is not None: + feature_parser.add_argument(shortname, '--' + name, dest=name, action='store_true', help=help, default=default) + else: + feature_parser.add_argument('--' + name, dest=name, action='store_true', help=help, default=default) + feature_parser.add_argument('--no' + name, 
dest=name, action='store_false') + return cmdline + + +def add_cli_args(): + cmdline = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + # Basic options + cmdline.add_argument('-m', '--model', default='resnet50', + help="""Name of model to run: resnet[18,34,50,101,152]""") + cmdline.add_argument('--data_dir', + help="""Path to dataset in TFRecord format + (aka Example protobufs). Files should be + named 'train-*' and 'validation-*'.""") + add_bool_argument(cmdline, '--synthetic', help="""Whether to use synthetic data for training""") + cmdline.add_argument('-b', '--batch_size', default=128, type=int, + help="""Size of each minibatch per GPU""") + cmdline.add_argument('--num_batches', type=int, default=200, + help="""Number of batches to run. + Ignored during eval or if num epochs given""") + cmdline.add_argument('--num_epochs', type=int, + help="""Number of epochs to run. + Overrides --num_batches. Ignored during eval.""") + cmdline.add_argument('--log_dir', default='tf_logs', + help="""Directory in which to write training + summaries and checkpoints. If the log directory already + contains some checkpoints, it tries to resume training + from the last saved checkpoint. Pass --clear_log if you + want to clear all checkpoints and start a fresh run""") + cmdline.add_argument('--random_seed', type=bool, default=False) + add_bool_argument(cmdline, '--clear_log', default=False, + help="""Clear the log folder passed so a fresh run can be started""") + cmdline.add_argument('--log_name', type=str, default='hvd_train.log') + add_bool_argument(cmdline, '--local_ckpt', + help="""Performs local checkpoints (i.e. one per node)""") + cmdline.add_argument('--display_every', default=20, type=int, + help="""How often (in iterations) to print out + running information.""") + add_bool_argument(cmdline, '--eval', + help="""Evaluate the top-1 and top-5 accuracy of + the latest checkpointed model. 
If you want to evaluate using multiple GPUs ensure that + all processes have access to all checkpoints. Either if checkpoints + were saved using --local_ckpt or they were saved to a shared directory which all processes + can access.""") + cmdline.add_argument('--eval_interval', type=int, + help="""Evaluate accuracy per eval_interval number of epochs""") + add_bool_argument(cmdline, '--fp16', default=True, + help="""Train using float16 (half) precision instead + of float32.""") + cmdline.add_argument('--num_gpus', default=1, type=int, + help="""Specify total number of GPUS used to train a checkpointed model during eval. + Used only to calculate epoch number to print during evaluation""") + + cmdline.add_argument('--save_checkpoints_steps', type=int, default=1000) + cmdline.add_argument('--save_summary_steps', type=int, default=0) + add_bool_argument(cmdline, '--adv_bn_init', default=True, + help="""init gamme of the last BN of each ResMod at 0.""") + add_bool_argument(cmdline, '--adv_conv_init', default=True, + help="""init conv with MSRA initializer""") + + cmdline.add_argument('--lr', type=float, + help="""Start learning rate""") + cmdline.add_argument('--mom', default=0.90, type=float, + help="""Momentum""") + cmdline.add_argument('--wdecay', default=0.0001, type=float, + help="""Weight decay""") + cmdline.add_argument('--loss_scale', default=1024., type=float, + help="""loss scale""") + cmdline.add_argument('--warmup_lr', default=0.001, type=float, + help="""Warmup starting from this learning rate""") + cmdline.add_argument('--warmup_epochs', default=0, type=int, + help="""Number of epochs in which to warmup to given lr""") + cmdline.add_argument('--lr_decay_steps', default='30,60,80', type=str, + help="""epoch numbers at which lr is decayed by lr_decay_lrs. 
+ Used when lr_decay_mode is steps""") + cmdline.add_argument('--lr_decay_lrs', default='', type=str, + help="""learning rates at specific epochs""") + cmdline.add_argument('--lr_decay_mode', default='poly', + help="""Takes either `steps` (decay by a factor at specified steps) + or `poly`(polynomial_decay with degree 2)""") + + add_bool_argument(cmdline, '--use_larc', default=False, + help="""Use Layer wise Adaptive Rate Control which helps convergence at really large batch sizes""") + cmdline.add_argument('--leta', default=0.013, type=float, + help="""The trust coefficient for LARC optimization, LARC Eta""") + + cmdline.add_argument('--cdr_first_decay_ratio', default=0.33, type=float, + help="""Cosine Decay Restart First Deacy Steps ratio""") + cmdline.add_argument('--cdr_t_mul', default=2.0, type=float, + help="""Cosine Decay Restart t_mul""") + cmdline.add_argument('--cdr_m_mul', default=0.1, type=float, + help="""Cosine Decay Restart m_mul""") + cmdline.add_argument('--cdr_alpha', default=0.0, type=float, + help="""Cosine Decay Restart alpha""") + cmdline.add_argument('--lc_periods', default=0.47, type=float, + help="""Linear Cosine num of periods""") + cmdline.add_argument('--lc_alpha', default=0.0, type=float, + help="""linear Cosine alpha""") + cmdline.add_argument('--lc_beta', default=0.00001, type=float, + help="""Liner Cosine Beta""") + + add_bool_argument(cmdline, '--increased_aug', default=False, + help="""Increase augmentations helpful when training with large number of GPUs such as 128 or 256""") + cmdline.add_argument('--contrast', default=0.6, type=float, + help="""contrast factor""") + cmdline.add_argument('--saturation', default=0.6, type=float, + help="""saturation factor""") + cmdline.add_argument('--hue', default=0.13, type=float, + help="""hue max delta factor, hue delta = hue * math.pi""") + cmdline.add_argument('--brightness', default=0.3, type=float, + help="""Brightness factor""") + + # tornasole arguments + add_bool_argument(cmdline, 
'--enable_tornasole', default=False, help="""enable Tornasole""") + cmdline.add_argument('--tornasole_path', default='tornasole_outputs/default_run', + help="""Directory in which to write tornasole data. This can be a local path or + S3 path in the form s3://bucket_name/prefix_name""") + add_bool_argument(cmdline, '--tornasole_save_all', default=False, help="""save all tensors""") + add_bool_argument(cmdline, '--tornasole_dryrun', default=False, help="""If enabled, do not write data to disk""") + cmdline.add_argument('--tornasole_exclude', nargs='+', default=[], type=str, action='append', + help="""List of REs for tensors to exclude from Tornasole's default collection""") + cmdline.add_argument('--tornasole_include', nargs='+', default=[], type=str, action='append', + help="""List of REs for tensors to include from Tornasole's default collection""") + cmdline.add_argument('--tornasole_step_interval', default=10, type=int, + help="""Save tornasole data every N runs""" ) + add_bool_argument(cmdline, '--tornasole_save_weights', default=False) + add_bool_argument(cmdline, '--tornasole_save_gradients', default=False) + add_bool_argument(cmdline, '--tornasole_save_inputs', default=False) + add_bool_argument(cmdline, '--tornasole_save_relu_activations', default=False) + cmdline.add_argument('--tornasole_relu_reductions', nargs='+', type=str, default=[], + help='If passed, saves relu activations in the form of these reductions') + cmdline.add_argument('--tornasole_relu_reductions_abs', nargs='+', type=str, default=[], + help='If passed, saves relu activations in the form of these reductions ' + 'on absolute values of the tensor') + cmdline.add_argument('--constant_initializer', type=float, + help="if passed sets that constant as initial weight, " + "if not uses default initialization strategies") + return cmdline + + +def sort_and_load_ckpts(log_dir): + ckpts = [] + for f in os.listdir(log_dir): + m = re.match(r'model.ckpt-([0-9]+).index', f) + if m is None: + continue + 
fullpath = os.path.join(log_dir, f) + ckpts.append({'step': int(m.group(1)), + 'path': os.path.splitext(fullpath)[0], + 'mtime': os.stat(fullpath).st_mtime, + }) + ckpts.sort(key=itemgetter('step')) + return ckpts + +def get_tornasole_hook(FLAGS): + abs_reductions = [] + reductions = [] + if FLAGS.tornasole_relu_reductions: + for r in FLAGS.tornasole_relu_reductions: + reductions.append(r) + if FLAGS.tornasole_relu_reductions_abs: + for r in FLAGS.tornasole_relu_reductions_abs: + abs_reductions.append(r) + if reductions or abs_reductions: + rnc = ts.ReductionConfig(reductions=reductions, abs_reductions=abs_reductions) + else: + rnc = None + + include_collections = [] + + if FLAGS.tornasole_save_weights: + include_collections.append('weights') + if FLAGS.tornasole_save_gradients: + include_collections.append('gradients') + if FLAGS.tornasole_save_relu_activations: + include_collections.append('relu_activations') + if FLAGS.tornasole_save_inputs: + include_collections.append('inputs') + if FLAGS.tornasole_include: + ts.get_collection('default').include(FLAGS.tornasole_include) + include_collections.append('default') + + return ts.TornasoleHook(out_dir=FLAGS.tornasole_path, + save_config=ts.SaveConfig(save_interval=FLAGS.tornasole_step_interval), + reduction_config=rnc, + include_collections=include_collections, + save_all=FLAGS.tornasole_save_all) +def main(): + gpu_thread_count = 2 + os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private' + os.environ['TF_GPU_THREAD_COUNT'] = str(gpu_thread_count) + os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1' + os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' + hvd.init() + + config = tf.ConfigProto() + config.gpu_options.visible_device_list = str(hvd.local_rank()) + config.gpu_options.force_gpu_compatible = True # Force pinned memory + config.intra_op_parallelism_threads = 1 # Avoid pool of Eigen threads + config.inter_op_parallelism_threads = 5 + + cmdline = add_cli_args() + FLAGS, unknown_args = 
cmdline.parse_known_args() + + # these random seeds are only intended for test purpose. + # for now, such seed settings could promise no assert failure when running tornasole_rules test_rules.py with config.yaml + # if you wish to change the seed settings, notice that certain steps' tensor value may be capable of variation + if FLAGS.random_seed: + random.seed(5 * (1 + hvd.rank())) + np.random.seed(7 * (1 + hvd.rank())) + tf.set_random_seed(31 * (1 + hvd.rank())) + + if len(unknown_args) > 0: + for bad_arg in unknown_args: + print("ERROR: Unknown command line arg: %s" % bad_arg) + raise ValueError("Invalid command line arg(s)") + + FLAGS.data_dir = None if FLAGS.data_dir == "" else FLAGS.data_dir + FLAGS.log_dir = None if FLAGS.log_dir == "" else FLAGS.log_dir + + if FLAGS.eval: + FLAGS.log_name = 'eval_' + FLAGS.log_name + if FLAGS.local_ckpt: + do_checkpoint = hvd.local_rank() == 0 + else: + do_checkpoint = hvd.rank() == 0 + if hvd.local_rank() == 0 and FLAGS.clear_log and os.path.isdir(FLAGS.log_dir): + shutil.rmtree(FLAGS.log_dir) + barrier = hvd.allreduce(tf.constant(0, dtype=tf.float32)) + tf.Session(config=config).run(barrier) + + if hvd.local_rank() == 0 and not os.path.isdir(FLAGS.log_dir): + os.makedirs(FLAGS.log_dir) + barrier = hvd.allreduce(tf.constant(0, dtype=tf.float32)) + tf.Session(config=config).run(barrier) + + logger = logging.getLogger(FLAGS.log_name) + logger.setLevel(logging.INFO) # INFO, ERROR + # file handler which logs debug messages + # console handler + ch = logging.StreamHandler() + ch.setLevel(logging.INFO) + # add formatter to the handlers + # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + formatter = logging.Formatter('%(message)s') + ch.setFormatter(formatter) + logger.addHandler(ch) + if not hvd.rank(): + fh = logging.FileHandler(os.path.join(FLAGS.log_dir, FLAGS.log_name)) + fh.setLevel(logging.DEBUG) + fh.setFormatter(formatter) + # add handlers to logger + logger.addHandler(fh) + + 
height, width = 224, 224 + global_batch_size = FLAGS.batch_size * hvd.size() + rank0log(logger, 'PY' + str(sys.version) + 'TF' + str(tf.__version__)) + rank0log(logger, "Horovod size: ", hvd.size()) + + if FLAGS.data_dir: + filename_pattern = os.path.join(FLAGS.data_dir, '%s-*') + train_filenames = sorted(tf.gfile.Glob(filename_pattern % 'train')) + eval_filenames = sorted(tf.gfile.Glob(filename_pattern % 'validation')) + num_training_samples = get_num_records(train_filenames) + rank0log(logger, "Using data from: ", FLAGS.data_dir) + if not FLAGS.eval: + rank0log(logger, 'Found ', num_training_samples, ' training samples') + else: + if not FLAGS.synthetic: + FLAGS.synthetic = True + rank0log(logger, 'data_dir missing. Using synthetic data. ' \ + 'If you want to run on real data' \ + 'pass --data_dir PATH_TO_DATA') + train_filenames = eval_filenames = [] + num_training_samples = 1281167 + training_samples_per_rank = num_training_samples // hvd.size() + + if FLAGS.num_batches: + nstep = FLAGS.num_batches + FLAGS.num_epochs = max(nstep * global_batch_size // num_training_samples, 1) + elif FLAGS.num_epochs: + nstep = num_training_samples * FLAGS.num_epochs // global_batch_size + else: + raise ValueError("Either num_epochs or num_batches has to be passed") + nstep_per_epoch = num_training_samples // global_batch_size + decay_steps = nstep + + if FLAGS.lr_decay_mode == 'steps': + steps = [int(x) * nstep_per_epoch for x in FLAGS.lr_decay_steps.split(',')] + lr_steps = [float(x) for x in FLAGS.lr_decay_lrs.split(',')] + else: + steps = [] + lr_steps = [] + + if not FLAGS.lr: + if FLAGS.use_larc: + FLAGS.lr = 3.7 + else: + FLAGS.lr = (hvd.size() * FLAGS.batch_size * 0.1) / 256 + if not FLAGS.save_checkpoints_steps: + # default to save one checkpoint per epoch + FLAGS.save_checkpoints_steps = nstep_per_epoch + if not FLAGS.save_summary_steps: + # default to save one checkpoint per epoch + FLAGS.save_summary_steps = nstep_per_epoch + + if not FLAGS.eval: + rank0log(logger, 
'Using a learning rate of ', FLAGS.lr) + rank0log(logger, 'Checkpointing every ' + str(FLAGS.save_checkpoints_steps) + ' steps') + rank0log(logger, 'Saving summary every ' + str(FLAGS.save_summary_steps) + ' steps') + + warmup_it = nstep_per_epoch * FLAGS.warmup_epochs + if FLAGS.constant_initializer: + initializer_conv = tf.constant_initializer(FLAGS.constant_initializer) + elif FLAGS.adv_conv_init: + initializer_conv = tf.variance_scaling_initializer() + else: + initializer_conv = None + + classifier = tf.estimator.Estimator( + model_fn=cnn_model_function, + model_dir=FLAGS.log_dir, + params={ + 'model': FLAGS.model, + 'decay_steps': decay_steps, + 'n_classes': 1000, + 'dtype': tf.float16 if FLAGS.fp16 else tf.float32, + 'format': 'channels_first', + 'device': '/gpu:0', + 'lr': FLAGS.lr, + 'mom': FLAGS.mom, + 'wdecay': FLAGS.wdecay, + 'use_larc': FLAGS.use_larc, + 'leta': FLAGS.leta, + 'steps': steps, + 'lr_steps': lr_steps, + 'lr_decay_mode': FLAGS.lr_decay_mode, + 'warmup_it': warmup_it, + 'warmup_lr': FLAGS.warmup_lr, + 'cdr_first_decay_ratio': FLAGS.cdr_first_decay_ratio, + 'cdr_t_mul': FLAGS.cdr_t_mul, + 'cdr_m_mul': FLAGS.cdr_m_mul, + 'cdr_alpha': FLAGS.cdr_alpha, + 'lc_periods': FLAGS.lc_periods, + 'lc_alpha': FLAGS.lc_alpha, + 'lc_beta': FLAGS.lc_beta, + 'loss_scale': FLAGS.loss_scale, + 'adv_bn_init': FLAGS.adv_bn_init, + 'conv_init': initializer_conv + }, + config=tf.estimator.RunConfig( + tf_random_seed=31 * (1 + hvd.rank()), + session_config=config, + save_summary_steps=FLAGS.save_summary_steps if do_checkpoint else None, + save_checkpoints_steps=FLAGS.save_checkpoints_steps if do_checkpoint else None, + keep_checkpoint_max=None)) + + if FLAGS.enable_tornasole and hvd.rank() == 0: + hook = get_tornasole_hook(FLAGS) + + if not FLAGS.eval: + num_preproc_threads = 5 + rank0log(logger, "Using preprocessing threads per GPU: ", num_preproc_threads) + training_hooks = [hvd.BroadcastGlobalVariablesHook(0), + PrefillStagingAreasHook()] + if hvd.rank() == 0: + 
training_hooks.append( LogSessionRunHook(global_batch_size, + num_training_samples, FLAGS.display_every, logger)) + if FLAGS.enable_tornasole: + training_hooks.append(hook) + try: + hook.set_mode(ts.modes.TRAIN) + start_time = time.time() + classifier.train( + input_fn=lambda: make_dataset( + train_filenames, + training_samples_per_rank, + FLAGS.batch_size, height, width, + FLAGS.brightness, FLAGS.contrast, FLAGS.saturation, FLAGS.hue, + training=True, num_threads=num_preproc_threads, + shard=True, synthetic=FLAGS.synthetic, + increased_aug=FLAGS.increased_aug), + max_steps=nstep, + hooks=training_hooks) + rank0log(logger, "Finished in ", time.time() - start_time) + except KeyboardInterrupt: + print("Keyboard interrupt") + elif FLAGS.eval and not FLAGS.synthetic: + rank0log(logger, "Evaluating") + rank0log(logger, "Validation dataset size: {}".format(get_num_records(eval_filenames))) + barrier = hvd.allreduce(tf.constant(0, dtype=tf.float32)) + tf.Session(config=config).run(barrier) + time.sleep(5) # a little extra margin... + if FLAGS.num_gpus == 1: + rank0log(logger, """If you are evaluating checkpoints of a multi-GPU run on a single GPU, + ensure you set --num_gpus to the number of GPUs it was trained on. 
+ This will ensure that the epoch number is accurately displayed in the below logs.""") + try: + ckpts = sort_and_load_ckpts(FLAGS.log_dir) + for i, c in enumerate(ckpts): + if i < len(ckpts) - 1: + if (not FLAGS.eval_interval) or \ + (i % FLAGS.eval_interval != 0): + continue + hook.set_mode(ts.modes.EVAL) + eval_result = classifier.evaluate( + input_fn=lambda: make_dataset( + eval_filenames, + get_num_records(eval_filenames), FLAGS.batch_size, + height, width, + FLAGS.brightness, FLAGS.contrast, FLAGS.saturation, FLAGS.hue, + training=False, shard=True, increased_aug=False), + checkpoint_path=c['path']) + c['epoch'] = c['step'] / (num_training_samples // (FLAGS.batch_size * FLAGS.num_gpus)) + c['top1'] = eval_result['val-top1acc'] + c['top5'] = eval_result['val-top5acc'] + c['loss'] = eval_result['loss'] + rank0log(logger, ' step epoch top1 top5 loss checkpoint_time(UTC)') + barrier = hvd.allreduce(tf.constant(0, dtype=tf.float32)) + for i, c in enumerate(ckpts): + tf.Session(config=config).run(barrier) + if 'top1' not in c: + continue + rank0log(logger,'{:5d} {:5.1f} {:5.3f} {:6.2f} {:6.2f} {time}' + .format(c['step'], + c['epoch'], + c['top1'] * 100, + c['top5'] * 100, + c['loss'], + time=time.strftime('%Y-%m-%d %H:%M:%S', + time.localtime(c['mtime'])))) + rank0log(logger, "Finished evaluation") + except KeyboardInterrupt: + logger.error("Keyboard interrupt") + +if __name__ == '__main__': + main() diff --git a/examples/tensorflow/training_scripts/simple/README.md b/examples/tensorflow/training_scripts/simple/README.md new file mode 100644 index 0000000000..6cb66fa941 --- /dev/null +++ b/examples/tensorflow/training_scripts/simple/README.md @@ -0,0 +1,120 @@ +# Simple Example +We provide a simple example script `simple.py` which is a Tornasole-enabled TensorFlow training script. It uses the Session interface of TensorFlow. +Here we show different scenarios of how to use Tornasole to save different tensors during training for analysis. 
+Below are listed the changes we made to integrate these different behaviors of Tornasole as well as example commands for you to try. + +## Integrating Tornasole +Below we call out the changes for Tornasole in the above script and describe them + +**Importing TornasoleTF** +``` +import tornasole_tf as ts +``` +**Saving all tensors** +``` +ts.TornasoleHook(..., save_all=True, ...) +``` +**Saving gradients** + +We need to wrap our optimizer with TornasoleOptimizer, and use this optimizer to minimize loss. +This will also enable us to access the gradients during analysis without having to identify which tensors out of the saved ones are the gradients. +``` +opt = TornasoleOptimizer(opt) +optimizer_op = optimizer.minimize(loss, global_step=increment_global_step_op) + +ts.TornasoleHook(..., include_collections=[..,'gradients'], ...) +``` +**Setting save interval** +``` +ts.TornasoleHook(...,save_config=ts.SaveConfig(save_interval=args.tornasole_frequency)...) +``` +**Setting the right mode** + +Since we are only training here, you will see in the code that the +appropriate training mode has been set before the session run calls. +``` +hook.set_mode(ts.modes.TRAIN) +``` +**Passing the hook** + +We need to pass this hook to a monitored session and use this session for running the job. +``` +hook = ts.TornasoleHook(...) +sess = tf.train.MonitoredSession(hooks=[hook]) +``` + +## Running the example +### Environment +Ensure you are in a python environment which has TensorFlow, TornasoleTF and TornasoleCore installed. If you followed the recommended instructions of using Amazon Deep Learning AMI, then you might want to activate the tensorflow_p36 environment as follows. +``` +source activate tensorflow_p36 +``` +### Tornasole Path +We recommend saving tornasole outputs on S3 by passing the +flag `--tornasole_path` in the format `s3://bucket_name/prefix`. 
+The commands below will be shown with local path however so you can +run them immediately without having to setup S3 permissions. +### Example commands + +#### Running a well behaved job +``` +python simple.py --tornasole_path ~/ts_outputs/ok --lr 0.001 --scale 1 --steps 100 --tornasole_frequency 13 +``` +This will generate output like: +``` +INFO:tornasole:Saving for step 0: 89 objects +INFO:tornasole:Save complete, saved 1462 bytes +Step=0, Loss=83.92036437988281 +Step=1, Loss=92.88887786865234 +Step=2, Loss=119.52877044677734 +Step=3, Loss=63.18230438232422 +[...] +INFO:tornasole:Saving for step 91: 89 objects +INFO:tornasole:Save complete, saved 1462 bytes +Step=96, Loss=129.8429412841797 +Step=97, Loss=95.37699127197266 +Step=98, Loss=89.81304168701172 +Step=99, Loss=75.2679214477539 + +``` +Tornasole is saving all tensors every 13 steps (you can customize to save only certain tensors). +Tensors have been saved in `~/ts_outputs/ok/`. + +#### Running a job which produces nan +Now run the same job, but this time injecting errors (large learning rate, incorrect scaling features): +``` +python simple.py --tornasole_path ~/ts_outputs/not_good --lr 100 --scale 100000000000 --tornasole_frequency 9 --steps 100 +``` +This will generate: +``` +INFO:tornasole:Saving for step 0: 89 objects +INFO:tornasole:Save complete, saved 1462 bytes +Step=0, Loss=1.0731928032228293e+24 +Step=1, Loss=1.1620568222637874e+24 +Step=2, Loss=nan +Step=3, Loss=nan +... +Step=96, Loss=nan +Step=97, Loss=nan +Step=98, Loss=nan +INFO:tornasole:Saving for step 99: 89 objects +INFO:tornasole:Save complete, saved 1462 bytes +Step=99, Loss=nan +``` +Tornasole is saving every 9 steps. +Tensors have been saved in `~/ts_outputs/not_good/`. + +### Analysis +We can invoke a rule provided by Tornasole to monitor tensors for nan. +This can be run even while training is going on, it will continuously monitor tensors and +invoke the rule on each new step. Once the training ends you can stop this job. 
+You can also do the same analysis after the training job has ended. +``` +python -m tornasole.rules.rule_invoker --trial-dir ~/ts_outputs/not_good --rule-name ExplodingTensor +``` +Refer [this page](docs/analysis/README.md) for more details on analysis. + +### More +Please refer to [Tornasole Tensorflow page](docs/tensorflow/README.md) and the various flags in the script to customize the behavior further. + + diff --git a/examples/tensorflow/training_scripts/simple/simple.py b/examples/tensorflow/training_scripts/simple/simple.py new file mode 100644 index 0000000000..1349ef030c --- /dev/null +++ b/examples/tensorflow/training_scripts/simple/simple.py @@ -0,0 +1,71 @@ +import argparse +import numpy as np +import tensorflow as tf +import tornasole.tensorflow as ts +import random + +parser = argparse.ArgumentParser() +parser.add_argument('--lr', type=float, help="Learning Rate", default=0.001 ) +parser.add_argument('--steps', type=int, help="Number of steps to run", default=100 ) +parser.add_argument('--scale', type=float, help="Scaling factor for inputs", default=1.0 ) +parser.add_argument('--tornasole_path', type=str) +parser.add_argument('--tornasole_frequency', type=int, + help="How often to save TS data", default=10) +parser.add_argument('--random_seed', type=bool, default=False) +feature_parser = parser.add_mutually_exclusive_group(required=False) +feature_parser.add_argument('--reductions', dest='reductions', action='store_true', + help="save reductions of tensors instead of saving full tensors") +feature_parser.add_argument('--no_reductions', dest='reductions', action='store_false', + help="save full tensors") +args = parser.parse_args() + +# these random seeds are only intended for test purpose. 
+# for now, 2,2,12 could promise no assert failure when running tornasole_rules test_rules.py with config.yaml +# if you wish to change the number, notice that certain steps' tensor value may be capable of variation +if args.random_seed: + tf.set_random_seed(2) + np.random.seed(2) + random.seed(12) + +# Network definition +# Note the use of name scopes +with tf.name_scope('foobar'): + x = tf.placeholder(shape=(None, 2), dtype=tf.float32) + w = tf.Variable(initial_value=[[10.], [10.]], name='weight1') +with tf.name_scope('foobaz'): + w0 = [[1], [1.]] + y = tf.matmul(x, w0) +loss = tf.reduce_mean((tf.matmul(x, w) - y) ** 2, name="loss") + +global_step = tf.Variable(17, name="global_step", trainable=False) +increment_global_step_op = tf.assign(global_step, global_step+1) + +optimizer = tf.train.AdamOptimizer(args.lr) + +# Wrap the optimizer with TornasoleOptimizer so Tornasole can find gradients and optimizer_variables to save +optimizer = ts.TornasoleOptimizer(optimizer) + +# use this wrapped optimizer to minimize loss +optimizer_op = optimizer.minimize(loss, global_step=increment_global_step_op) + +# save tensors as reductions if necessary +rdnc = ts.ReductionConfig(reductions=['mean'],abs_reductions=['max'], norms=['l1']) if args.reductions else None + +# create the hook +# Note that we are saving all tensors here by passing save_all=True +hook = ts.TornasoleHook(out_dir=args.tornasole_path, + save_all=True, + include_collections=['weights', 'gradients'], + save_config=ts.SaveConfig(save_interval=args.tornasole_frequency), + reduction_config=rdnc) + +hook.set_mode(ts.modes.TRAIN) + +# pass the hook to hooks parameter of monitored session +sess = tf.train.MonitoredSession(hooks=[hook]) + +# use this session for running the tensorflow model +for i in range(args.steps): + x_ = np.random.random((10, 2)) * args.scale + _loss, opt, gstep = sess.run([loss, optimizer_op, increment_global_step_op], {x: x_}) + print (f'Step={i}, Loss={_loss}') diff --git a/setup.py 
b/setup.py index 69f3f98b0b..8bfafe12d6 100644 --- a/setup.py +++ b/setup.py @@ -2,40 +2,97 @@ import sys import setuptools -with open("README.md", "r") as fh: - long_description = fh.read() +CURRENT_VERSION = '0.2.1' +FRAMEWORKS = ['tensorflow', 'pytorch', 'mxnet'] def compile_summary_protobuf(): - proto_path = 'tornasole_core/tfevent' + proto_path = 'tornasole/core/tfevent' proto_files = os.path.join(proto_path, '*.proto') cmd = 'protoc ' + proto_files + ' --python_out=.' print('compiling protobuf files in {}'.format(proto_path)) return os.system('set -ex &&' + cmd) +def get_framework_packages(f): + return ['tornasole.' + f + '*', 'tests.' + f + '*'] + +def get_frameworks_to_build(): + with_frameworks = {} + for f in FRAMEWORKS: + with_frameworks[f] = os.environ.get('TORNASOLE_WITH_' + f.upper(), False) + if with_frameworks[f] in ['1', 'True', 'true']: + with_frameworks[f] = True + else: + with_frameworks[f] = False + enabled_some_framework = any(with_frameworks.values()) + if not enabled_some_framework: + print('Building for all frameworks in one package') + for f in FRAMEWORKS: + with_frameworks[f] = True + return with_frameworks + +def get_packages_to_include(frameworks_to_build): + exclude_packages = [] + include_framework_packages = [] + for f in FRAMEWORKS: + fp = get_framework_packages(f) + exclude_packages.extend(fp) + if frameworks_to_build[f]: + include_framework_packages.extend(fp) + include = setuptools.find_packages(exclude=exclude_packages) + include.extend(include_framework_packages) + packages = setuptools.find_packages(include=include) + print(packages) + return packages + +def get_tests_packages(frameworks_to_build): + tests_packages = ['pytest'] + for f, v in frameworks_to_build.items(): + if v: + if f in ['tensorflow', 'mxnet']: + tests_packages.append(f) + if f == 'pytorch': + tests_packages.extend(['torch', 'torchvision']) + return tests_packages + +def build_package(version): + # todo: fix long description + # with open('docs/'+ name + 
'/README.md', "r") as fh: + # long_description = fh.read() + + frameworks_to_build = get_frameworks_to_build() + tests_packages = get_tests_packages(frameworks_to_build) + packages = get_packages_to_include(frameworks_to_build) + setuptools.setup( + name='tornasole', + version=version, + author="The Tornasole Team", + author_email="tornasole@amazon.com", + description="Tornasole", + # long_description=long_description, + # long_description_content_type="text/markdown", + url="https://github.com/awslabs/tornasole_core", + packages=packages, + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + ], + #pinning aioboto3 version as aiobot3 is pinning versions + # https://github.com/aio-libs/aiobotocore/issues/718 + install_requires = ['aioboto3==6.4.1', 'nest_asyncio', + 'protobuf>=3.6.0' ,'botocore==1.12.91', + 'boto3==1.9.91', 'aiobotocore==0.10.2', + 'numpy', 'joblib'], + setup_requires=["pytest-runner"], + tests_require=tests_packages, + python_requires='>=3.6' + ) if compile_summary_protobuf() != 0: print('ERROR: Compiling summary protocol buffers failed. You will not be ' - 'able to use the logging APIs for visualizing MXNet data in TensorBoard. ' - 'Please make sure that you have installed protobuf3 compiler and runtime correctly.') + 'able to use Tornasole.' 
+ 'Please make sure that you have installed protobuf3 ' + 'compiler and runtime correctly.') sys.exit(1) -setuptools.setup( - name="tornasole_core", - version="0.2", - author="The Tornasole Team", - author_email="tornasole@amazon.com", - description="Tornasole Core", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/awslabs/tornasole_core", - packages=setuptools.find_packages(), - classifiers=[ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: Apache Software License", - "Operating System :: OS Independent", - ], - #pinning aioboto3 version as aiobot3 is pinning versions https://github.com/aio-libs/aiobotocore/issues/718 - install_requires = ['aioboto3==6.4.1', 'nest_asyncio', 'protobuf>=3.6.0' ,'botocore==1.12.91','boto3==1.9.91', 'aiobotocore==0.10.2'], - setup_requires=["pytest-runner"], - tests_require=["pytest", "tensorflow"], -) +build_package(version=CURRENT_VERSION) \ No newline at end of file diff --git a/tornasole_core/tfrecord/__init__.py b/tests/__init__.py similarity index 100% rename from tornasole_core/tfrecord/__init__.py rename to tests/__init__.py diff --git a/tests/analysis/__init__.py b/tests/analysis/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/analysis/config.yaml b/tests/analysis/config.yaml new file mode 100644 index 0000000000..1b90cee95c --- /dev/null +++ b/tests/analysis/config.yaml @@ -0,0 +1,193 @@ +- + - values # values + - [simple.py: &simple + $CODEBUILD_SRC_DIR/examples/tensorflow/training_scripts/simple/simple.py, + torch_simple.py: &torch_simple + $CODEBUILD_SRC_DIR/examples/pytorch/scripts/simple.py, + train_imagenet_resnet_hvd.py: &train_imagenet_resnet_hvd + $CODEBUILD_SRC_DIR/examples/tensorflow/training_scripts/resnet50/train_imagenet_resnet_hvd.py, + mnist_gluon_basic_hook_demo.py: &mnist_gluon_basic_hook_demo + $CODEBUILD_SRC_DIR/examples/mxnet/scripts/mnist_gluon_basic_hook_demo.py, + 
mnist_gluon_all_zero_demo.py: &mnist_gluon_all_zero_demo + $CODEBUILD_SRC_DIR/examples/mxnet/scripts/mnist_gluon_all_zero_demo.py, + mnist_gluon_vg_demo.py: &mnist_gluon_vg_demo + $CODEBUILD_SRC_DIR/examples/mxnet/scripts/mnist_gluon_vg_demo.py, + invoker.py: &invoker + $CODEBUILD_SRC_DIR/tests/analysis/invoker.py, + Enable: &Enable # enable the test case + True, + Disable: &Disable # disable the test case + False, + ] + +# test cases for tensorflow +- # exploding_tensor/tf/false + - tensorflow + - *Enable + - [*simple, + --lr 0.001 --scale 1 --steps 68 --tornasole_frequency 13 --random_seed True, + *invoker, + --rule_name explodingtensor --flag False --end_step 66 + ] +- # exploding_tensor/tf/true + - tensorflow + - *Enable + - [*simple, + --lr 1000000000000 --scale 10000000000000 --tornasole_frequency 9 --steps 66 --random_seed True, + *invoker, + --rule_name explodingtensor --flag True --end_step 64 + ] + +- # vanishing_grad/tf/false + - tensorflow + - *Enable + - [*simple, + --lr 0.001 --scale 1 --steps 68 --tornasole_frequency 13 --random_seed True, + *invoker, + --rule_name vanishinggradient --flag False --end_step 66 + ] +- # vanishing_grad/tf/true + - tensorflow + - *Disable # tested on remote ubuntu machine with tensorflow_p36 venv + - [*train_imagenet_resnet_hvd, # this training script will generate 190 steps + --clear_log --enable_tornasole --tornasole_save_weights --tornasole_save_gradients + --tornasole_step_interval 10 --constant_initializer 0.01 --random_seed True, + *invoker, + --rule_name vanishinggradient --flag True --end_step 181 + ] + +- # weight_update_ratio/tf/false + - tensorflow + - *Enable + - [*simple, + --lr 0.001 --scale 1 --steps 75 --tornasole_frequency 1 --random_seed True, + *invoker, + --rule_name weightupdateratio --flag False --end_step 71 + ] +- # weight_update_ratio/tf/true + - tensorflow + - *Enable + - [*simple, + --lr 0.0000000001 --scale 1 --steps 75 --tornasole_frequency 1 --random_seed True, + *invoker, + --rule_name 
weightupdateratio --flag True --end_step 71 + ] + + +# test cases for mxnet +- # exploding_tensor/mxnet/false + - mxnet + - *Enable + - [*mnist_gluon_basic_hook_demo, + --random_seed True --num_steps 6, + *invoker, + --rule_name explodingtensor --flag False --end_step 4 + ] +- # exploding_tensor/mxnet/true + - mxnet + - *Enable + - [*mnist_gluon_basic_hook_demo, + --learning_rate 1000000000 --random_seed True --num_steps 6, + *invoker, + --rule_name explodingtensor --flag True --end_step 4 + ] + +- # vanishing_grad/mxnet/false + - mxnet + - *Enable + - [*mnist_gluon_basic_hook_demo, + --random_seed True --num_steps 6, + *invoker, + --rule_name vanishinggradient --flag False --end_step 4 + ] +- # vanishing_grad/mxnet/true + - mxnet + - *Enable + - [*mnist_gluon_vg_demo, # this training script will generate 1300 steps + --random_seed True --num_steps 33 --tornasole_frequency 30, + *invoker, + --rule_name vanishinggradient --flag True --start_step 1 --end_step 31 + ] # notice, step 0 always returns False + +- # weight_update_ratio/mxnet/false + - mxnet + - *Enable + - [*mnist_gluon_basic_hook_demo, + --random_seed True --num_steps 6, + *invoker, + --rule_name weightupdateratio --flag False --end_step 4 + ] +- # weight_update_ratio/mxnet/true + - mxnet + - *Enable + - [*mnist_gluon_basic_hook_demo, + --learning_rate 0.0000000001 --random_seed True --num_steps 6, + *invoker, + --rule_name weightupdateratio --flag True --end_step 4 + ] +- # all_zero/mxnet/false + - mxnet + - *Enable + - [*mnist_gluon_basic_hook_demo, + --random_seed True --num_steps 5, + *invoker, + --rule_name allzero --flag False --end_step 3 --collection weights + ] +- # all_zero/mxnet/true + - mxnet + - *Enable + - [*mnist_gluon_all_zero_demo, + --random_seed True --num_steps 5, + *invoker, + --rule_name allzero --flag True --end_step 3 --collections weights --collections ReluActivation --collections bias + ] + +# test cases for pytorch +- # exploding_tensor/pytorch/false + - pytorch + - *Enable + - 
[*torch_simple, + --lr .01 --momentum 0.8 --tornasole-frequency 8 --steps 100 --hook-type saveall --random-seed True, + *invoker, + --rule_name explodingtensor --end_step 97 --flag False + ] +- # exploding_tensor/pytorch/true + - pytorch + - *Enable + - [*torch_simple, + --lr 1000000.0 --momentum 10 --tornasole-frequency 8 --steps 100 --hook-type saveall --random-seed True, + *invoker, + --rule_name explodingtensor --end_step 97 --flag True + ] +- # vanishing_grad/pytorch/false + - pytorch + - *Enable + - [*torch_simple, + --lr .01 --momentum 0.9 --tornasole-frequency 3 --steps 100 --hook-type saveall --random-seed True, + *invoker, + --rule_name vanishinggradient --end_step 97 --flag False + ] +- # vanishing_grad/pytorch/true + - pytorch + - *Enable # tested on remote ubuntu machine with tensorflow_p36 venv + - [*torch_simple, # this training script will generate 100 steps + --lr 1.0 --momentum 0.9 --tornasole-frequency 3 --steps 100 --hook-type saveall --random-seed True, + *invoker, + --rule_name vanishinggradient --end_step 97 --flag True + ] +- # weight_update_ratio/pytorch/false + - pytorch + - *Enable + - [*torch_simple, + --lr .0001 --momentum 0.08 --tornasole-frequency 1 --steps 100 --hook-type saveall --random-seed True, + *invoker, + --rule_name weightupdateratio --start_step 8 --end_step 97 --flag False + ] +- # weight_update_ratio/pytorch/true + - pytorch + - *Enable + - [*torch_simple, + --lr .0000000000001 --momentum .000001 --tornasole-frequency 1 --steps 100 --hook-type saveall --random-seed True, + *invoker, + --rule_name weightupdateratio --start_step 8 --end_step 97 --flag True + ] diff --git a/tests/analysis/conftest.py b/tests/analysis/conftest.py new file mode 100644 index 0000000000..12eb078cd4 --- /dev/null +++ b/tests/analysis/conftest.py @@ -0,0 +1,3 @@ +def pytest_addoption(parser): + parser.addoption('--mode', action='store', dest='mode') + parser.addoption('--path_to_config', action='store', dest='path_to_config') diff --git 
a/tests/analysis/exceptions/__init__.py b/tests/analysis/exceptions/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/analysis/exceptions/test_exceptions.py b/tests/analysis/exceptions/test_exceptions.py new file mode 100644 index 0000000000..3789ca76f7 --- /dev/null +++ b/tests/analysis/exceptions/test_exceptions.py @@ -0,0 +1,65 @@ +import uuid +from tests.analysis.utils import generate_data +from tornasole.trials import create_trial +from tornasole.exceptions import * +import boto3 as boto3 + +def del_s3(bucket, file_path): + s3_client = boto3.client('s3') + s3_client.delete_object(Bucket=bucket, Key=file_path) + +def test_refresh_tensors(): + trial_name = str(uuid.uuid4()) + path = 's3://tornasolecodebuildtest/rules/tensors/ts_output/train/' + bucket = 'tornasolecodebuildtest' + num_steps = 8 + num_tensors = 10 + for i in range(num_steps): + if i % 2 == 0: + continue + generate_data(path=path, trial=trial_name, num_tensors=num_tensors, + step=i, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3)) + tr = create_trial(path + trial_name) + assert len(tr.available_steps()) == 4 + + try: + tr.tensor('bar') + assert False + except TensorUnavailable: + pass + del_s3(bucket, file_path=path) + + assert tr.tensor('foo_1') is not None + assert tr.tensor('foo_1').value(num_steps - 1) is not None + try: + tr.tensor('foo_1').value(num_steps - 2) + assert False + except StepUnavailable: + pass + + try: + tr.tensor('foo_1').value(num_steps*2) + assert False + except StepNotYetAvailable: + pass + + for i in range(num_steps, num_steps*2): + if i % 2 == 0: + continue + generate_data(path=path, trial=trial_name, num_tensors=num_tensors, + step=i, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3)) + + assert tr.tensor('foo_1').value(num_steps + 1) is not None + try: + tr.tensor('foo_1').value(num_steps) + assert False + except StepUnavailable: + pass + + try: + tr.tensor('foo_1').value(num_steps * 3) + assert False + except StepNotYetAvailable: + 
pass + + diff --git a/tests/analysis/integration_testing_rules.py b/tests/analysis/integration_testing_rules.py new file mode 100644 index 0000000000..5abab08a6c --- /dev/null +++ b/tests/analysis/integration_testing_rules.py @@ -0,0 +1,171 @@ +import shutil +import os +from multiprocessing import * +from tornasole.core.utils import get_logger +import yaml +import time +import asyncio +import aioboto3 +from tornasole.core.access_layer.s3handler import ReadObjectRequest, S3Handler, ListRequest +import logging.handlers +import time + +logger = get_logger() + +# store path to config file and test mode for testing rule scrip with training script +class TestRules(): + def __init__(self, mode, path_to_config): + """ + :param mode: mode could be either 'tensorflow' or 'mxnet' + :param path_to_config: the path of config file which contains path to training and test scripts and corresponding arg strings + """ + self.mode = mode + self.path_to_config = path_to_config + + # mode is either 'serial' or 'parallel' + def configure_log(self, path_train_script, path_test_script, trial_dir, mode): + location = 's3' if trial_dir.startswith('s3') else 'local' + training_script_name = path_train_script.split('/')[-1].strip('.py') + test_script_name = path_test_script.split('/')[-1].strip('.py') + # add independent logger for serial job + fh = logging.FileHandler(os.path.join(os.getcwd(), + format(f"{training_script_name}_{test_script_name}_{location}_{mode}"))) + logger = logging.getLogger('tornasole') + logging.basicConfig(level=logging.INFO) + fh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')) + logger.addHandler(fh) # enable to write log into log file + return logger + + # delete the s3 folders using aioboto3 + async def del_folder(self, bucket, keys): + loop = asyncio.get_event_loop() + client = aioboto3.client('s3', loop=loop) + await asyncio.gather(*[client.delete_object(Bucket=bucket, Key=key) for key in keys]) + await client.close() + + # 
delete outputs generated by all training processes + # local_trials: trial dirs on local, e.g., './output/trial' + def delete_local_trials(self, local_trials): + for trial in local_trials: + trial_root = trial.split('/')[1] + if os.path.exists(trial): + shutil.rmtree(trial_root) + + # delete the s3 folders using aioboto3 + # s3_trials: trial dirs on s3, e.g., 's3://bucket_name/trial' + def delete_s3_trials(self, s3_trials): + s3_handler = S3Handler() + list_req = [] + bucket_name = '' + for trial in s3_trials: + bucket_name = trial.split('/')[2] + trial_name = trial.split('/')[3] + list_req.append(ListRequest(Bucket=bucket_name, Prefix=trial_name)) + keys = s3_handler.list_prefixes(list_req) + # flat nested list + keys = [item for sublist in keys for item in sublist] + loop = asyncio.get_event_loop() + task = loop.create_task(self.del_folder(bucket_name, keys)) + loop.run_until_complete(task) + + # run a 'job' in serial. a 'job' is a training/test scripts combination + def run_job_in_serial(self, path_train_script, train_script_args, path_test_script, test_script_args, trial_dir): + self.run_train(path_train_script, train_script_args, path_test_script, trial_dir) + logger.info(f'Finished Serial training job: {path_train_script}') + self.run_test(path_test_script, test_script_args, path_train_script, trial_dir) + logger.info(f'Finished Serial testing job: {path_test_script}') + + # run a training script only + def run_train(self, path_train_script, train_script_args, path_test_script, trial_dir): + logger.info("running training script {}".format(path_train_script)) + if path_train_script.split('/')[-1] == 'mnist_gluon_vg_demo.py' \ + or path_train_script.split('/')[-1] == 'mnist_gluon_basic_hook_demo.py': + commands = format(f"python {path_train_script} --output-uri {trial_dir} {train_script_args}") + else: + commands = format(f"TORNASOLE_LOG_LEVEL=info python {path_train_script} --tornasole_path {trial_dir} {train_script_args}") + os.system(commands) # 
os.system(commands) enables the usage of cmd executable prompts + logger.info(f'Finished Parallel training job: {path_train_script}') + + # run a test script only + def run_test(self, path_test_script, test_script_args, path_train_script, trial_dir): + logger.info("running test script {}".format(path_test_script)) + commands = format(f"TORNASOLE_LOG_LEVEL=debug python {path_test_script} --tornasole_path {trial_dir} {test_script_args}") + os.system(commands) # os.system(commands) enables the usage of cmd executable prompts + logger.info(f'Finished Parallel testing job: {path_test_script}') + + # run 'job's provided by user. a 'job' is a training/test scripts combination + # mode: testing mode, either 'auto' or 'manual' + # jobs: a list of lists, the sublist is called a ‘job’ + # each job is run in serial and parallel on both local and s3 + def run_jobs(self): + # load config file + f = open(self.path_to_config) + jobs = yaml.load(f) + process_list = [] + local_trials = set() + s3_trials = set() + # execute all the 'job's + for job in jobs: + # format of a 'job' is: + # - tensorflow/mxnet + # - *Enable/*Disable + # - [, + # , + # , + # + # ] + if job[0] != 'tensorflow' and job[0] != 'pytorch' and job[0] != 'mxnet' and job[0] != 'values': + raise Exception('Wrong test case category', job[0]) + # only run the tests which mode is what we want + if job[0] == self.mode and job[1]: + # run 'job' in serial on local and s3 + for trial_dir in ['./local_test/trial', 's3://tornasolecodebuildtest/trial']: + time_stamp = time.time() + name = 'serial_{}_{}_{}_{}'.format(job[2][0], job[2][2], trial_dir+str(time_stamp), 'serial') + process_list.append(Process(name=name, target=self.run_job_in_serial, args=(job[2][0], job[2][1], job[2][2], job[2][3], trial_dir+str(time_stamp)))) + local_trials.add(trial_dir+str(time_stamp)) if trial_dir.startswith('.') else s3_trials.add(trial_dir+str(time_stamp)) + + # run 'job' in parallel on local and s3 + for trial_dir in ['./local_test/trial', 
's3://tornasolecodebuildtest/trial']: + time_stamp = time.time() + name = 'train_parallel_{}_{}'.format(job[2][0], trial_dir + str(time_stamp)) + process_list.append(Process(name=name, + target=self.run_train, args=(job[2][0], job[2][1], job[2][2], trial_dir+str(time_stamp)))) + name = 'test_parallel_{}_{}'.format(job[2][2], trial_dir + str(time_stamp)) + process_list.append(Process(name=name, target=self.run_test, args=(job[2][2], job[2][3], job[2][0], trial_dir+str(time_stamp)))) + local_trials.add(trial_dir+str(time_stamp)) if trial_dir.startswith('.') else s3_trials.add(trial_dir+str(time_stamp)) + + # execute all 'job's in parallel + for process in process_list: + process.start() + ended_processes = set() + while True: + if len(ended_processes) == len(process_list): + break + for process in process_list: + if process not in ended_processes and not process.is_alive(): + ended_processes.add(process) + logger.info('Process {} ended with exit code {}'.format(process.name, process.exitcode)) + process.join() + time.sleep(2) + + # once all jobs are finished, delete the outputs on local and s3 + self.delete_local_trials(local_trials) + self.delete_s3_trials(s3_trials) + +# only for codebuilding test +# enable args string with pytest +def test_test_rules(request): + mode = request.config.getoption('mode') + path_to_config = request.config.getoption('path_to_config') + TestRules(mode=mode, path_to_config=path_to_config).run_jobs() + +# test on local machine +# TestRules(mode='tensorflow', path_to_config='./config.yaml').run_jobs() +# + + + + + + diff --git a/tests/analysis/invoker.py b/tests/analysis/invoker.py new file mode 100644 index 0000000000..d918425f2c --- /dev/null +++ b/tests/analysis/invoker.py @@ -0,0 +1,86 @@ +from tornasole.exceptions import * +from tornasole.core.utils import get_logger +logger = get_logger() + +def invoke_rule(rule_obj, flag, start_step, end_step): + step = start_step if start_step is not None else 0 + logger.info('Started execution of 
rule {}'.format(type(rule_obj).__name__)) + return_false = False + while (end_step is None) or (step < end_step): # if end_step is not provided, do infinite checking + try: + rule_obj.invoke(step) + if flag == 'False': + return_false = True + elif flag == 'True': + # every step should return True in this case, + # meaning exception condition should be met + assert False + step += 1 + except StepUnavailable as e: + logger.info(e) + step += 1 + except TensorUnavailableForStep as e: + logger.info(e) + step += 1 + except RuleEvaluationConditionMet as e: + logger.info(e) + step += 1 + + # if flag is False, return_false should be True after the loop + if flag == 'False': + assert return_false + logger.info('Ending execution of rule {} with step={} '.format(rule_obj.__class__.__name__, step)) + + +if __name__ == '__main__': + import argparse + from tornasole.trials import create_trial + + parser = argparse.ArgumentParser() + parser.add_argument('--tornasole_path', type=str) + parser.add_argument('--rule_name', type=str) + parser.add_argument('--start_step', type=int) + parser.add_argument('--end_step', type=int) + parser.add_argument('--flag', type=str, default=None) + + parser.add_argument('--weightupdateratio_large_threshold', type=float, default=10) + parser.add_argument('--weightupdateratio_small_threshold', type=float, default=0.00000001) + + parser.add_argument('--vanishinggradient_threshold', type=float, default=0.0000001) + parser.add_argument('--collections', default=[], type=str, action='append', + help="""List of collection names. The rule will inspect tensors belonging to those collections. Required for allzero + rule.""") + parser.add_argument('--tensor-regex', default=[], type=str, action='append', + help="""List of regex patterns. The rule will inspect tensors that match these + patterns. 
Required for allzero + rule.""") + args = parser.parse_args() + if args.rule_name is None: + raise RuntimeError('Needs rule name to invoke') + + tr = create_trial(args.tornasole_path, range_steps=(args.start_step, args.end_step)) + if args.rule_name.lower() == 'vanishinggradient': + from tornasole.rules.generic import VanishingGradient + r = VanishingGradient(tr, threshold=args.vanishinggradient_threshold) + elif args.rule_name.lower() == 'explodingtensor': + from tornasole.rules.generic import ExplodingTensor + r = ExplodingTensor(tr) + elif args.rule_name.lower() == 'weightupdateratio': + from tornasole.rules.generic import WeightUpdateRatio + r = WeightUpdateRatio(tr, + large_threshold=args.weightupdateratio_large_threshold, + small_threshold=args.weightupdateratio_small_threshold) + elif args.rule_name.lower() == 'allzero': + if len(args.collections) == 0 and len(args.tensor_regex) == 0: + raise ValueError('Please provide either the list of collection names or list of regex patterns for invoking ' + 'this rule.') + from tornasole.rules.generic import AllZero + r = AllZero(tr, args.collections, args.tensor_regex) + else: + raise ValueError('Please invoke any rules which take multiple trials, ' + 'or custom rules by passing the rule object to ' + 'invoke_rule() function. We do not currently ' + 'support running such rules from this python script.' 
+ 'Please refer to examples/scripts/ for examples' + 'on how to call invoke_rule') + invoke_rule(r, flag=args.flag, start_step=args.start_step, end_step=args.end_step) diff --git a/tests/analysis/rules/__init__.py b/tests/analysis/rules/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/analysis/rules/test_invoker.py b/tests/analysis/rules/test_invoker.py new file mode 100644 index 0000000000..17a38cdb2d --- /dev/null +++ b/tests/analysis/rules/test_invoker.py @@ -0,0 +1,35 @@ +from tests.analysis.utils import generate_data + +from tornasole.rules.generic import ExplodingTensor +from tornasole.trials import create_trial +import uuid +import numpy as np +from tornasole.exceptions import * +from tornasole.rules.rule_invoker import invoke_rule + +def test_invoker_exception(): + run_id = str(uuid.uuid4()) + base_path = 'ts_output/rule_invoker/' + path = base_path + run_id + + num_tensors = 3 + + generate_data(path=base_path, trial=run_id, num_tensors=num_tensors, + step=0, tname_prefix='foo', worker='algo-1', shape=(1,), + data=np.array([np.nan])) + generate_data(path=base_path, trial=run_id, num_tensors=num_tensors, + step=1, tname_prefix='foo', worker='algo-1', shape=(1,)) + generate_data(path=base_path, trial=run_id, num_tensors=num_tensors, + step=2, tname_prefix='foo', worker='algo-1', shape=(1,), + data=np.array([np.nan])) + + tr = create_trial(path) + r = ExplodingTensor(tr) + + c = 0 + for start_step in range(2): + try: + invoke_rule(r, start_step=start_step, end_step=3, raise_rule_eval=True) + except RuleEvaluationConditionMet as e: + c += 1 + assert c == 2 \ No newline at end of file diff --git a/tests/analysis/rules/test_rule_no_refresh.py b/tests/analysis/rules/test_rule_no_refresh.py new file mode 100644 index 0000000000..f93ac250d1 --- /dev/null +++ b/tests/analysis/rules/test_rule_no_refresh.py @@ -0,0 +1,59 @@ +from tests.analysis.utils import generate_data + +from tornasole.rules import Rule, RequiredTensors +from 
tornasole.trials import create_trial +import uuid +from tornasole.exceptions import StepNotYetAvailable +from tornasole.analysis.utils import no_refresh + + +def test_no_refresh_invocation(): + class TestRule(Rule): + def __init__(self, base_trial): + super().__init__(base_trial=base_trial) + + def required_tensors(self, step): + reqt = RequiredTensors(self.base_trial) + for t in self.base_trial.tensors(): + reqt.need_tensor(t, steps=[step]) + return [reqt] + + def invoke_at_step(self, step): + for t in self.base_trial.tensors(): + if step == 0: + assert self.base_trial.tensor(t).value(step + 1) is not None + elif step == 1: + try: + self.base_trial.tensor(t).value(step + 1) + assert False + except StepNotYetAvailable: + pass + run_id = str(uuid.uuid4()) + base_path = 'ts_output/rule_no_refresh/' + path = base_path + run_id + + num_tensors = 3 + + generate_data(path=base_path, trial=run_id, num_tensors=num_tensors, + step=0, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3)) + generate_data(path=base_path, trial=run_id, num_tensors=num_tensors, + step=1, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3)) + + tr = create_trial(path) + r = TestRule(tr) + r.invoke(0) + r.invoke(1) + + generate_data(path=base_path, trial=run_id, num_tensors=num_tensors, + step=2, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3)) + + # will not see step2 data + with no_refresh(tr): + r.invoke_at_step(1) + + # below will refresh + try: + r.invoke(1) + assert False + except AssertionError: + pass diff --git a/tests/analysis/tensors/__init__.py b/tests/analysis/tensors/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/analysis/tensors/test_refresh.py b/tests/analysis/tensors/test_refresh.py new file mode 100644 index 0000000000..16c91ee170 --- /dev/null +++ b/tests/analysis/tensors/test_refresh.py @@ -0,0 +1,54 @@ +import uuid +from tests.analysis.utils import generate_data +from tornasole.trials import create_trial +from tornasole.exceptions import 
* + +def test_refresh_tensors(): + trial_name = str(uuid.uuid4()) + path = 's3://tornasole-testing/rules/tensors/ts_output/train/' + num_steps = 8 + num_tensors = 10 + for i in range(num_steps): + if i % 2 == 0: + continue + generate_data(path=path, trial=trial_name, num_tensors=num_tensors, + step=i, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3)) + tr = create_trial(path + trial_name) + assert len(tr.available_steps()) == 4 + + try: + tr.tensor('bar') + assert False + except TensorUnavailable: + pass + + assert tr.tensor('foo_1') is not None + # available + assert tr.tensor('foo_1').value(num_steps - 1) is not None + # not saved + try: + tr.tensor('foo_1').value(num_steps - 2) + assert False + except StepUnavailable: + pass + + for i in range(num_steps, num_steps*2): + if i % 2 == 0: + continue + generate_data(path=path, trial=trial_name, num_tensors=num_tensors, + step=i, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3)) + + # refreshed + assert tr.tensor('foo_1').value(num_steps + 1) is not None + try: + tr.tensor('foo_1').value(num_steps) + assert False + except StepUnavailable: + pass + + try: + tr.tensor('foo_1').value(num_steps * 3) + assert False + except StepNotYetAvailable: + pass + diff --git a/tests/analysis/trials/__init__.py b/tests/analysis/trials/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/analysis/trials/test_create.py b/tests/analysis/trials/test_create.py new file mode 100644 index 0000000000..8840fdc63c --- /dev/null +++ b/tests/analysis/trials/test_create.py @@ -0,0 +1,25 @@ +import uuid +from tests.analysis.utils import generate_data +from tornasole.trials import create_trial + +def test_creation_local(): + trial_name = str(uuid.uuid4()) + path = 'ts_output/train/' + num_steps = 20 + num_tensors = 10 + for i in range(num_steps): + generate_data(path=path, trial=trial_name, num_tensors=num_tensors, + step=i, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3)) + tr = create_trial(path + '/' + 
trial_name, range_steps=(0,5)) + assert len(tr.available_steps()) == 5 + +def test_creation_s3(): + trial_name = str(uuid.uuid4()) + path = 's3://tornasole-testing/rules/ts_output/train/' + num_steps = 8 + num_tensors = 10 + for i in range(num_steps): + generate_data(path=path, trial=trial_name, num_tensors=num_tensors, + step=i, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3)) + tr = create_trial(path + trial_name, range_steps=(0,5)) + assert len(tr.available_steps()) == 5 diff --git a/tests/analysis/trials/test_local.py b/tests/analysis/trials/test_local.py new file mode 100644 index 0000000000..13460c38f2 --- /dev/null +++ b/tests/analysis/trials/test_local.py @@ -0,0 +1,25 @@ +import uuid +import os +from tests.analysis.utils import generate_data, check_trial +from tornasole.trials.trial_catalog import LocalTrialCatalog +from tornasole.trials import LocalTrial + +def check_local(localdir, trial_name, num_steps, num_tensors): + tc = LocalTrialCatalog(localdir=localdir) + assert trial_name in tc.list_candidates() + path = os.path.join(localdir, trial_name) + trial_obj = LocalTrial(name=trial_name, dirname=path) + tc.add_trial(trial_name, trial_obj) + trial_obj2 = tc.get_trial(trial_name) + assert trial_obj == trial_obj2 + check_trial(trial_obj, num_tensors=num_tensors, num_steps=num_steps) + +def test_local(): + trial_name = str(uuid.uuid4()) + path = 'ts_output/train/' + num_steps = 20 + num_tensors = 10 + for i in range(num_steps): + generate_data(path=path, trial=trial_name, num_tensors=10, + step=i, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3), rank=0) + check_local(path, trial_name, num_steps=num_steps, num_tensors=num_tensors) diff --git a/tests/analysis/trials/test_modes.py b/tests/analysis/trials/test_modes.py new file mode 100644 index 0000000000..23bafe9dcf --- /dev/null +++ b/tests/analysis/trials/test_modes.py @@ -0,0 +1,75 @@ +from tornasole import modes +import shutil, os +import numpy as np +from tornasole.trials import create_trial 
+from tornasole.core.tensor import StepState +from datetime import datetime +from tornasole.core.writer import FileWriter +from tornasole.core.collection_manager import CollectionManager + +def test_modes_on_global_data(): + pass # other tests in create, local, s3 do this + + +def test_mode_data(): + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + path = 'ts_outputs' + + c = CollectionManager() + c.add("default") + c.get("default").tensor_names = ["arr"] + c.export(os.path.join(path, run_id, "collections.ts")) + tr = create_trial('ts_outputs/' + run_id) + + for s in range(0, 10): + fw = FileWriter(logdir='ts_outputs', trial=run_id, step=s) + if s % 2 == 0: + fw.write_tensor(tdata=np.array([[1.0, 2.0], [3.0, 4.0]], + dtype=np.float32), + tname='arr', mode=modes.TRAIN, + mode_step=s // 2) + else: + fw.write_tensor(tdata=np.array([[1.0, 2.0], [3.0, 4.0]], + dtype=np.float32), + tname='arr', mode=modes.EVAL, + mode_step=s // 2) + fw.close() + + if s % 2 == 0: + assert tr.has_passed_step(s // 2, mode=modes.TRAIN) == StepState.AVAILABLE + assert tr.has_passed_step(s // 2, mode=modes.EVAL) == StepState.NOT_YET_AVAILABLE + else: + assert tr.has_passed_step(s // 2, mode=modes.EVAL) == StepState.AVAILABLE + + assert tr.has_passed_step(s) == StepState.AVAILABLE + assert tr.has_passed_step(s+1) == StepState.NOT_YET_AVAILABLE + assert tr.has_passed_step(s + 1, mode=modes.TRAIN) == StepState.NOT_YET_AVAILABLE + + assert len(tr.tensors()) == 1 + assert len(tr.available_steps()) == 10 + assert len(tr.available_steps(mode=modes.TRAIN)) == 5 + assert len(tr.available_steps(mode=modes.EVAL)) == 5 + assert len(tr.modes()) == 2 + + for i in range(10): + if i % 2 == 0: + assert tr.mode(i) == modes.TRAIN + else: + assert tr.mode(i) == modes.EVAL + assert tr.mode_step(i) == i//2 + + for i in range(5): + assert tr.global_step(modes.TRAIN, i) == (i * 2) + assert tr.global_step(modes.EVAL, i) == (i * 2) + 1 + + assert len(tr.tensor('arr').steps()) == 10 + assert 
len(tr.tensor('arr').steps(mode=modes.TRAIN)) == 5 + assert len(tr.tensor('arr').steps(mode=modes.EVAL)) == 5 + + for i in range(10): + assert tr.tensor('arr').value(i) is not None + if i < 5 : + assert tr.tensor('arr').value(i, mode=modes.TRAIN) is not None + assert tr.tensor('arr').value(i, mode=modes.EVAL) is not None + + shutil.rmtree('ts_outputs/' + run_id) diff --git a/tests/analysis/trials/test_refresh.py b/tests/analysis/trials/test_refresh.py new file mode 100644 index 0000000000..8bf9f85ad2 --- /dev/null +++ b/tests/analysis/trials/test_refresh.py @@ -0,0 +1,98 @@ +import uuid +from tests.analysis.utils import generate_data +from tornasole.trials import create_trial +from tornasole.analysis.utils import no_refresh + + +def help_test_refresh_with_range(path): + trial_name = str(uuid.uuid4()) + num_steps = 8 + num_tensors = 10 + for i in range(num_steps): + generate_data(path=path, trial=trial_name, num_tensors=num_tensors, + step=i, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3)) + tr = create_trial(path + trial_name, range_steps=(0,5)) + assert len(tr.available_steps()) == 5 + for i in range(num_steps, num_steps*2): + generate_data(path=path, trial=trial_name, num_tensors=num_tensors, + step=i, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3), export_colls=False) + assert len(tr.available_steps()) == 5 + +def help_test_refresh(path): + trial_name = str(uuid.uuid4()) + num_steps = 8 + num_tensors = 10 + for i in range(num_steps): + generate_data(path=path, trial=trial_name, num_tensors=num_tensors, + step=i, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3)) + tr = create_trial(path + trial_name) + + assert 'foo_' + str(num_tensors+1) not in tr.tensors() + assert 'foo_1' in tr.tensors() + assert len(tr.available_steps()) == num_steps + assert len(tr.tensor('foo_1').steps()) == num_steps + + for i in range(num_steps, num_steps*2): + generate_data(path=path, trial=trial_name, num_tensors=num_tensors, + step=i, tname_prefix='foo', 
worker='algo-1', shape=(3, 3, 3), export_colls=False) + assert len(tr.tensor('foo_1').steps()) == num_steps*2 + assert len(tr.available_steps()) == num_steps*2 + + generate_data(path=path, trial=trial_name, num_tensors=num_tensors, + step=num_steps*2 + 1, + tname_prefix='foo', worker='algo-1', shape=(3, 3, 3), export_colls=False) + assert len(tr.available_steps()) == num_steps * 2 + 1 + + generate_data(path=path, trial=trial_name, num_tensors=num_tensors + 3, + step=num_steps * 2 + 2, + tname_prefix='foo', worker='algo-1', shape=(3, 3, 3), export_colls=False) + assert tr.tensor('foo_' + str(num_tensors+1)) is not None + +def help_test_no_refresh(path): + trial_name = str(uuid.uuid4()) + num_steps = 8 + num_tensors = 10 + + for i in range(num_steps): + generate_data(path=path, trial=trial_name, num_tensors=num_tensors, + step=i, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3)) + tr = create_trial(path + trial_name) + + assert 'foo_' + str(num_tensors+1) not in tr.tensors() + assert 'foo_1' in tr.tensors() + assert len(tr.available_steps()) == num_steps + assert len(tr.tensor('foo_1').steps()) == num_steps + + for i in range(num_steps, num_steps*2): + generate_data(path=path, trial=trial_name, num_tensors=num_tensors, + step=i, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3), export_colls=False) + + with no_refresh([tr]) as [tr]: + assert len(tr.tensor('foo_1').steps()) == num_steps + assert len(tr.available_steps()) == num_steps + + with no_refresh([tr]): + assert len(tr.tensor('foo_1').steps()) == num_steps + assert len(tr.available_steps()) == num_steps + + with no_refresh(tr): + assert len(tr.tensor('foo_1').steps()) == num_steps + assert len(tr.available_steps()) == num_steps + +def test_no_refresh_local(): + help_test_no_refresh('ts_output/train/') + +def test_no_refresh_s3(): + help_test_no_refresh('s3://tornasole-testing/rules/ts_output/train/') + +def test_refresh_with_range_local(): + help_test_refresh_with_range('ts_output/train/') + +def 
test_refresh_with_range_s3(): + help_test_refresh_with_range('s3://tornasole-testing/rules/ts_output/train/') + +def test_refresh_local(): + help_test_refresh('ts_output/train/') + +def test_refresh_s3(): + help_test_refresh('s3://tornasole-testing/rules/ts_output/train/') diff --git a/tests/analysis/trials/test_s3.py b/tests/analysis/trials/test_s3.py new file mode 100644 index 0000000000..fd6a803cb9 --- /dev/null +++ b/tests/analysis/trials/test_s3.py @@ -0,0 +1,75 @@ +import aioboto3 +import asyncio + +from tornasole.core.access_layer.s3handler import * +from tornasole.trials import S3Trial +from tornasole.core.collection_manager import CollectionManager +import uuid +import os +from tornasole.core.utils import is_s3 +from tests.analysis.utils import generate_data, check_trial + +def check_s3_trial(path, num_steps=20, num_tensors=10): + _, bucket, prefix = is_s3(path) + trial_obj = S3Trial(name=prefix, bucket_name=bucket, prefix_name=prefix) + check_trial(trial_obj, num_steps=num_steps, num_tensors=num_tensors) + +async def del_folder(bucket, keys): + loop = asyncio.get_event_loop() + client = aioboto3.client('s3', loop=loop) + await asyncio.gather(*[client.delete_object(Bucket=bucket, Key=key) for key in keys]) + await client.close() + +def test_s3(): + trial_name = str(uuid.uuid4()) + bucket = 'tornasole-testing' + path = 's3://' + os.path.join(bucket, 'tornasole_outputs/') + num_steps = 20 + num_tensors = 10 + for i in range(num_steps): + generate_data(path=path, trial=trial_name, num_tensors=10, + step=i, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3), rank=0) + check_s3_trial(os.path.join(path, trial_name), num_steps=num_steps, num_tensors=num_tensors) + + # delete the bucket after the test + s3_handler = S3Handler() + list_req = [ListRequest(Bucket='tornasole-testing', Prefix="tornasole_outputs/" + trial_name)] + keys = s3_handler.list_prefixes(list_req)[0] + + loop = asyncio.get_event_loop() + task = loop.create_task(del_folder('tornasole-testing', 
keys)) + loop.run_until_complete(task) + +def help_test_multiple_trials(num_steps = 20, num_tensors = 10): + trial_name = str(uuid.uuid4()) + bucket = 'tornasole-testing' + path = 's3://' + os.path.join(bucket, 'tornasole_outputs/') + + c = CollectionManager() + c.add("default") + c.get("default").tensor_names = ["foo_" + str(i) for i in range(num_tensors)] + c.export(path + trial_name + "/collections.ts") + c.export(path + trial_name + "/collections.ts") + for i in range(num_steps): + generate_data(path=path, trial=trial_name, num_tensors=num_tensors, + step=i, tname_prefix='foo', worker='algo-1', shape=(3, 3, 3), rank=0) + _, bucket, prefix = is_s3(os.path.join(path, trial_name)) + trial_obj = S3Trial(name=prefix, bucket_name=bucket, prefix_name=prefix) + return trial_obj, trial_name + +def test_multiple_s3_trials(num_trials = 4, num_steps = 5, num_tensors = 5): + data = [help_test_multiple_trials(num_steps, num_tensors) for i in range(num_trials)] + trials = [d[0] for d in data] + names = [d[1] for d in data] + evals = [check_trial(trial_obj, num_steps=num_steps, num_tensors=num_tensors) for trial_obj in trials] + + # delete the folders after the test + for name in names: + s3_handler = S3Handler() + list_req = [ListRequest(Bucket='tornasole-testing', Prefix="tornasole_outputs/" + name)] + keys = s3_handler.list_prefixes(list_req)[0] + + loop = asyncio.get_event_loop() + task = loop.create_task(del_folder('tornasole-testing', keys)) + loop.run_until_complete(task) + diff --git a/tests/analysis/utils.py b/tests/analysis/utils.py new file mode 100644 index 0000000000..e254845fce --- /dev/null +++ b/tests/analysis/utils.py @@ -0,0 +1,30 @@ +from tornasole.core.writer import FileWriter +import numpy as np +from tornasole.core.collection_manager import CollectionManager +import os + + +def generate_data(path, trial, step, tname_prefix, + num_tensors, worker, shape, dtype=np.float32, + rank=None, mode=None, mode_step=None, export_colls=True, + data=None): + with 
FileWriter(logdir=path, trial=trial, step=step, worker=worker) as fw: + for i in range(num_tensors): + if data is None: + data = np.ones(shape=shape, dtype=dtype) * step + fw.write_tensor(tdata=data, tname=f'{tname_prefix}_{i}', + mode=mode, mode_step=mode_step) + if export_colls: + c = CollectionManager() + c.add("default") + c.get("default").tensor_names = ["foo_" + str(i) for i in range(num_tensors)] + c.export(os.path.join(path, trial, "collections.ts")) + + +def check_trial(trial_obj, num_steps, num_tensors): + assert len(trial_obj.tensors()) == num_tensors + for t in trial_obj.tensors(): + assert len(trial_obj.tensor(t).steps()) == num_steps + for s in trial_obj.tensor(t).steps(): + v = trial_obj.tensor(t).value(s) + assert v is not None diff --git a/tests/core/__init__.py b/tests/core/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/test_collections.py b/tests/core/test_collections.py index 2873589e09..badaa4b262 100644 --- a/tests/core/test_collections.py +++ b/tests/core/test_collections.py @@ -1,6 +1,6 @@ -from tornasole_core.collection import Collection -from tornasole_core.collection_manager import CollectionManager -from tornasole_core.reduction_config import ReductionConfig +from tornasole.core.collection import Collection +from tornasole.core.collection_manager import CollectionManager +from tornasole.core.reduction_config import ReductionConfig def test_export_load(): # with none as save config diff --git a/tests/core/test_handler.py b/tests/core/test_handler.py index 510764c927..54e8e994af 100644 --- a/tests/core/test_handler.py +++ b/tests/core/test_handler.py @@ -1,7 +1,7 @@ import pytest import numpy as np -from tornasole_core.access_layer.s3handler import * -from tornasole_core.tfrecord.tensor_reader import * +from tornasole.core.access_layer.s3handler import * +from tornasole.core.tfrecord.tensor_reader import * ######## HELPER CLASSES AND FUNCTIONS ####### class TensorLocation: def __init__(self, 
event_file_name, start=0, length=None): @@ -12,9 +12,9 @@ def __init__(self, event_file_name, start=0, length=None): class Index(): def __init__(self): self.dummy = dict() - self.dummy["s3://ljain-tests/tfevents"] = dict() + self.dummy["s3://tornasolecodebuildtest/tfevents"] = dict() for i in range(5000): - self.dummy["s3://ljain-tests/tfevents"]["demo_" + str(i)] = [(0, TensorLocation("s3://ljain-tests/tfevents/demo_"+str(i)+".out.tfevents"))] + self.dummy["s3://tornasolecodebuildtest/tfevents"]["demo_" + str(i)] = [(0, TensorLocation("s3://tornasolecodebuildtest/tfevents/demo_"+str(i)+".out.tfevents"))] # input to get_index_for_tensors is a dict {path:{tensornames:[step_nums]}} # output of that fn is dict {path:{tname:[(step_num, TensorLocation)]}} @@ -64,7 +64,7 @@ def read_record(data, check=True): # If the corresponding tensor is not fetchable, then None is stored for its dictionary entry. def get_tensors(index, s3_handler, tlist, num_async_calls=500, timer=False): object_requests = [] - bucket = "ljain-tests" + bucket = "tornasolecodebuildtest" prefix = "tfevents" index_dict = dict() parent_path = "s3://" + bucket + "/" + prefix @@ -119,11 +119,11 @@ def test_download_objects(compare_speeds = False): def test_list_objects(): # s3trial = S3Trial('test', 'ljain-tests', 'demo') s3_handler = S3Handler() - req1 = ListRequest('ljain-tests', 'tfevents', '', '') - req2 = ListRequest('ljain-tests', 'rand_4mb_1000', '', '') - req3 = ListRequest('ljain-tests', 'rand_8mb_1000', '', '') - req4 = ListRequest('ljain-tests', 'demo_dir_structure/attempts/', '/') - req5 = ListRequest('ljain-tests', 'demo_dir_structure/attempts/', '/', 'demo_dir_structure/attempts/help') + req1 = ListRequest('tornasolecodebuildtest', 'tfevents', '', '') + req2 = ListRequest('tornasolecodebuildtest', 'rand_4mb_1000', '', '') + req3 = ListRequest('tornasolecodebuildtest', 'rand_8mb_1000', '', '') + req4 = ListRequest('tornasolecodebuildtest', 'demo_dir_structure/attempts/', '/') + req5 = 
ListRequest('tornasolecodebuildtest', 'demo_dir_structure/attempts/', '/', 'demo_dir_structure/attempts/help') files = s3_handler.list_prefixes([req1, req2, req3, req4, req5]) # test StartAfter and delimiters assert len(files[3]) == 5 and len(files[4]) == 3 diff --git a/tests/core/test_index.py b/tests/core/test_index.py index 1b4f97b808..a54c474643 100644 --- a/tests/core/test_index.py +++ b/tests/core/test_index.py @@ -1,9 +1,9 @@ import csv -from tornasole_core.writer import FileWriter -from tornasole_core.tfevent.event_file_writer import * -from tornasole_core.reader import FileReader -from tornasole_core.tfevent.util import EventFileLocation -from tornasole_core.indexutils import * +from tornasole.core.writer import FileWriter +from tornasole.core.tfevent.event_file_writer import * +from tornasole.core.reader import FileReader +from tornasole.core.tfevent.util import EventFileLocation +from tornasole.core.indexutils import * import shutil import os diff --git a/tests/core/test_modes.py b/tests/core/test_modes.py index c430f5fd58..96e61afd52 100644 --- a/tests/core/test_modes.py +++ b/tests/core/test_modes.py @@ -1,7 +1,7 @@ -from tornasole_core.writer import FileWriter -from tornasole_core.reader import FileReader +from tornasole.core.writer import FileWriter +from tornasole.core.reader import FileReader import numpy as np -from tornasole_core.modes import ModeKeys +from tornasole.core.modes import ModeKeys from datetime import datetime import glob import shutil diff --git a/tests/core/test_numpy.py b/tests/core/test_numpy.py index 0212ea28e3..6cd88cf8f6 100644 --- a/tests/core/test_numpy.py +++ b/tests/core/test_numpy.py @@ -1,8 +1,8 @@ import numpy as np import pytest import uuid -from tornasole_core.writer import FileWriter -from tornasole_core.reader import FileReader +from tornasole.core.writer import FileWriter +from tornasole.core.reader import FileReader def rw(path): """ diff --git a/tests/core/test_reduction_config.py 
b/tests/core/test_reduction_config.py index d21b85ac6e..9f49f85398 100644 --- a/tests/core/test_reduction_config.py +++ b/tests/core/test_reduction_config.py @@ -1,5 +1,5 @@ import pytest -from tornasole_core.reduction_config import ReductionConfig +from tornasole.core.reduction_config import ReductionConfig def test_export_load(): r1 = ReductionConfig(only_shape=True, reductions=['min'], norms=['l2']) diff --git a/tests/core/test_save_config.py b/tests/core/test_save_config.py index 08f207be1c..e6384efc1f 100644 --- a/tests/core/test_save_config.py +++ b/tests/core/test_save_config.py @@ -1,5 +1,5 @@ import pytest -from tornasole_core.save_config import SaveConfig +from tornasole.core.save_config import SaveConfig def test_export_load(): r1 = SaveConfig(save_interval=11, skip_num_steps=10, save_steps=[50], when_nan=['loss:0']) diff --git a/tests/core/test_training_end.py b/tests/core/test_training_end.py index 3445a50c85..18fac918f2 100644 --- a/tests/core/test_training_end.py +++ b/tests/core/test_training_end.py @@ -1,9 +1,9 @@ -import boto3 as boto3 -from tornasole_core.access_layer.utils import has_training_ended -from tornasole_core.access_layer.utils import training_has_ended +from tornasole.core.access_layer.utils import has_training_ended +from tornasole.core.access_layer.utils import training_has_ended import shutil -from tornasole_core.utils import is_s3 -from tornasole_core.access_layer.file import ensure_dir +import boto3 +from tornasole.core.utils import is_s3 +from tornasole.core.access_layer.file import ensure_dir def del_s3(bucket,file_path): s3_client = boto3.client('s3') @@ -27,5 +27,6 @@ def test_s3_training_end(): del_s3(bucket, s3dir) def test_negative_s3_training_end(): + bucket = 'tornasolecodebuildtest' s3dir = 's3://tornasolecodebuildtest/training_end_test_dir_negative' assert has_training_ended(s3dir) == False \ No newline at end of file diff --git a/tests/core/test_utils.py b/tests/core/test_utils.py index 942d53588f..16b50435a0 100644 
--- a/tests/core/test_utils.py +++ b/tests/core/test_utils.py @@ -1,4 +1,4 @@ -from tornasole_core.utils import is_s3, check_dir_exists +from tornasole.core.utils import is_s3, check_dir_exists def test_normal(): rval = is_s3('a/b/c') diff --git a/tests/mxnet/__init__.py b/tests/mxnet/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/mxnet/mnist_gluon_model.py b/tests/mxnet/mnist_gluon_model.py new file mode 100644 index 0000000000..7c7df27779 --- /dev/null +++ b/tests/mxnet/mnist_gluon_model.py @@ -0,0 +1,100 @@ +from mxnet import gluon, init, autograd +from mxnet.gluon import nn +from mxnet.gluon.data.vision import datasets, transforms +import time +import mxnet as mx +from tornasole import modes +import numpy as np + +def acc(output, label): + return (output.argmax(axis=1) == + label.astype('float32')).mean().asscalar() + + +def run_mnist_gluon_model(hook=None, hybridize=False, set_modes=False, + num_steps_train=None, num_steps_eval=None, make_input_zero=False, normalize_mean=0.13, + normalize_std=0.31): + batch_size = 1024 + if make_input_zero: + mnist_train = datasets.FashionMNIST(train=True, + transform=lambda data, label: (data.astype(np.float32) * 0, label)) + normalize_mean=0.0 + else: + mnist_train = datasets.FashionMNIST(train=True) + + X, y = mnist_train[0] + ('X shape: ', X.shape, 'X dtype', X.dtype, 'y:', y) + + text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat', + 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot'] + X, y = mnist_train[0:10] + transformer = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(normalize_mean, 0.31)]) + + mnist_train = mnist_train.transform_first(transformer) + mnist_valid = gluon.data.vision.FashionMNIST(train=False) + + train_data = gluon.data.DataLoader( + mnist_train, batch_size=batch_size, shuffle=True, num_workers=4) + valid_data = gluon.data.DataLoader( + mnist_valid.transform_first(transformer), + batch_size=batch_size, num_workers=4) + + # Create 
Model in Gluon + net = nn.HybridSequential() + net.add(nn.Conv2D(channels=6, kernel_size=5, activation='relu'), + nn.MaxPool2D(pool_size=2, strides=2), + nn.Conv2D(channels=16, kernel_size=3, activation='relu'), + nn.MaxPool2D(pool_size=2, strides=2), + nn.Flatten(), + nn.Dense(120, activation="relu"), + nn.Dense(84, activation="relu"), + nn.Dense(10)) + net.initialize(init=init.Xavier(),ctx=mx.cpu()) + if hybridize: + net.hybridize(()) + + if hook is not None: + # Register the forward Hook + hook.register_hook(net) + + softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss() + trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1}) + + # Start the training. + for epoch in range(2): + train_loss, train_acc, valid_acc = 0., 0., 0. + tic = time.time() + if set_modes: + hook.set_mode(modes.TRAIN) + + i = 0 + for data, label in train_data: + data = data.as_in_context(mx.cpu(0)) + # forward + backward + with autograd.record(): + output = net(data) + loss = softmax_cross_entropy(output, label) + loss.backward() + # update parameters + trainer.step(batch_size) + # calculate training metrics + train_loss += loss.mean().asscalar() + train_acc += acc(output, label) + i += 1 + if num_steps_train is not None and i > num_steps_train: + break + # calculate validation accuracy + if set_modes: + hook.set_mode(modes.EVAL) + i = 0 + for data, label in valid_data: + data = data.as_in_context(mx.cpu(0)) + valid_acc += acc(net(data), label) + i += 1 + if num_steps_eval is not None and i > num_steps_eval: + break + print("Epoch %d: loss %.3f, train acc %.3f, test acc %.3f, in %.1f sec" % ( + epoch, train_loss/len(train_data), train_acc/len(train_data), + valid_acc/len(valid_data), time.time()-tic)) diff --git a/tests/mxnet/test_hook.py b/tests/mxnet/test_hook.py new file mode 100644 index 0000000000..b6754f08f3 --- /dev/null +++ b/tests/mxnet/test_hook.py @@ -0,0 +1,17 @@ +from .mnist_gluon_model import run_mnist_gluon_model +from tornasole.mxnet.hook import 
TornasoleHook as t_hook +from tornasole import SaveConfig, modes +from tornasole.mxnet import reset_collections +from datetime import datetime +import shutil +from tornasole.core.access_layer.utils import has_training_ended + +def test_hook(): + reset_collections() + save_config = SaveConfig(save_steps=[0,1,2,3]) + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + out_dir='./newlogsRunTest/' + run_id + hook = t_hook(out_dir=out_dir, save_config=save_config) + assert (has_training_ended(out_dir) == False) + run_mnist_gluon_model(hook=hook, num_steps_train=10, num_steps_eval=10) + shutil.rmtree(out_dir) diff --git a/tests/mxnet/test_hook_all_zero.py b/tests/mxnet/test_hook_all_zero.py new file mode 100644 index 0000000000..9f60929664 --- /dev/null +++ b/tests/mxnet/test_hook_all_zero.py @@ -0,0 +1,36 @@ +from .mnist_gluon_model import run_mnist_gluon_model +from tornasole.mxnet.hook import TornasoleHook as t_hook +from tornasole.mxnet import Collection, reset_collections +from tornasole import SaveConfig +from tornasole.trials import create_trial +import tornasole.mxnet as tm +from datetime import datetime +import numpy as np +import shutil + +def test_hook_all_zero(): + reset_collections() + tm.get_collection('ReluActivation').include(["relu*", "input_*"]) + save_config = SaveConfig(save_steps=[0,1,2,3]) + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + out_dir = './newlogsRunTest/' + run_id + print("Registering the hook with out_dir {0}".format(out_dir)) + hook = t_hook(out_dir=out_dir, save_config=save_config, include_collections=['ReluActivation','weights', 'bias','gradients']) + run_mnist_gluon_model(hook=hook, num_steps_train=10, num_steps_eval=10, make_input_zero=True) + + + print("Created the trial with out_dir {0}".format(out_dir)) + tr = create_trial(out_dir) + assert tr + assert len(tr.available_steps()) == 4 + + tnames = tr.tensors_matching_regex('conv._input') + print(tnames) + tname = 
tr.tensors_matching_regex('conv._input')[0] + print(tname) + print(tr.tensor(tname).steps()) + conv_tensor_value = tr.tensor(tname).value(step_num=0) + is_zero = np.all(conv_tensor_value==0) + assert is_zero == True + + shutil.rmtree(out_dir) \ No newline at end of file diff --git a/tests/mxnet/test_hook_custom_collection.py b/tests/mxnet/test_hook_custom_collection.py new file mode 100644 index 0000000000..2631e65915 --- /dev/null +++ b/tests/mxnet/test_hook_custom_collection.py @@ -0,0 +1,17 @@ +from .mnist_gluon_model import run_mnist_gluon_model +from tornasole.mxnet.hook import TornasoleHook as t_hook +from tornasole.mxnet import Collection, SaveConfig, reset_collections +import tornasole.mxnet as tm +from datetime import datetime +import shutil + +def test_hook_custom_collection(): + reset_collections() + tm.get_collection('ReluActivation').include(["relu*", "input_*"]) + save_config = SaveConfig(save_steps=[0,1,2,3]) + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + out_dir = './newlogsRunTest/' + run_id + hook = t_hook(out_dir=out_dir, save_config=save_config, include_collections=['ReluActivation']) + run_mnist_gluon_model(hook=hook, num_steps_train=10, num_steps_eval=10) + shutil.rmtree(out_dir) + diff --git a/tests/mxnet/test_hook_reduce_config.py b/tests/mxnet/test_hook_reduce_config.py new file mode 100644 index 0000000000..37580e17b3 --- /dev/null +++ b/tests/mxnet/test_hook_reduce_config.py @@ -0,0 +1,61 @@ +from .mnist_gluon_model import run_mnist_gluon_model +from tornasole.mxnet.hook import TornasoleHook as t_hook +from tornasole.mxnet import SaveConfig, Collection, ReductionConfig, reset_collections +import tornasole.mxnet as tm +from tornasole.trials import create_trial +import shutil + +from datetime import datetime + +def test_save_config(): + reset_collections() + global_reduce_config = ReductionConfig(reductions=["max", "mean"]) + global_save_config = SaveConfig(save_steps=[0,1,2,3]) + + 
tm.get_collection("ReluActivation").include(["relu*"]) + tm.get_collection("ReluActivation").set_save_config(SaveConfig(save_steps=[4,5,6])) + tm.get_collection("ReluActivation").set_reduction_config(ReductionConfig(reductions=["min"], abs_reductions=["max"])) + + tm.get_collection("flatten").include(["flatten*"]) + tm.get_collection("flatten").set_save_config(SaveConfig(save_steps=[4,5,6])) + tm.get_collection("flatten").set_reduction_config(ReductionConfig(norms=["l1"], abs_norms=["l2"])) + + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + out_dir = './newlogsRunTest/' + run_id + hook = t_hook(out_dir=out_dir, save_config=global_save_config, include_collections=['weights', 'bias','gradients', + 'default', 'ReluActivation', 'flatten'], + reduction_config=global_reduce_config) + run_mnist_gluon_model(hook=hook, num_steps_train=10, num_steps_eval=10) + + + #Testing + tr = create_trial(out_dir) + assert tr + assert len(tr.available_steps())==7 + + tname = tr.tensors_matching_regex('conv._weight')[0] + print(tr.tensors()) + # Global reduction with max and mean + weight_tensor = tr.tensor(tname) + max_val = weight_tensor.reduction_value(step_num=1, abs=False, reduction_name='max') + assert max_val != None + mean_val = weight_tensor.reduction_value(step_num=1, abs=False, reduction_name='mean') + assert mean_val != None + + # custom reduction at step 4 with reduction = 'min and abs reduction = 'max' + tname = tr.tensors_matching_regex('conv._relu_input_0')[0] + relu_input = tr.tensor(tname) + min_val = relu_input.reduction_value(step_num=4, abs=False, reduction_name='min') + assert min_val != None + abs_max_val = relu_input.reduction_value(step_num=4, abs=True, reduction_name='max') + assert abs_max_val != None + + # Custom reduction with normalization + tname = tr.tensors_matching_regex('flatten._input_0')[0] + flatten_input = tr.tensor(tname) + l1_norm = flatten_input.reduction_value(step_num=4, abs=False, reduction_name='l1') + assert l1_norm != None 
+ l2_norm = flatten_input.reduction_value(step_num=4, abs=True, reduction_name='l2') + assert l2_norm != None + + shutil.rmtree(out_dir) \ No newline at end of file diff --git a/tests/mxnet/test_hook_save_all.py b/tests/mxnet/test_hook_save_all.py new file mode 100644 index 0000000000..4176bb0808 --- /dev/null +++ b/tests/mxnet/test_hook_save_all.py @@ -0,0 +1,15 @@ +from .mnist_gluon_model import run_mnist_gluon_model +from tornasole.mxnet.hook import TornasoleHook as t_hook +from tornasole.mxnet import SaveConfig, reset_collections +import shutil +from datetime import datetime + +def test_save_all(): + reset_collections() + save_config = SaveConfig(save_steps=[0,1,2,3]) + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + out_dir = './newlogsRunTest/' + run_id + hook = t_hook(out_dir=out_dir, save_config=save_config, save_all=True) + run_mnist_gluon_model(hook=hook, num_steps_train=7, num_steps_eval=5) + shutil.rmtree(out_dir) + diff --git a/tests/mxnet/test_hook_save_config.py b/tests/mxnet/test_hook_save_config.py new file mode 100644 index 0000000000..6dcb17c0f4 --- /dev/null +++ b/tests/mxnet/test_hook_save_config.py @@ -0,0 +1,22 @@ +from .mnist_gluon_model import run_mnist_gluon_model +from tornasole.mxnet.hook import TornasoleHook as t_hook +from tornasole.mxnet import SaveConfig, Collection, reset_collections +import tornasole.mxnet as tm +import shutil + +from datetime import datetime + +def test_save_config(): + reset_collections() + save_config_collection = SaveConfig(save_steps=[4,5,6]) + + custom_collect = tm.get_collection("ReluActivation") + custom_collect.set_save_config(save_config_collection) + custom_collect.include(["relu*", "input_*", "output*"]) + save_config = SaveConfig(save_steps=[0,1,2,3]) + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + out_dir = './newlogsRunTest/' + run_id + hook = t_hook(out_dir=out_dir, save_config=save_config, include_collections=["ReluActivation", 'weights', 'bias','gradients', 
'default']) + run_mnist_gluon_model(hook=hook, num_steps_train=10, num_steps_eval=10) + shutil.rmtree(out_dir) + diff --git a/tests/mxnet/test_modes.py b/tests/mxnet/test_modes.py new file mode 100644 index 0000000000..44185728cd --- /dev/null +++ b/tests/mxnet/test_modes.py @@ -0,0 +1,22 @@ +from .mnist_gluon_model import run_mnist_gluon_model +from tornasole.mxnet.hook import TornasoleHook as t_hook +from tornasole.mxnet import SaveConfig, modes, reset_collections +from datetime import datetime +from tornasole.trials import create_trial + +def test_modes(): + reset_collections() + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + path = './newlogsRunTest/' + run_id + hook = t_hook(out_dir=path, + save_config={modes.TRAIN: SaveConfig(save_interval=50), + modes.EVAL: SaveConfig(save_interval=10)}) + run_mnist_gluon_model(hook=hook, set_modes=True) + + tr = create_trial(path) + assert len(tr.modes()) == 2 + assert len(tr.available_steps()) == 5 + assert len(tr.available_steps(mode=modes.TRAIN)) == 3 + assert len(tr.available_steps(mode=modes.EVAL)) == 2 + + diff --git a/tests/mxnet/test_training_end.py b/tests/mxnet/test_training_end.py new file mode 100644 index 0000000000..d60576660a --- /dev/null +++ b/tests/mxnet/test_training_end.py @@ -0,0 +1,33 @@ +import shutil +from tornasole.core.access_layer.utils import has_training_ended +import subprocess +import uuid +import boto3 +import sys + + +def test_end_local_training(): + run_id = str(uuid.uuid4()) + out_dir='./newlogsRunTest/' + run_id + assert has_training_ended(out_dir) == False + subprocess.check_call([sys.executable, "examples/mxnet/scripts/mnist_gluon_basic_hook_demo.py", + "--output-uri", out_dir, '--num_steps', '10']) + assert has_training_ended(out_dir) + shutil.rmtree(out_dir) + + +def del_s3(bucket,file_path): + s3_client = boto3.client('s3') + s3_client.delete_object(Bucket=bucket, Key=file_path) + + +def test_end_s3_training(): + run_id = str(uuid.uuid4()) + bucket = 
'tornasolecodebuildtest' + key = 'newlogsRunTest/' + run_id + out_dir= bucket + "/" + key + assert has_training_ended(out_dir) == False + subprocess.check_call([sys.executable, "examples/mxnet/scripts/mnist_gluon_basic_hook_demo.py", + "--output-uri", out_dir, '--num_steps', '10']) + assert has_training_ended(out_dir) + del_s3(bucket, key) diff --git a/tests/pytorch/__init__.py b/tests/pytorch/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/pytorch/test_reduce_config.py b/tests/pytorch/test_reduce_config.py new file mode 100644 index 0000000000..09d4def8b9 --- /dev/null +++ b/tests/pytorch/test_reduce_config.py @@ -0,0 +1,104 @@ +from tornasole.pytorch.hook import TornasoleHook as t_hook +from tornasole.pytorch import SaveConfig, Collection, ReductionConfig, reset_collections +import tornasole.pytorch as ts +from tornasole.trials import create_trial +import shutil +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torch.autograd import Variable +from datetime import datetime + + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.add_module('conv1', nn.Conv2d(1, 20, 5, 1)) + self.add_module('relu0', nn.ReLU()) + self.add_module('max_pool', nn.MaxPool2d(2, stride=2)) + self.add_module('conv2', nn.Conv2d(20, 50, 5, 1)) + self.add_module('relu1', nn.ReLU()) + self.add_module('max_pool2', nn.MaxPool2d(2, stride=2)) + self.add_module('fc1', nn.Linear(4*4*50, 500)) + self.add_module('relu2', nn.ReLU()) + self.add_module('fc2', nn.Linear(500, 10)) + + + def forward(self, x): + x = self.relu0(self.conv1(x)) + x = self.max_pool(x) + x = self.relu1(self.conv2(x)) + x = self.max_pool2(x) + x = x.view(-1, 4*4*50) + x = self.relu2(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + +def train(model, device, optimizer, num_steps=500, save_steps=[]): + model.train() + count = 0 + # for batch_idx, (data, target) in enumerate(train_loader): + for i in 
range(num_steps): + batch_size=32 + data, target = torch.rand(batch_size, 1, 28, 28), torch.rand(batch_size).long() + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(Variable(data, requires_grad = True)) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + +def test_reduce_config(): + reset_collections() + global_reduce_config = ReductionConfig(reductions=["max", "mean"]) + global_save_config = SaveConfig(save_steps=[0,1,2,3]) + + ts.get_collection("ReluActivation").include(["relu*"]) + ts.get_collection("ReluActivation").set_save_config(SaveConfig(save_steps=[4,5,6])) + ts.get_collection("ReluActivation").set_reduction_config(ReductionConfig(reductions=["min"], abs_reductions=["max"])) + + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + out_dir = './newlogsRunTest/' + run_id + hook = t_hook(out_dir=out_dir, save_config=global_save_config, include_collections=['weights', 'bias','gradients', + 'default', 'ReluActivation', 'flatten'], + reduction_config=global_reduce_config) + model = Net().to(torch.device("cpu")) + hook.register_hook(model) + optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) + train(model, torch.device("cpu"), optimizer, num_steps=10, save_steps=[i for i in range(7)]) + + #Testing + tr = create_trial(out_dir) + assert tr + assert len(tr.available_steps())==7 + print(tr.tensors()) + tname = tr.tensors_matching_regex('Net_conv[0-9]+.weight')[0] + print(tr.tensors()) + + # Global reduction with max and mean + weight_tensor = tr.tensor(tname) + max_val = weight_tensor.reduction_value(step_num=1, abs=False, reduction_name='max') + assert max_val != None + mean_val = weight_tensor.reduction_value(step_num=1, abs=False, reduction_name='mean') + assert mean_val != None + + # custom reduction at step 4 with reduction = 'min and abs reduction = 'max' + tname = tr.tensors_matching_regex('relu0_input_0')[0] + relu_input = tr.tensor(tname) + min_val = 
relu_input.reduction_value(step_num=4, abs=False, reduction_name='min') + assert min_val != None + abs_max_val = relu_input.reduction_value(step_num=4, abs=True, reduction_name='max') + assert abs_max_val != None + + # Custom reduction with normalization + # tname = tr.tensors_matching_regex('flatten._input_0')[0] + # flatten_input = tr.tensor(tname) + # l1_norm = flatten_input.reduction_value(step_num=4, abs=False, reduction_name='l1') + # assert l1_norm != None + # l2_norm = flatten_input.reduction_value(step_num=4, abs=True, reduction_name='l2') + # assert l2_norm != None + + shutil.rmtree(out_dir) + +test_reduce_config() diff --git a/tests/pytorch/test_simple_write.py b/tests/pytorch/test_simple_write.py new file mode 100644 index 0000000000..b00e0e519c --- /dev/null +++ b/tests/pytorch/test_simple_write.py @@ -0,0 +1,201 @@ +from __future__ import print_function +import numpy as np +import argparse +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torch.autograd import Variable +from tornasole.core.writer import FileWriter +from tornasole.pytorch.hook import * +from tornasole.pytorch.torch_collection import * +from tornasole.pytorch import reset_collections +import uuid +from tornasole.trials import create_trial +import shutil +import os + +# from tensorflow.python.tools import inspect_checkpoint as chkp +from tornasole.core.reader import FileReader +from tornasole.core.tfevent.util import EventFileLocation + + +class Net(nn.Module): + def __init__(self, mode='weights-bias-gradients', to_save=[]): + super(Net, self).__init__() + self.add_module('fc1', nn.Linear(20, 500)) + self.add_module('relu1', nn.ReLU()) + self.add_module('fc2', nn.Linear(500, 10)) + self.add_module('relu2', nn.ReLU()) + self.add_module('fc3', nn.Linear(10, 4)) + + self.saved = dict() + self.to_save = to_save + self.step = -1 + self.mode = mode + + for name, param in self.named_parameters(): + pname = 'Net_' + name + self.saved[pname] 
= dict() + self.saved['gradient/' + pname] = dict() + + if self.mode == 'saveall': + self.saved['fc1_input_0'] = dict() + self.saved['relu1_input_0'] = dict() + self.saved['fc2_input_0'] = dict() + self.saved['relu2_input_0'] = dict() + self.saved['fc3_input_0'] = dict() + self.saved['Net_input_0'] = dict() + self.saved['fc1_output0'] = dict() + self.saved['relu1_output0'] = dict() + self.saved['fc2_output0'] = dict() + self.saved['relu2_output0'] = dict() + self.saved['fc3_output0'] = dict() + self.saved['Net_output0'] = dict() + + + def forward(self, x_in): + self.step += 1 + + for name, param in self.named_parameters(): + pname = 'Net_' + name + self.saved[pname][self.step] = param.data.numpy().copy() + + fc1_out = self.fc1(x_in) + relu1_out = self.relu1(fc1_out) + fc2_out = self.fc2(relu1_out) + relu2_out = self.relu2(fc2_out) + fc3_out = self.fc3(relu2_out) + out = F.log_softmax(fc3_out, dim=1) + + if self.mode == 'saveall': + self.saved['fc1_input_0'][self.step] = x_in.data.numpy().copy() + self.saved['relu1_input_0'][self.step] = fc1_out.data.numpy().copy() + self.saved['fc2_input_0'][self.step] = relu1_out.data.numpy().copy() + self.saved['relu2_input_0'][self.step] = fc2_out.data.numpy().copy() + self.saved['fc3_input_0'][self.step] = relu2_out.data.numpy().copy() + self.saved['Net_input_0'][self.step] = fc3_out.data.numpy().copy() + + self.saved['fc1_output0'][self.step] = fc1_out.data.numpy().copy() + self.saved['relu1_output0'][self.step] = relu1_out.data.numpy().copy() + self.saved['fc2_output0'][self.step] = fc2_out.data.numpy().copy() + self.saved['relu2_output0'][self.step] = relu2_out.data.numpy().copy() + self.saved['fc3_output0'][self.step] = fc3_out.data.numpy().copy() + self.saved['Net_output0'][self.step] = out.data.numpy().copy() + return out + +# Create a tornasole hook. The initilization of hook determines which tensors +# are logged while training is in progress. 
Following function shows the default initialization that enables logging of
model.fc3.weight.grad.data.numpy().copy() + model.saved['gradient/Net_fc1.bias'][i] = model.fc1.bias.grad.data.numpy().copy() + model.saved['gradient/Net_fc2.bias'][i] = model.fc2.bias.grad.data.numpy().copy() + model.saved['gradient/Net_fc3.bias'][i] = model.fc3.bias.grad.data.numpy().copy() + optimizer.step() + +def delete_local_trials(local_trials): + for trial in local_trials: + shutil.rmtree(trial) + +def test_weights_bias_gradients(): + reset_collections() + prefix = str(uuid.uuid4()) + hook_type = 'weights-bias-gradients' + device = torch.device("cpu") + save_steps = [i * 20 for i in range(5)] + model = Net(mode=hook_type, to_save=save_steps).to(device) + hook = create_tornasole_hook('./test_output/test_weights_bias_gradients/' + prefix, model, hook_type, save_steps=save_steps) + + hook.register_hook(model) + optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) + train(model, device, optimizer, num_steps=101, save_steps=save_steps) + trial = create_trial(path='./test_output/test_weights_bias_gradients/' + prefix, + name='test output') + grads = ['gradient/Net_fc1.weight', 'gradient/Net_fc2.weight', 'gradient/Net_fc3.weight', + 'gradient/Net_fc1.bias', 'gradient/Net_fc2.bias', 'gradient/Net_fc3.bias'] + weights = ['Net_fc1.weight', 'Net_fc2.weight', 'Net_fc3.weight'] + bias = ['Net_fc1.bias', 'Net_fc2.bias', 'Net_fc3.bias'] + + tensors = grads + bias + weights + + assert len(trial.available_steps()) == len(save_steps) + for step in trial.available_steps(): + for tname in tensors: + assert tname in trial.tensors() + assert step in trial.tensor(tname).steps() + saved_tensor = trial.tensor(tname).value(step) + in_memory = model.saved[tname][step] + assert np.allclose(in_memory, saved_tensor) + delete_local_trials(['./test_output/test_weights_bias_gradients/' + prefix]) + + +def test_saveall(): + reset_collections() + prefix = str(uuid.uuid4()) + hook_type = 'saveall' + device = torch.device("cpu") + save_steps = [i * 20 for i in range(5)] + model = 
Net(mode=hook_type, to_save=save_steps).to(device) + hook = create_tornasole_hook('./test_output/test_saveall/' + prefix, model, hook_type, save_steps=save_steps) + + hook.register_hook(model) + optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) + train(model, device, optimizer, num_steps=101, save_steps=save_steps) + trial = create_trial(path='./test_output/test_saveall/' + prefix, + name='test output') + grads = ['gradient/Net_fc1.weight', 'gradient/Net_fc2.weight', 'gradient/Net_fc3.weight', + 'gradient/Net_fc1.bias', 'gradient/Net_fc2.bias', 'gradient/Net_fc3.bias'] + weights = ['Net_fc1.weight', 'Net_fc2.weight', 'Net_fc3.weight'] + bias = ['Net_fc1.bias', 'Net_fc2.bias', 'Net_fc3.bias'] + inputs = ['fc1_input_0', 'relu1_input_0', 'fc2_input_0', 'relu2_input_0', 'fc3_input_0'] + outputs = ['fc1_output0', 'relu1_output0', 'fc2_output0', 'relu2_output0', 'fc3_output0'] + tensors = grads + bias + weights + inputs + outputs + + assert len(trial.available_steps()) == len(save_steps) + + for step in trial.available_steps(): + for tname in tensors: + assert tname in trial.tensors() + assert step in trial.tensor(tname).steps() + saved_tensor = trial.tensor(tname).value(step) + in_memory = model.saved[tname][step] + assert np.allclose(in_memory, saved_tensor) + + delete_local_trials(['./test_output/test_saveall/' + prefix]) diff --git a/tests/tensorflow/__init__.py b/tests/tensorflow/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/tensorflow/hooks/__init__.py b/tests/tensorflow/hooks/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/tensorflow/hooks/test_estimator_modes.py b/tests/tensorflow/hooks/test_estimator_modes.py new file mode 100644 index 0000000000..710c70834a --- /dev/null +++ b/tests/tensorflow/hooks/test_estimator_modes.py @@ -0,0 +1,171 @@ +import tensorflow as tf +import numpy as np +import shutil +import os +from datetime import datetime +from .utils import 
TORNASOLE_TF_HOOK_TESTS_DIR + +import tornasole.tensorflow as ts +from tornasole.tensorflow import reset_collections +from tornasole.trials import create_trial + +def help_test_mnist(path, save_config): + trial_dir = path + tf.reset_default_graph() + reset_collections() + + def cnn_model_fn(features, labels, mode): + """Model function for CNN.""" + # Input Layer + input_layer = tf.reshape(features["x"], [-1, 28, 28, 1]) + + # Convolutional Layer #1 + conv1 = tf.layers.conv2d( + inputs=input_layer, + filters=32, + kernel_size=[5, 5], + padding="same", + activation=tf.nn.relu) + + # Pooling Layer #1 + pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) + + # Convolutional Layer #2 and Pooling Layer #2 + conv2 = tf.layers.conv2d( + inputs=pool1, + filters=64, + kernel_size=[5, 5], + padding="same", + activation=tf.nn.relu) + pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) + + # Dense Layer + pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64]) + dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu) + dropout = tf.layers.dropout( + inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN) + + # Logits Layer + logits = tf.layers.dense(inputs=dropout, units=10) + + predictions = { + # Generate predictions (for PREDICT and EVAL mode) + "classes": tf.argmax(input=logits, axis=1), + # Add `softmax_tensor` to the graph. It is used for PREDICT and by the + # `logging_hook`. 
+ "probabilities": tf.nn.softmax(logits, name="softmax_tensor") + } + + if mode == tf.estimator.ModeKeys.PREDICT: + return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) + + # Calculate Loss (for both TRAIN and EVAL modes) + loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits) + + # Configure the Training Op (for TRAIN mode) + if mode == tf.estimator.ModeKeys.TRAIN: + optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001) + optimizer = ts.TornasoleOptimizer(optimizer) + train_op = optimizer.minimize( + loss=loss, + global_step=tf.train.get_global_step()) + return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) + + # Add evaluation metrics (for EVAL mode) + eval_metric_ops = { + "accuracy": tf.metrics.accuracy( + labels=labels, predictions=predictions["classes"]) + } + return tf.estimator.EstimatorSpec( + mode=mode, loss=loss, eval_metric_ops=eval_metric_ops) + + # Load training and eval data + ((train_data, train_labels), + (eval_data, eval_labels)) = tf.keras.datasets.mnist.load_data() + + train_data = train_data / np.float32(255) + train_labels = train_labels.astype(np.int32) # not required + + eval_data = eval_data / np.float32(255) + eval_labels = eval_labels.astype(np.int32) # not required + + mnist_classifier = tf.estimator.Estimator( + model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model") + + train_input_fn = tf.estimator.inputs.numpy_input_fn( + x={"x": train_data}, + y=train_labels, + batch_size=100, + num_epochs=None, + shuffle=True) + + eval_input_fn = tf.estimator.inputs.numpy_input_fn( + x={"x": eval_data}, + y=eval_labels, + num_epochs=1, + shuffle=False) + + hook = ts.TornasoleHook(out_dir=trial_dir, + save_config=save_config) + hook.set_mode(ts.modes.TRAIN) + # train one step and display the probabilties + mnist_classifier.train( + input_fn=train_input_fn, + steps=10, + hooks=[hook]) + + hook.set_mode(ts.modes.EVAL) + mnist_classifier.evaluate(input_fn=eval_input_fn, 
hooks=[hook]) + + hook.set_mode(ts.modes.TRAIN) + mnist_classifier.train( + input_fn=train_input_fn, + steps=20, + hooks=[hook]) + + tr = create_trial(trial_dir) + return tr + +def test_mnist_local(): + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = os.path.join(TORNASOLE_TF_HOOK_TESTS_DIR, run_id) + tr = help_test_mnist(trial_dir, ts.SaveConfig(save_interval=2)) + assert len(tr.available_steps()) == 55 + assert len(tr.available_steps(mode=ts.modes.TRAIN)) == 15 + assert len(tr.available_steps(mode=ts.modes.EVAL)) == 40 + assert len(tr.tensors()) == 16 + shutil.rmtree(trial_dir) + +def test_mnist_s3(): + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = 's3://tornasole-testing/tornasole_tf/hooks/estimator_modes/' + run_id + tr = help_test_mnist(trial_dir, ts.SaveConfig(save_interval=2)) + assert len(tr.available_steps()) == 55 + assert len(tr.available_steps(mode=ts.modes.TRAIN)) == 15 + assert len(tr.available_steps(mode=ts.modes.EVAL)) == 40 + assert len(tr.tensors()) == 16 + + +def test_mnist_local_multi_save_configs(): + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = os.path.join(TORNASOLE_TF_HOOK_TESTS_DIR, run_id) + tr = help_test_mnist(trial_dir, {ts.modes.TRAIN: ts.SaveConfig(save_interval=2), + ts.modes.EVAL: ts.SaveConfig(save_interval=1)}) + assert len(tr.available_steps()) == 94 + assert len(tr.available_steps(mode=ts.modes.TRAIN)) == 15 + assert len(tr.available_steps(mode=ts.modes.EVAL)) == 79 + assert len(tr.tensors()) == 16 + shutil.rmtree(trial_dir) + +def test_mnist_s3_multi_save_configs(): + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = 's3://tornasole-testing/tornasole_tf/hooks/estimator_modes/' + run_id + tr = help_test_mnist(trial_dir, {ts.modes.TRAIN: ts.SaveConfig(save_interval=2), + ts.modes.EVAL: ts.SaveConfig(save_interval=1)}) + assert len(tr.available_steps()) == 94 + assert len(tr.available_steps(mode=ts.modes.TRAIN)) 
== 15 + assert len(tr.available_steps(mode=ts.modes.EVAL)) == 79 + assert len(tr.tensors()) == 16 + + + diff --git a/tests/tensorflow/hooks/test_reductions.py b/tests/tensorflow/hooks/test_reductions.py new file mode 100644 index 0000000000..0f85a818da --- /dev/null +++ b/tests/tensorflow/hooks/test_reductions.py @@ -0,0 +1,79 @@ +import os +from datetime import datetime +from tornasole.core.reduction_config import ALLOWED_REDUCTIONS, ALLOWED_NORMS +from tornasole.exceptions import * + +def simple_model(hook, steps=10, lr=0.4): + import tensorflow as tf + from tornasole.tensorflow import TornasoleOptimizer + import numpy as np + + # Network definition + with tf.name_scope('foobar'): + x = tf.placeholder(shape=(None, 2), dtype=tf.float32) + w = tf.Variable(initial_value=[[10.], [10.]], name='weight1') + with tf.name_scope('foobaz'): + w0 = [[1], [1.]] + y = tf.matmul(x, w0) + loss = tf.reduce_mean((tf.matmul(x, w) - y) ** 2, name="loss") + + global_step = tf.Variable(17, name="global_step", trainable=False) + increment_global_step_op = tf.assign(global_step, global_step + 1) + + optimizer = tf.train.AdamOptimizer(lr) + optimizer = TornasoleOptimizer(optimizer) + optimizer_op = optimizer.minimize(loss, global_step=increment_global_step_op) + + sess = tf.train.MonitoredSession(hooks=[hook]) + + for i in range(steps): + x_ = np.random.random((10, 2)) * 0.1 + _loss, opt, gstep = sess.run([loss, optimizer_op, increment_global_step_op], {x: x_}) + print(f'Step={i}, Loss={_loss}') + + sess.close() + +def get_dirs_files(path): + entries = os.listdir(path) + onlyfiles = [f for f in entries if os.path.isfile(os.path.join(path, f))] + subdirs = [x for x in entries if x not in onlyfiles] + return subdirs, onlyfiles + + +def test_reductions(): + from tornasole.tensorflow import TornasoleHook, \ + get_collections, ReductionConfig, SaveConfig, reset_collections + import tensorflow as tf + + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = 
os.path.join('/tmp/tornasole_rules_tests/', run_id) + + tf.reset_default_graph() + reset_collections() + + rdnc = ReductionConfig(reductions=ALLOWED_REDUCTIONS, + abs_reductions=ALLOWED_REDUCTIONS, + norms=ALLOWED_NORMS, + abs_norms=ALLOWED_NORMS) + hook = TornasoleHook(out_dir=trial_dir, + save_config=SaveConfig(save_interval=1), + reduction_config=rdnc) + + simple_model(hook) + _, files = get_dirs_files(trial_dir) + coll = get_collections() + from tornasole.trials import create_trial + + tr = create_trial(trial_dir) + assert len(tr.tensors()) == 2 + for tname in tr.tensors(): + t = tr.tensor(tname) + try: + t.value(0) + assert False + except TensorUnavailableForStep: + pass + assert len(t.reduction_values(0)) == 18 + for r in ALLOWED_REDUCTIONS + ALLOWED_NORMS: + for b in [False, True]: + assert t.reduction_value(0, reduction_name=r, abs=b) is not None \ No newline at end of file diff --git a/tests/tensorflow/hooks/test_save_all_full.py b/tests/tensorflow/hooks/test_save_all_full.py new file mode 100644 index 0000000000..196208940a --- /dev/null +++ b/tests/tensorflow/hooks/test_save_all_full.py @@ -0,0 +1,57 @@ +from .utils import * +from tornasole.tensorflow import reset_collections, get_collections +import pytest +import shutil, glob +from tornasole.core.reader import FileReader + +def test_save_all_full(): + run_id = 'trial_'+datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = os.path.join(TORNASOLE_TF_HOOK_TESTS_DIR, run_id) + + tf.reset_default_graph() + reset_collections() + + hook = TornasoleHook(out_dir=trial_dir, + save_all=True, + save_config=SaveConfig(save_interval=2)) + simple_model(hook) + _, files = get_dirs_files(trial_dir) + dirs, _ = get_dirs_files(os.path.join(trial_dir, 'events')) + + coll = get_collections() + assert len(coll) == 5 + assert len(coll['weights'].tensor_names) == 1 + assert len(coll['gradients'].tensor_names) == 1 + + assert 'collections.ts' in files + cm = CollectionManager.load(join(trial_dir, 'collections.ts')) + + 
assert len(cm.collections) == 5 + assert len(cm.collections['weights'].tensor_names) == 1 + assert len(cm.collections['weights'].reduction_tensor_names) == 0 + assert len(cm.collections['gradients'].tensor_names) == 1 + assert len(cm.collections['gradients'].reduction_tensor_names) == 0 + # as we hadn't asked to be saved + assert len(cm.collections['optimizer_variables'].tensor_names) == 0 + assert len(cm.collections['optimizer_variables'].reduction_tensor_names) == 0 + assert len(cm.collections['all'].tensor_names) == 106 + num_tensors_loaded_collection = len(cm.collections['weights'].tensor_names) + \ + len(cm.collections['gradients'].tensor_names) + num_tensors_collection = len(coll['weights'].tensor_names) + \ + len(coll['gradients'].tensor_names) + + assert num_tensors_collection == num_tensors_loaded_collection + assert len(dirs) == 5 + for step in dirs: + i=0 + size = 0 + fs = glob.glob(join(trial_dir, 'events', step, '**', '*.tfevents'), recursive=True) + for f in fs: + fr = FileReader(f) + for x in fr.read_tensors(): + tensor_name, step, tensor_data, mode, mode_step = x + i += 1 + size += tensor_data.nbytes + assert i == 85 + assert size == 1470 + shutil.rmtree(trial_dir) diff --git a/tests/tensorflow/hooks/test_save_config.py b/tests/tensorflow/hooks/test_save_config.py new file mode 100644 index 0000000000..fe8a541d33 --- /dev/null +++ b/tests/tensorflow/hooks/test_save_config.py @@ -0,0 +1,35 @@ +from .utils import * +from tornasole.tensorflow import reset_collections +import shutil + +def test_save_config(): + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = os.path.join(TORNASOLE_TF_HOOK_TESTS_DIR, run_id) + tf.reset_default_graph() + reset_collections() + + hook = TornasoleHook(out_dir=trial_dir, + save_all=False, + save_config=SaveConfig(save_interval=2)) + simple_model(hook) + _, files = get_dirs_files(trial_dir) + steps, _ = get_dirs_files(os.path.join(trial_dir, 'events')) + assert len(steps) == 5 + assert len(files) 
== 1 + +def test_save_config_skip_steps(): + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = os.path.join(TORNASOLE_TF_HOOK_TESTS_DIR, run_id) + + tf.reset_default_graph() + reset_collections() + + hook = TornasoleHook(out_dir=trial_dir, + save_all=False, + save_config=SaveConfig(save_interval=2, skip_num_steps=8)) + simple_model(hook, steps=20) + _, files = get_dirs_files(trial_dir) + steps, _ = get_dirs_files(os.path.join(trial_dir, 'events')) + assert len(steps) == 6 + + shutil.rmtree(trial_dir) diff --git a/tests/tensorflow/hooks/test_save_reductions.py b/tests/tensorflow/hooks/test_save_reductions.py new file mode 100644 index 0000000000..c84f252e8b --- /dev/null +++ b/tests/tensorflow/hooks/test_save_reductions.py @@ -0,0 +1,63 @@ +from .utils import * +from tornasole.tensorflow import reset_collections, get_collections +import shutil +import glob +from tornasole.core.reader import FileReader + +def test_save_reductions(): + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = os.path.join(TORNASOLE_TF_HOOK_TESTS_DIR, run_id) + + tf.reset_default_graph() + reset_collections() + + rdnc = ReductionConfig(reductions=['min', 'max', 'mean', 'prod', 'std', 'sum', 'variance'], + abs_reductions=['min', 'max', 'mean', 'prod', 'std', 'sum', 'variance'], + norms=['l1', 'l2']) + hook = TornasoleHook(out_dir=trial_dir, + save_config=SaveConfig(save_interval=1), + reduction_config=rdnc) + + simple_model(hook) + _, files = get_dirs_files(trial_dir) + coll = get_collections() + + assert len(coll) == 4 + assert len(coll['weights'].reduction_tensor_names) == 1 + assert len(coll['gradients'].reduction_tensor_names) == 1 + + assert 'collections.ts' in files + cm = CollectionManager.load(join(trial_dir, 'collections.ts')) + assert len(cm.collections) == 4 + assert len(cm.collections['weights'].tensor_names) == 0 + assert len(cm.collections['weights'].reduction_tensor_names) == 1 + assert 
len(cm.collections['gradients'].tensor_names) == 0 + assert len(cm.collections['gradients'].reduction_tensor_names) == 1 + # as we hadn't asked to be saved + assert len(cm.collections['optimizer_variables'].tensor_names) == 0 + assert len(cm.collections['optimizer_variables'].reduction_tensor_names) == 0 + assert len(cm.collections['default'].tensor_names) == 0 + assert len(cm.collections['default'].reduction_tensor_names) == 0 + num_tensors_loaded_collection = len(cm.collections['weights'].tensor_names) + \ + len(cm.collections['gradients'].tensor_names) + \ + len(cm.collections['default'].tensor_names) + num_tensors_collection = len(coll['weights'].tensor_names) + \ + len(coll['gradients'].tensor_names) + \ + len(coll['default'].tensor_names) + assert num_tensors_collection == num_tensors_loaded_collection + steps, _ = get_dirs_files(os.path.join(trial_dir, 'events')) + assert len(steps) == 10 + for step in steps: + i = 0 + size = 0 + fs = glob.glob(join(trial_dir, 'events', step, '**', '*.tfevents'), recursive=True) + for f in fs: + fr = FileReader(f) + for x in fr.read_tensors(): + tensor_name, step, tensor_data, mode, mode_step = x + i += 1 + size += tensor_data.nbytes if tensor_data is not None else 0 + assert i == 32 + assert size == 128 + + shutil.rmtree(trial_dir) diff --git a/tests/tensorflow/hooks/test_simple_include.py b/tests/tensorflow/hooks/test_simple_include.py new file mode 100644 index 0000000000..5532243741 --- /dev/null +++ b/tests/tensorflow/hooks/test_simple_include.py @@ -0,0 +1,110 @@ +from .utils import * +from tornasole.tensorflow import reset_collections, get_collection +import tornasole.tensorflow as ts +import glob, shutil +from tornasole.core.reader import FileReader + +def test_simple_include(): + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = os.path.join(TORNASOLE_TF_HOOK_TESTS_DIR, run_id) + + tf.reset_default_graph() + reset_collections() + + hook = TornasoleHook(out_dir=trial_dir, + 
save_config=SaveConfig(save_interval=2)) + get_collection('default').include('loss:0') + simple_model(hook, steps=10) + _, files = get_dirs_files(trial_dir) + steps, _ = get_dirs_files(os.path.join(trial_dir, 'events')) + + cm = CollectionManager.load(join(trial_dir, 'collections.ts')) + assert len(cm.collections['default'].tensor_names) == 1 + assert len(steps) == 5 + for step in steps: + i = 0 + size = 0 + fs = glob.glob(join(trial_dir, 'events', step, '**', '*.tfevents'), + recursive=True) + for f in fs: + fr = FileReader(f) + for x in fr.read_tensors(): + tensor_name, step, tensor_data, mode, mode_step = x + i += 1 + size += tensor_data.nbytes if tensor_data is not None else 0 + assert i == 3 + assert size == 20 + + shutil.rmtree(trial_dir) + +def test_simple_include_regex(): + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = os.path.join(TORNASOLE_TF_HOOK_TESTS_DIR, run_id) + + tf.reset_default_graph() + reset_collections() + + hook = TornasoleHook(out_dir=trial_dir, + include_regex=['loss:0'], + include_collections=[], + save_config=SaveConfig(save_interval=2)) + simple_model(hook, steps=10) + _, files = get_dirs_files(trial_dir) + steps, _ = get_dirs_files(os.path.join(trial_dir, 'events')) + + cm = CollectionManager.load(join(trial_dir, 'collections.ts')) + assert len(cm.collections['default'].tensor_names) == 1 + assert len(steps) == 5 + + for step in steps: + i = 0 + size = 0 + fs = glob.glob(join(trial_dir, 'events', step, '**', '*.tfevents'), + recursive=True) + for f in fs: + fr = FileReader(f) + for x in fr.read_tensors(): + tensor_name, step, tensor_data, mode, mode_step = x + i += 1 + size += tensor_data.nbytes if tensor_data is not None else 0 + assert i == 1 + assert size == 4 + + shutil.rmtree(trial_dir) + +def test_multi_collection_match(): + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = os.path.join(TORNASOLE_TF_HOOK_TESTS_DIR, run_id) + + tf.reset_default_graph() + reset_collections() 
+ + ts.get_collection('trial').include('loss:0') + hook = TornasoleHook(out_dir=trial_dir, + include_regex=['loss:0'], + include_collections=['default', 'trial'], + save_config=SaveConfig(save_interval=2)) + simple_model(hook, steps=10) + _, files = get_dirs_files(trial_dir) + steps, _ = get_dirs_files(os.path.join(trial_dir, 'events')) + + cm = CollectionManager.load(join(trial_dir, 'collections.ts')) + assert len(cm.collections['default'].tensor_names) == 1 + assert len(cm.collections['trial'].tensor_names) == 1 + assert len(steps) == 5 + + for step in steps: + i = 0 + size = 0 + fs = glob.glob(join(trial_dir, 'events', step, '**', '*.tfevents'), + recursive=True) + for f in fs: + fr = FileReader(f) + for x in fr.read_tensors(): + tensor_name, step, tensor_data, mode, mode_step = x + i += 1 + size += tensor_data.nbytes if tensor_data is not None else 0 + assert i == 1 + assert size == 4 + + shutil.rmtree(trial_dir) \ No newline at end of file diff --git a/tests/tensorflow/hooks/test_training_end.py b/tests/tensorflow/hooks/test_training_end.py new file mode 100644 index 0000000000..324f349473 --- /dev/null +++ b/tests/tensorflow/hooks/test_training_end.py @@ -0,0 +1,20 @@ +from .utils import * +from tornasole.tensorflow import reset_collections +import tensorflow as tf +from tornasole.core.access_layer.utils import has_training_ended +import shutil +import os +import sys +import subprocess + +def test_training_job_has_ended(): + tf.reset_default_graph() + reset_collections() + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = os.path.join(TORNASOLE_TF_HOOK_TESTS_DIR, run_id) + subprocess.check_call( + [sys.executable, "examples/tensorflow/training_scripts/simple/simple.py", + "--tornasole_path", trial_dir, + '--steps', '100', '--tornasole_frequency', '50']) + assert has_training_ended(trial_dir) == True + shutil.rmtree(trial_dir) \ No newline at end of file diff --git a/tests/tensorflow/hooks/test_weights_gradients.py 
b/tests/tensorflow/hooks/test_weights_gradients.py new file mode 100644 index 0000000000..3ebc2a21d8 --- /dev/null +++ b/tests/tensorflow/hooks/test_weights_gradients.py @@ -0,0 +1,29 @@ +from .utils import * +from tornasole.tensorflow import reset_collections + +def test_only_w_g(): + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = os.path.join(TORNASOLE_TF_HOOK_TESTS_DIR, run_id) + + tf.reset_default_graph() + reset_collections() + + hook = TornasoleHook(out_dir=trial_dir, + save_all=False, save_config=SaveConfig(save_interval=2)) + simple_model(hook) + steps, _ = get_dirs_files(os.path.join(trial_dir, 'events')) + _, files = get_dirs_files(trial_dir) + + assert 'collections.ts' in files + cm = CollectionManager.load(join(trial_dir, 'collections.ts')) + num_tensors_loaded_collection = len(cm.collections['weights'].tensor_names) + \ + len(cm.collections['gradients'].tensor_names) + \ + len(cm.collections['default'].tensor_names) + assert num_tensors_loaded_collection == 2 + assert len(steps) == 5 + # for step in steps: + # i = 0 + # filepath, size = get_event_file_path_length(join(rank_dir, step)) + # for (n, t) in get_tensors_from_event_file(filepath): + # i += 1 + # assert i == 2 diff --git a/tests/tensorflow/hooks/test_when_nan.py b/tests/tensorflow/hooks/test_when_nan.py new file mode 100644 index 0000000000..5c13e0ce7c --- /dev/null +++ b/tests/tensorflow/hooks/test_when_nan.py @@ -0,0 +1,32 @@ +from .utils import * +from tornasole.tensorflow import reset_collections + +def test_when_nan(): + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + trial_dir = os.path.join(TORNASOLE_TF_HOOK_TESTS_DIR, run_id) + + tf.reset_default_graph() + tf.set_random_seed(1) + np.random.seed(1) + reset_collections() + + hook = TornasoleHook(out_dir=trial_dir, + save_config=SaveConfig(save_interval=10, when_nan=['loss:0'])) + simple_model(hook, steps=100, lr=4e20) + steps, _ = get_dirs_files(os.path.join(trial_dir, 'events')) + _, files 
= get_dirs_files(trial_dir) + + assert 'collections.ts' in files + cm = CollectionManager.load(join(trial_dir, 'collections.ts')) + num_tensors_loaded_collection = len(cm.collections['weights'].tensor_names) + \ + len(cm.collections['gradients'].tensor_names) + \ + len(cm.collections['when_nan'].tensor_names) + \ + len(cm.collections['default'].tensor_names) + assert num_tensors_loaded_collection == 3 + + num_steps_with_files = 0 + for step in steps: + filepath, size = get_event_file_path_length(join(trial_dir, 'events', step)) + if size > 0: + num_steps_with_files += 1 + assert num_steps_with_files == 35 diff --git a/tests/tensorflow/hooks/test_write.py b/tests/tensorflow/hooks/test_write.py new file mode 100644 index 0000000000..1480f9245f --- /dev/null +++ b/tests/tensorflow/hooks/test_write.py @@ -0,0 +1,88 @@ +import tensorflow as tf +import numpy as np +from tornasole.tensorflow.hook import TornasoleHook +import tensorflow.train as train +from tensorflow.python.tools import inspect_checkpoint as chkp +import os, shutil +from tornasole.tensorflow import reset_collections +from .utils import * +from tornasole.core.reader import FileReader +from tornasole.core.tfevent.util import EventFileLocation + +def test_tornasole_hook_write(): + learning_rate=1e-3 + batch_size = 5 + run_id = 'trial_' + datetime.now().strftime('%Y%m%d-%H%M%S%f') + data_dir = os.path.join(TORNASOLE_TF_HOOK_TESTS_DIR, run_id) + + tf.reset_default_graph() + tf.set_random_seed(1) + np.random.seed(1) + reset_collections() + + # input + x = tf.placeholder(tf.float32, [None, 784], name='x') + # ground truth output + y = tf.placeholder(tf.float32, [None, 10], name='y') + # weights + W1 = tf.Variable(tf.random.normal([784, 300], stddev=1.0), name='w1') + b1 = tf.Variable(tf.random.normal([300], stddev=1.0), name='b1') + W2 = tf.Variable(tf.random.normal([300, 10], stddev=1.0), name='w2') + b2 = tf.Variable(tf.random.normal([10], stddev=1.0), name='b2') + + # hidden layer, output activation functions 
+ hidden_out = tf.add(tf.matmul(x, W1), b1) + hidden_out = tf.nn.relu(hidden_out) + y_pred = tf.nn.softmax(tf.add(tf.matmul(hidden_out, W2), b2)) + + # clip values to ensure that cross-entropy loss can be evaluated + y_clipped = tf.clip_by_value(y_pred, 1e-10, 0.9999999) + cross_entropy = -tf.reduce_mean(tf.reduce_sum(y * tf.log(y_clipped) + + (1 - y) * tf.log(1 - y_clipped), axis=1), name='loss') + optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy) + + init_op = tf.global_variables_initializer() + + correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_pred, 1)) + accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) + loss = 0.0 + + # remove existing directory so that you can rerun and evaluate against a fresh run instead of old weights saved to disk + + try: + shutil.rmtree(data_dir) + except: + print("No directory of old data found. Continuing...") + pass + + # set up tornasole hook + hook = TornasoleHook(data_dir, save_all=True, include_collections=None, + save_config=SaveConfig(save_interval=999)) + + # the variables we want to save + saving_var_dict = {'w1': W1, 'b1': b1, 'w2': W2, 'b2': b2} + + # train the network for 1000 iterations + with tf.train.MonitoredSession(hooks=[hook]) as sess: + for i in range(1000): + x_in = np.random.normal(0, 1, (batch_size, 784)) + y_truth = np.random.randint(0, 2, (batch_size, 10)) + feed = {x: x_in, y: y_truth} + tensor_dict = {'opt': optimizer, 'w1': W1, 'b1': b1, 'w2': W2, 'b2': b2, 'loss': cross_entropy} + v = sess.run(tensor_dict, feed_dict = feed) + + # track, output loss. 
currently has random initialization, so it's not that useful + + loss += v['loss']/1000 + + # read saved weights from disk using summary iterator, verify if in-memory weights at end of training + # are identical to the ones we have saved using TornasoleHook + step_dir = EventFileLocation.get_step_dir_path(data_dir, 999) + files = os.listdir(step_dir) + print(v.keys()) + for f in files: + fr = FileReader(os.path.join(step_dir, f)) + for tupv in fr.read_tensors(): + (tensor_name, step, tensor_data, mode, mode_step) = tupv + if tensor_name in v: + assert np.allclose(tensor_data, v[tensor_name]) diff --git a/tests/tensorflow/hooks/utils.py b/tests/tensorflow/hooks/utils.py new file mode 100644 index 0000000000..3205f9c362 --- /dev/null +++ b/tests/tensorflow/hooks/utils.py @@ -0,0 +1,49 @@ +import tensorflow as tf +import numpy as np +from datetime import datetime +import os +from os.path import isfile, join +from tornasole.tensorflow import TornasoleOptimizer, TornasoleHook, SaveConfig, ReductionConfig, Collection, CollectionManager + +TORNASOLE_TF_HOOK_TESTS_DIR = '/tmp/tornasole_tf/tests/' + +def simple_model(hook, steps=10, lr=0.4): + # Network definition + with tf.name_scope('foobar'): + x = tf.placeholder(shape=(None, 2), dtype=tf.float32) + w = tf.Variable(initial_value=[[10.], [10.]], name='weight1') + with tf.name_scope('foobaz'): + w0 = [[1], [1.]] + y = tf.matmul(x, w0) + loss = tf.reduce_mean((tf.matmul(x, w) - y) ** 2, name="loss") + + global_step = tf.Variable(17, name="global_step", trainable=False) + increment_global_step_op = tf.assign(global_step, global_step + 1) + + optimizer = tf.train.AdamOptimizer(lr) + optimizer = TornasoleOptimizer(optimizer) + optimizer_op = optimizer.minimize(loss, global_step=increment_global_step_op) + + sess = tf.train.MonitoredSession(hooks=[hook]) + + for i in range(steps): + x_ = np.random.random((10, 2)) * 0.1 + _loss, opt, gstep = sess.run([loss, optimizer_op, increment_global_step_op], {x: x_}) + print(f'Step={i}, 
Loss={_loss}') + + sess.close() + +def get_dirs_files(path): + entries = os.listdir(path) + onlyfiles = [f for f in entries if isfile(join(path, f))] + subdirs = [x for x in entries if x not in onlyfiles] + return subdirs, onlyfiles + +def get_event_file_path_length(dir_path): + files = os.listdir(dir_path) + if len(files) > 0: + filepath = join(dir_path, files[0]) + return filepath, os.stat(filepath).st_size + else: + return None, 0 + diff --git a/tests/tensorflow/test_tf_collections.py b/tests/tensorflow/test_tf_collections.py new file mode 100644 index 0000000000..aa5fa678da --- /dev/null +++ b/tests/tensorflow/test_tf_collections.py @@ -0,0 +1,18 @@ +from tornasole.tensorflow import Collection, CollectionManager +from tornasole.tensorflow import add_to_collection, get_collection +import tensorflow as tf + +def test_manager_export_load(): + cm = CollectionManager() + cm.get('default').include('loss') + c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) + rc = tf.math.reduce_max(c) + cm.get('default').add_tensor(c) + cm.get('default').add_reduction_tensor(rc, c) + cm.add(Collection('trial1')) + cm.add('trial2') + cm.get('trial2').include('total_loss') + cm.export('cm.ts') + cm2 = CollectionManager.load('cm.ts') + assert cm == cm2 + diff --git a/tornasole/__init__.py b/tornasole/__init__.py new file mode 100644 index 0000000000..147262b524 --- /dev/null +++ b/tornasole/__init__.py @@ -0,0 +1,4 @@ +from tornasole.core.modes import ModeKeys as modes +from tornasole.core.save_config import SaveConfig +from tornasole.core.reduction_config import ReductionConfig + diff --git a/tornasole/analysis/__init__.py b/tornasole/analysis/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tornasole/analysis/utils.py b/tornasole/analysis/utils.py new file mode 100644 index 0000000000..4dfb8ff44d --- /dev/null +++ b/tornasole/analysis/utils.py @@ -0,0 +1,39 @@ +from contextlib import contextmanager + + +@contextmanager +def no_refresh(trials): + if 
isinstance(trials, list): + for trial in trials: + trial.dynamic_refresh = False + else: + trial = trials + trial.dynamic_refresh = False + + yield trials + + if isinstance(trials, list): + for trial in trials: + trial.dynamic_refresh = True + else: + trial = trials + trial.dynamic_refresh = True + + +@contextmanager +def refresh(trials): + if isinstance(trials, list): + for trial in trials: + trial.dynamic_refresh = True + else: + trial = trials + trial.dynamic_refresh = True + + yield trials + + if isinstance(trials, list): + for trial in trials: + trial.dynamic_refresh = False + else: + trial = trials + trial.dynamic_refresh = False diff --git a/tornasole/core/__init__.py b/tornasole/core/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tornasole_core/access_layer/__init__.py b/tornasole/core/access_layer/__init__.py similarity index 100% rename from tornasole_core/access_layer/__init__.py rename to tornasole/core/access_layer/__init__.py diff --git a/tornasole_core/access_layer/base.py b/tornasole/core/access_layer/base.py similarity index 87% rename from tornasole_core/access_layer/base.py rename to tornasole/core/access_layer/base.py index 3f0473191d..45d6156ca5 100644 --- a/tornasole_core/access_layer/base.py +++ b/tornasole/core/access_layer/base.py @@ -1,5 +1,5 @@ -import os -from tornasole_core.utils import get_logger +from tornasole.core.utils import get_logger + class TSAccessBase: def __init__(self): diff --git a/tornasole_core/access_layer/file.py b/tornasole/core/access_layer/file.py similarity index 99% rename from tornasole_core/access_layer/file.py rename to tornasole/core/access_layer/file.py index 4b8fbb8bb6..526bc85596 100644 --- a/tornasole_core/access_layer/file.py +++ b/tornasole/core/access_layer/file.py @@ -2,6 +2,7 @@ import os import shutil + def ensure_dir(file_path): directory = os.path.dirname(file_path) if directory and not os.path.exists(directory): diff --git a/tornasole_core/access_layer/s3.py 
b/tornasole/core/access_layer/s3.py similarity index 97% rename from tornasole_core/access_layer/s3.py rename to tornasole/core/access_layer/s3.py index f686795d73..a1fd0ffc6f 100644 --- a/tornasole_core/access_layer/s3.py +++ b/tornasole/core/access_layer/s3.py @@ -1,6 +1,7 @@ import boto3 import re -from tornasole_core.access_layer.base import TSAccessBase +from tornasole.core.access_layer.base import TSAccessBase + class TSAccessS3(TSAccessBase): def __init__(self, bucket_name, key_name, diff --git a/tornasole_core/access_layer/s3handler.py b/tornasole/core/access_layer/s3handler.py similarity index 99% rename from tornasole_core/access_layer/s3handler.py rename to tornasole/core/access_layer/s3handler.py index 1f6895c9ca..ecc6583006 100644 --- a/tornasole_core/access_layer/s3handler.py +++ b/tornasole/core/access_layer/s3handler.py @@ -1,6 +1,6 @@ import aioboto3 import asyncio -from tornasole_core.utils import is_s3, get_logger +from tornasole.core.utils import is_s3, get_logger import logging import time diff --git a/tornasole_core/access_layer/utils.py b/tornasole/core/access_layer/utils.py similarity index 91% rename from tornasole_core/access_layer/utils.py rename to tornasole/core/access_layer/utils.py index 0c5f1861f4..c6f5b4b5b4 100644 --- a/tornasole_core/access_layer/utils.py +++ b/tornasole/core/access_layer/utils.py @@ -2,11 +2,13 @@ from botocore.exceptions import ClientError from .file import TSAccessFile from .s3 import TSAccessS3 -from tornasole_core.utils import is_s3, get_logger -from tornasole_core.access_layer.s3handler import S3Handler, ListRequest +from tornasole.core.utils import is_s3, get_logger +from tornasole.core.access_layer.s3handler import S3Handler, ListRequest END_OF_JOB_FILENAME = "END_OF_JOB.ts" logger = get_logger() + + def training_has_ended(trial_prefix): file_path=os.path.join(trial_prefix, END_OF_JOB_FILENAME) s3, bucket_name, key_name = is_s3(file_path) @@ -18,6 +20,7 @@ def training_has_ended(trial_prefix): 
writer.write("end of training job") + writer.close() + + def has_training_ended(trial_prefix): + file_path=os.path.join(trial_prefix, END_OF_JOB_FILENAME) + s3, bucket_name, key_name = is_s3(file_path) diff --git a/tornasole/core/actions/__init__.py b/tornasole/core/actions/__init__.py new file mode 100644 index 0000000000..31d556d01a --- /dev/null +++ b/tornasole/core/actions/__init__.py @@ -0,0 +1,2 @@ +from .action_base import Action +from .terminate_smjob import TerminateSagemakerJob diff --git a/tornasole/core/actions/action_base.py b/tornasole/core/actions/action_base.py new file mode 100644 index 0000000000..107775d944 --- /dev/null +++ b/tornasole/core/actions/action_base.py @@ -0,0 +1,8 @@ +from tornasole.core.utils import get_logger + +class Action: + def __init__(self): + self.logger = get_logger() + + def run(self, rule_name, **kwargs): + pass \ No newline at end of file diff --git a/tornasole/core/actions/terminate_smjob.py b/tornasole/core/actions/terminate_smjob.py new file mode 100644 index 0000000000..edaa11053c --- /dev/null +++ b/tornasole/core/actions/terminate_smjob.py @@ -0,0 +1,20 @@ +from tornasole.core.sagemaker_utils import SageMakerUtils +from .action_base import Action + + +class TerminateSagemakerJob(Action): + def __init__(self, sm_job_name): + super().__init__() + self.job_name = sm_job_name + + def run(self, rule_name, **kwargs): + try: + # todo fix hardcoding of arn inside this function + SageMakerUtils.terminate_sagemaker_job(self.job_name) + # tags = [{'Key':"TerminatedBy", 'Value': rule_name} , + # {'Key':'TerminationTime', 'Value': str(time.time())} ] + # SageMakerUtils.add_tags(self.job_name, tags) + except Exception as e: + self.logger.warning("Caught exception when running TerminateSagemakerJob " + "action for smjob:{} Exception:{}".format(self.job_name, e)) + diff --git a/tornasole_core/collection.py b/tornasole/core/collection.py similarity index 100% rename from tornasole_core/collection.py rename to
tornasole/core/collection.py diff --git a/tornasole_core/collection_manager.py b/tornasole/core/collection_manager.py similarity index 100% rename from tornasole_core/collection_manager.py rename to tornasole/core/collection_manager.py diff --git a/tornasole_core/indexutils.py b/tornasole/core/indexutils.py similarity index 100% rename from tornasole_core/indexutils.py rename to tornasole/core/indexutils.py diff --git a/tornasole_core/modes.py b/tornasole/core/modes.py similarity index 99% rename from tornasole_core/modes.py rename to tornasole/core/modes.py index 3510be6353..7d7fc5ede5 100644 --- a/tornasole_core/modes.py +++ b/tornasole/core/modes.py @@ -1,5 +1,6 @@ from enum import Enum + # Note that Keras has similar concept of ModeKeys class ModeKeys(Enum): TRAIN = 1 #training/fitting mode diff --git a/tornasole_core/reader.py b/tornasole/core/reader.py similarity index 96% rename from tornasole_core/reader.py rename to tornasole/core/reader.py index 03a87c28bb..d88ce5111b 100644 --- a/tornasole_core/reader.py +++ b/tornasole/core/reader.py @@ -17,8 +17,8 @@ """APIs for logging data in the event file.""" -import time -from tornasole_core.tfevent.event_file_reader import EventFileReader +from tornasole.core.tfevent.event_file_reader import EventFileReader + class FileReader(): def __init__(self, fname, wtype='tfevent', verbose=True): diff --git a/tornasole_core/reduction_config.py b/tornasole/core/reduction_config.py similarity index 100% rename from tornasole_core/reduction_config.py rename to tornasole/core/reduction_config.py diff --git a/tornasole/core/reductions.py b/tornasole/core/reductions.py new file mode 100644 index 0000000000..4afb704a76 --- /dev/null +++ b/tornasole/core/reductions.py @@ -0,0 +1,32 @@ +import numpy as np +from tornasole.core.reduction_config import ALLOWED_REDUCTIONS, ALLOWED_NORMS + + +def get_numpy_reduction(reduction_name, numpy_data, abs=False): + if reduction_name not in ALLOWED_REDUCTIONS and reduction_name not in 
ALLOWED_NORMS: + raise ValueError('Invalid reduction type %s' % reduction_name) + + if abs: + numpy_data = np.absolute(numpy_data) + return get_basic_numpy_reduction(reduction_name, numpy_data) + + +def get_basic_numpy_reduction(reduction_name, numpy_data, abs=False): + if reduction_name in ALLOWED_REDUCTIONS: + if reduction_name in ['min', 'max']: + return getattr(np, 'a' + reduction_name)(numpy_data) + elif reduction_name in ['mean', 'prod', 'std', 'sum','variance']: + if reduction_name == 'variance': reduction_name = 'var' + return getattr(np, reduction_name)(numpy_data) + elif reduction_name in ALLOWED_NORMS: + if reduction_name in ['l1', 'l2']: + ord = int(reduction_name[1]) + else: + ord = None + + if abs: + rv = np.linalg.norm(np.absolute(numpy_data), ord=ord) + else: + rv = np.linalg.norm(numpy_data, ord=ord) + return rv + return None \ No newline at end of file diff --git a/tornasole_core/sagemaker_utils.py b/tornasole/core/sagemaker_utils.py similarity index 98% rename from tornasole_core/sagemaker_utils.py rename to tornasole/core/sagemaker_utils.py index 3bffe7c00f..f1398e9f54 100644 --- a/tornasole_core/sagemaker_utils.py +++ b/tornasole/core/sagemaker_utils.py @@ -1,5 +1,6 @@ import boto3 -import botocore + + class SageMakerUtils: @staticmethod def is_sagemaker_job_finished(jobname, returnMock=None): diff --git a/tornasole_core/save_config.py b/tornasole/core/save_config.py similarity index 100% rename from tornasole_core/save_config.py rename to tornasole/core/save_config.py diff --git a/tornasole_core/save_manager.py b/tornasole/core/save_manager.py similarity index 100% rename from tornasole_core/save_manager.py rename to tornasole/core/save_manager.py diff --git a/tornasole/core/tensor.py b/tornasole/core/tensor.py new file mode 100644 index 0000000000..cb6460c6a4 --- /dev/null +++ b/tornasole/core/tensor.py @@ -0,0 +1,225 @@ +from tornasole.core.reductions import get_numpy_reduction +from tornasole.core.modes import ModeKeys +from tornasole.exceptions
import * + +from enum import Enum + + +class StepState(Enum): + UNAVAILABLE = 0 + AVAILABLE = 1 + NOT_YET_AVAILABLE = 2 + + +class ModeSteps: + def __init__(self, mode): + self.mode = mode + self._steps = {} + + def steps(self): + ts = list(self._steps.keys()) + ts.sort(key=int) + return ts + + def has_step(self, step_num): + return step_num in self._steps + + def set_step_value(self, step_num, value): + if step_num not in self._steps: + self._steps[step_num] = Step(step_num, value) + else: + s = self._steps[step_num] + s.value = value + + def set_step_reduction_value(self, step_num, red_name, abs, red_value): + if step_num not in self._steps: + s = Step(step_num) + self._steps[step_num] = s + else: + s = self._steps[step_num] + s.set_reduction_value(red_name, abs, red_value) + + def step(self, step_num): + return self._steps[step_num] + + +class Step: + def __init__(self, step_num, value=None): + self.step_num = step_num + self._value = value + + # mapping from (red_name, abs) to value + self._reduction_values = {} + + @property + def value(self): + return self._value + + @value.setter + def value(self, value): + self._value = value + + @value.deleter + def value(self): + del self._value + + def reduction_values(self): + return self._reduction_values + + def reduction_value(self, red_name, abs): + if (red_name, abs) in self._reduction_values: + return self._reduction_values[(red_name, abs)] + + def set_reduction_value(self, red_name, abs, red_value): + self._reduction_values[(red_name, abs)] = red_value + + +# refreshing is always responsibility of tensor class at the highest level API function, +# not ModeSteps/Steps +class Tensor: + def __init__(self, name, trial): + self._mode_steps = {} + self.name = name + self.trial = trial + + def steps(self, mode=ModeKeys.GLOBAL): + self.trial.maybe_refresh(self.name) + if mode == ModeKeys.GLOBAL: + return self._global_steps() + elif mode in self._mode_steps: + return self._mode_steps[mode].steps() + else: + return None + 
+ def _global_steps(self): + gs = [] + for mode in self._mode_steps: + ms = self._mode_steps[mode].steps() + for s in ms: + gs.append(self.trial.global_step(mode, s)) + gs.sort(key=int) + return gs + + def _has_step(self, step_num, mode=ModeKeys.GLOBAL): + if self._has_step_currently(step_num, mode): + return True + else: + self.trial.maybe_refresh(self.name) + if self._has_step_currently(step_num, mode): + return True + return False + + def _has_step_currently(self, step_num, mode): + if mode == ModeKeys.GLOBAL: + return self._has_global_step_currently(step_num) + else: + return self._has_mode_step_currently(step_num, mode) + + def _has_mode_step_currently(self, step_num, mode): + if mode in self._mode_steps: + if self._mode_steps[mode].has_step(step_num): + return True + return False + + def _has_global_step_currently(self, step_num): + # first check if in global mode, + if ModeKeys.GLOBAL in self._mode_steps: + if self._mode_steps[ModeKeys.GLOBAL].has_step(step_num): + return True + else: + # else convert to mode_step and check + mode, mode_step_num = self.trial.mode_modestep(step_num) + if mode in self._mode_steps and \ + self._mode_steps[mode].has_step(mode_step_num): + return True + return False + + def _get_step_currently(self, step_num, mode): + if mode == ModeKeys.GLOBAL and ModeKeys.GLOBAL in self._mode_steps \ + and self._mode_steps[ModeKeys.GLOBAL].has_step(step_num): + # step was saved as GLOBAL step + return self._mode_steps[mode].step(step_num) + else: + if mode == ModeKeys.GLOBAL: + # else convert to mode_step and check + mode, step_num = self.trial.mode_modestep(step_num) + if self._has_mode_step_currently(step_num, mode): + return self._mode_steps[mode].step(step_num) + return None + + def step(self, step_num, mode=ModeKeys.GLOBAL): + s = self._get_step_currently(step_num, mode) + if s is not None: + return s + else: + self.trial.maybe_refresh(self.name) + ss = self.trial.has_passed_step(step_num, mode) + if ss == StepState.AVAILABLE: + s = 
self._get_step_currently(step_num, mode) + if s is not None: + return s + raise TensorUnavailableForStep(self.name, step_num, mode) + elif ss == StepState.UNAVAILABLE: + raise StepUnavailable(step_num, mode) + elif ss == StepState.NOT_YET_AVAILABLE: + raise StepNotYetAvailable(step_num, mode) + + assert False, 'Should not happen' + + def value(self, step_num, mode=ModeKeys.GLOBAL): + # step refreshes + s = self.step(step_num=step_num, mode=mode) + if s.value is not None: + return s.value + else: + has_reductions = len(s.reduction_values()) > 0 + raise TensorUnavailableForStep(self.name, step_num, mode, has_reductions) + + def reduction_values(self, step_num, mode=ModeKeys.GLOBAL): + s = self.step(step_num=step_num, mode=mode) + if s is not None: + return s.reduction_values() + else: + assert False, 'Should not happen' + + def reduction_value(self, step_num, reduction_name, mode=ModeKeys.GLOBAL, abs=False): + """ + Returns the value of the reduction requested. + If the tensor was saved as a reduction, then just fetches that. + Else, tries to compute the reduction and returns. If the tensor value is not + available, returns None as reduction + + :param step_num: step number + :param mode: mode of job (train, eval, predict, etc). 
+ If this is None, assumes step number is global + :param reduction_name: name of reduction + :param abs: boolean which represents whether reduction should + be applied on absolute value of the tensor or not + :return: reduction value requested as a float + """ + s = self.step(step_num=step_num, mode=mode) + rv = s.reduction_value(reduction_name, abs) + if rv is not None: + return rv + elif s.value is not None: + return get_numpy_reduction(reduction_name, s.value, abs) + + assert False, 'Should not happen' + + def _create_mode_step(self, mode, mode_step): + mode_step = int(mode_step) + if mode_step < 0: + raise ValueError('mode step number {} for tensor {} ' + 'can not be less than 0'.format(mode_step, self.name)) + if mode not in self._mode_steps: + self._mode_steps[mode] = ModeSteps(mode) + + def add_step(self, mode, mode_step, value): + self._create_mode_step(mode, mode_step) + self._mode_steps[mode].set_step_value(mode_step, value) + + def add_reduction_step(self, mode, mode_step, red_name, abs, red_value): + self._create_mode_step(mode, mode_step) + self._mode_steps[mode].set_step_reduction_value(mode_step, + red_name, abs, red_value) + diff --git a/tornasole_core/tfevent/__init__.py b/tornasole/core/tfevent/__init__.py similarity index 100% rename from tornasole_core/tfevent/__init__.py rename to tornasole/core/tfevent/__init__.py diff --git a/tornasole_core/tfevent/attr_value.proto b/tornasole/core/tfevent/attr_value.proto similarity index 94% rename from tornasole_core/tfevent/attr_value.proto rename to tornasole/core/tfevent/attr_value.proto index 7f42c4dc99..fba7d750d7 100644 --- a/tornasole_core/tfevent/attr_value.proto +++ b/tornasole/core/tfevent/attr_value.proto @@ -6,9 +6,9 @@ option java_outer_classname = "AttrValueProtos"; option java_multiple_files = true; option java_package = "org.tensorflow.framework"; -import "tornasole_core/tfevent/tensor.proto"; -import "tornasole_core/tfevent/tensor_shape.proto"; -import 
"tornasole_core/tfevent/types.proto"; +import "tornasole/core/tfevent/tensor.proto"; +import "tornasole/core/tfevent/tensor_shape.proto"; +import "tornasole/core/tfevent/types.proto"; // Protocol buffer representing the value for an attr used to configure an Op. // Comment indicates the corresponding attr type. Only the field matching the diff --git a/tornasole_core/tfevent/event.proto b/tornasole/core/tfevent/event.proto similarity index 97% rename from tornasole_core/tfevent/event.proto rename to tornasole/core/tfevent/event.proto index ed278ff82d..3bbae60aa4 100644 --- a/tornasole_core/tfevent/event.proto +++ b/tornasole/core/tfevent/event.proto @@ -6,7 +6,7 @@ option java_outer_classname = "EventProtos"; option java_multiple_files = true; option java_package = "org.tensorflow.util"; -import "tornasole_core/tfevent/summary.proto"; +import "tornasole/core/tfevent/summary.proto"; // Protocol buffer representing an event that happened during // the execution of a Brain model. diff --git a/tornasole_core/tfevent/event_file_reader.py b/tornasole/core/tfevent/event_file_reader.py similarity index 95% rename from tornasole_core/tfevent/event_file_reader.py rename to tornasole/core/tfevent/event_file_reader.py index 3d943e14a1..91396f3a65 100644 --- a/tornasole_core/tfevent/event_file_reader.py +++ b/tornasole/core/tfevent/event_file_reader.py @@ -17,18 +17,14 @@ """Reads events from disk.""" -import tornasole_core.tfevent.types_pb2 as types_pb2 +import tornasole.core.tfevent.types_pb2 as types_pb2 import logging import numpy as np -import os.path -import time - from .event_pb2 import Event -from .summary_pb2 import Summary, SummaryMetadata -from tornasole_core.tfrecord.record_reader import RecordReader -from tornasole_core.modes import ModeKeys, MODE_STEP_PLUGIN_NAME, MODE_PLUGIN_NAME +from tornasole.core.tfrecord.record_reader import RecordReader +from tornasole.core.modes import ModeKeys, MODE_STEP_PLUGIN_NAME, MODE_PLUGIN_NAME #todo: remove this logger perhaps diff 
--git a/tornasole_core/tfevent/event_file_writer.py b/tornasole/core/tfevent/event_file_writer.py similarity index 97% rename from tornasole_core/tfevent/event_file_writer.py rename to tornasole/core/tfevent/event_file_writer.py index 4ff8a43fa2..da010c20c6 100644 --- a/tornasole_core/tfevent/event_file_writer.py +++ b/tornasole/core/tfevent/event_file_writer.py @@ -23,18 +23,17 @@ import socket import threading import time -import uuid import os import six from .event_pb2 import Event from .summary_pb2 import Summary, SummaryMetadata -from tornasole_core.tfrecord.record_writer import RecordWriter +from tornasole.core.tfrecord.record_writer import RecordWriter from .util import make_tensor_proto, EventFileLocation -from tornasole_core.indexutils import * -from tornasole_core.tfevent.index_file_writer import IndexWriter, IndexArgs -from tornasole_core.utils import is_s3, get_logger -from tornasole_core.modes import ModeKeys, MODE_STEP_PLUGIN_NAME, MODE_PLUGIN_NAME +from tornasole.core.indexutils import * +from tornasole.core.tfevent.index_file_writer import IndexWriter, IndexArgs +from tornasole.core.utils import is_s3, get_logger +from tornasole.core.modes import ModeKeys, MODE_STEP_PLUGIN_NAME, MODE_PLUGIN_NAME logging.basicConfig() diff --git a/tornasole_core/tfevent/graph.proto b/tornasole/core/tfevent/graph.proto similarity index 95% rename from tornasole_core/tfevent/graph.proto rename to tornasole/core/tfevent/graph.proto index 33b77b4a0f..c3a604a054 100644 --- a/tornasole_core/tfevent/graph.proto +++ b/tornasole/core/tfevent/graph.proto @@ -6,8 +6,8 @@ option java_outer_classname = "GraphProtos"; option java_multiple_files = true; option java_package = "org.tensorflow.framework"; -import "tornasole_core/tfevent/node_def.proto"; -import "tornasole_core/tfevent/versions.proto"; +import "tornasole/core/tfevent/node_def.proto"; +import "tornasole/core/tfevent/versions.proto"; // Represents the graph of operations message GraphDef { diff --git 
a/tornasole_core/tfevent/index_file_writer.py b/tornasole/core/tfevent/index_file_writer.py similarity index 89% rename from tornasole_core/tfevent/index_file_writer.py rename to tornasole/core/tfevent/index_file_writer.py index d3437c1050..ed25497b16 100644 --- a/tornasole_core/tfevent/index_file_writer.py +++ b/tornasole/core/tfevent/index_file_writer.py @@ -1,6 +1,6 @@ -from tornasole_core.access_layer.file import TSAccessFile -from tornasole_core.access_layer.s3 import TSAccessS3 -from tornasole_core.utils import is_s3 +from tornasole.core.access_layer.file import TSAccessFile +from tornasole.core.access_layer.s3 import TSAccessS3 +from tornasole.core.utils import is_s3 class IndexWriter(object): def __init__(self, file_path): diff --git a/tornasole_core/tfevent/node_def.proto b/tornasole/core/tfevent/node_def.proto similarity index 98% rename from tornasole_core/tfevent/node_def.proto rename to tornasole/core/tfevent/node_def.proto index bdaad4147a..8167e79116 100644 --- a/tornasole_core/tfevent/node_def.proto +++ b/tornasole/core/tfevent/node_def.proto @@ -6,7 +6,7 @@ option java_outer_classname = "NodeProto"; option java_multiple_files = true; option java_package = "org.tensorflow.framework"; -import "tornasole_core/tfevent/attr_value.proto"; +import "tornasole/core/tfevent/attr_value.proto"; message NodeDef { // The name given to this operator. 
Used for naming inputs, diff --git a/tornasole_core/tfevent/resource_handle.proto b/tornasole/core/tfevent/resource_handle.proto similarity index 100% rename from tornasole_core/tfevent/resource_handle.proto rename to tornasole/core/tfevent/resource_handle.proto diff --git a/tornasole_core/tfevent/summary.proto b/tornasole/core/tfevent/summary.proto similarity index 98% rename from tornasole_core/tfevent/summary.proto rename to tornasole/core/tfevent/summary.proto index bd74c9ae04..b5332f23c6 100644 --- a/tornasole_core/tfevent/summary.proto +++ b/tornasole/core/tfevent/summary.proto @@ -6,7 +6,7 @@ option java_outer_classname = "SummaryProtos"; option java_multiple_files = true; option java_package = "org.tensorflow.framework"; -import "tornasole_core/tfevent/tensor.proto"; +import "tornasole/core/tfevent/tensor.proto"; // Metadata associated with a series of Summary data message SummaryDescription { diff --git a/tornasole_core/tfevent/summary_to_event.txt b/tornasole/core/tfevent/summary_to_event.txt similarity index 100% rename from tornasole_core/tfevent/summary_to_event.txt rename to tornasole/core/tfevent/summary_to_event.txt diff --git a/tornasole_core/tfevent/tensor.proto b/tornasole/core/tfevent/tensor.proto similarity index 94% rename from tornasole_core/tfevent/tensor.proto rename to tornasole/core/tfevent/tensor.proto index fe9ebd6eaf..898f2f693f 100644 --- a/tornasole_core/tfevent/tensor.proto +++ b/tornasole/core/tfevent/tensor.proto @@ -6,9 +6,9 @@ option java_outer_classname = "TensorProtos"; option java_multiple_files = true; option java_package = "org.tensorflow.framework"; -import "tornasole_core/tfevent/resource_handle.proto"; -import "tornasole_core/tfevent/tensor_shape.proto"; -import "tornasole_core/tfevent/types.proto"; +import "tornasole/core/tfevent/resource_handle.proto"; +import "tornasole/core/tfevent/tensor_shape.proto"; +import "tornasole/core/tfevent/types.proto"; // Protocol buffer representing a tensor. 
message TensorProto { diff --git a/tornasole_core/tfevent/tensor_shape.proto b/tornasole/core/tfevent/tensor_shape.proto similarity index 100% rename from tornasole_core/tfevent/tensor_shape.proto rename to tornasole/core/tfevent/tensor_shape.proto diff --git a/tornasole_core/tfevent/types.proto b/tornasole/core/tfevent/types.proto similarity index 100% rename from tornasole_core/tfevent/types.proto rename to tornasole/core/tfevent/types.proto diff --git a/tornasole_core/tfevent/util.py b/tornasole/core/tfevent/util.py similarity index 96% rename from tornasole_core/tfevent/util.py rename to tornasole/core/tfevent/util.py index 955b36f06e..8c444c28b7 100644 --- a/tornasole_core/tfevent/util.py +++ b/tornasole/core/tfevent/util.py @@ -2,8 +2,8 @@ from .tensor_shape_pb2 import TensorShapeProto import numpy as np import os, re -from tornasole_core.utils import get_immediate_subdirectories -from tornasole_core.utils import get_logger +from tornasole.core.utils import get_immediate_subdirectories +from tornasole.core.utils import get_logger logger = get_logger() diff --git a/tornasole_core/tfevent/versions.proto b/tornasole/core/tfevent/versions.proto similarity index 100% rename from tornasole_core/tfevent/versions.proto rename to tornasole/core/tfevent/versions.proto diff --git a/tornasole/core/tfrecord/__init__.py b/tornasole/core/tfrecord/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tornasole_core/tfrecord/_crc32c.py b/tornasole/core/tfrecord/_crc32c.py similarity index 100% rename from tornasole_core/tfrecord/_crc32c.py rename to tornasole/core/tfrecord/_crc32c.py diff --git a/tornasole_core/tfrecord/record_reader.py b/tornasole/core/tfrecord/record_reader.py similarity index 94% rename from tornasole_core/tfrecord/record_reader.py rename to tornasole/core/tfrecord/record_reader.py index 3766256a57..18ec46a0c2 100644 --- a/tornasole_core/tfrecord/record_reader.py +++ b/tornasole/core/tfrecord/record_reader.py @@ -17,10 +17,10 @@ import 
struct from ._crc32c import crc32c -from tornasole_core.access_layer.file import TSAccessFile -from tornasole_core.access_layer.s3 import TSAccessS3 -from tornasole_core.utils import is_s3 -from tornasole_core.tfrecord.record_writer import CHECKSUM_MAGIC_BYTES +from tornasole.core.access_layer.file import TSAccessFile +from tornasole.core.access_layer.s3 import TSAccessS3 +from tornasole.core.utils import is_s3 +from tornasole.core.tfrecord.record_writer import CHECKSUM_MAGIC_BYTES class RecordReader: """Read records in the following format for a single record event_str: diff --git a/tornasole_core/tfrecord/record_writer.py b/tornasole/core/tfrecord/record_writer.py similarity index 95% rename from tornasole_core/tfrecord/record_writer.py rename to tornasole/core/tfrecord/record_writer.py index cd489ffed3..c7291afd39 100644 --- a/tornasole_core/tfrecord/record_writer.py +++ b/tornasole/core/tfrecord/record_writer.py @@ -19,10 +19,9 @@ import struct from ._crc32c import crc32c -from tornasole_core.access_layer.file import TSAccessFile -from tornasole_core.access_layer.s3 import TSAccessS3 -from tornasole_core.utils import is_s3 -import os +from tornasole.core.access_layer.file import TSAccessFile +from tornasole.core.access_layer.s3 import TSAccessS3 +from tornasole.core.utils import is_s3 CHECKSUM_MAGIC_BYTES = b'0x12345678' diff --git a/tornasole_core/tfrecord/tensor_reader.py b/tornasole/core/tfrecord/tensor_reader.py similarity index 86% rename from tornasole_core/tfrecord/tensor_reader.py rename to tornasole/core/tfrecord/tensor_reader.py index ffa9c3a42c..13677c10fb 100644 --- a/tornasole_core/tfrecord/tensor_reader.py +++ b/tornasole/core/tfrecord/tensor_reader.py @@ -1,11 +1,9 @@ import struct -from tornasole_core.tfevent.event_pb2 import Event -from tornasole_core.tfevent.event_file_reader import get_tensor_data -from tornasole_core.tfevent.util import make_tensor_proto -from tornasole_core.tfevent.summary_pb2 import Summary, SummaryMetadata -from 
tornasole_core.tfrecord.record_reader import masked_crc32c, u32 -from tornasole_core.tfrecord.record_writer import CHECKSUM_MAGIC_BYTES -from tornasole_core.modes import ModeKeys, MODE_PLUGIN_NAME, MODE_STEP_PLUGIN_NAME +from tornasole.core.tfevent.event_pb2 import Event +from tornasole.core.tfevent.event_file_reader import get_tensor_data +from tornasole.core.tfrecord.record_reader import masked_crc32c +from tornasole.core.tfrecord.record_writer import CHECKSUM_MAGIC_BYTES +from tornasole.core.modes import ModeKeys, MODE_PLUGIN_NAME, MODE_STEP_PLUGIN_NAME class TensorReader: def __init__(self, data): diff --git a/tornasole_core/utils.py b/tornasole/core/utils.py similarity index 92% rename from tornasole_core/utils.py rename to tornasole/core/utils.py index 3225b543c5..6fba25c95d 100644 --- a/tornasole_core/utils.py +++ b/tornasole/core/utils.py @@ -1,6 +1,7 @@ import os import re import logging +import bisect from botocore.exceptions import ClientError @@ -81,7 +82,7 @@ def is_s3(path): def check_dir_exists(path): - from tornasole_core.access_layer.s3handler import S3Handler, ListRequest + from tornasole.core.access_layer.s3handler import S3Handler, ListRequest s3, bucket_name, key_name = is_s3(path) if s3: try: @@ -109,4 +110,11 @@ def match_inc(tname, include): for inc in include: if re.search(inc, tname): return True - return False \ No newline at end of file + return False + + +def index(sorted_list, elem): + i = bisect.bisect_left(sorted_list, elem) + if i != len(sorted_list) and sorted_list[i] == elem: + return i + raise ValueError \ No newline at end of file diff --git a/tornasole_core/writer.py b/tornasole/core/writer.py similarity index 96% rename from tornasole_core/writer.py rename to tornasole/core/writer.py index 966a6482c8..bfc839c761 100644 --- a/tornasole_core/writer.py +++ b/tornasole/core/writer.py @@ -17,10 +17,9 @@ """APIs for logging data in the event file.""" -import time -from tornasole_core.tfevent.event_file_writer import EventFileWriter 
+from tornasole.core.tfevent.event_file_writer import EventFileWriter import socket -from tornasole_core.modes import ModeKeys +from .modes import ModeKeys class FileWriter(): def __init__(self, logdir, trial, step, worker=None, rank=0, part=0, diff --git a/tornasole/exceptions.py b/tornasole/exceptions.py new file mode 100644 index 0000000000..8dd651b976 --- /dev/null +++ b/tornasole/exceptions.py @@ -0,0 +1,54 @@ +from tornasole.core.modes import ModeKeys as modes + + +class StepNotYetAvailable(Exception): + def __init__(self, step, mode): + self.step = step + self.mode = mode + + def __str__(self): + return 'Step {} of mode {} not yet available'.format(self.step, self.mode) + + +class StepUnavailable(Exception): + def __init__(self, step, mode): + self.step = step + self.mode = mode + + def __str__(self): + return 'Step {} of mode {} is not available as it was not saved'\ + .format(self.step, self.mode) + + +class TensorUnavailableForStep(Exception): + def __init__(self, tname, step, mode=modes.GLOBAL, has_reductions=False): + self.step = step + self.mode = mode + self.tname = tname + self.has_reductions = has_reductions + + def __str__(self): + msg = 'Value for tensor {} is not available for step {} ' \ + 'with mode {} as it was not saved.' \ + ''.format(self.tname, self.step, self.mode.name) + if self.has_reductions: + msg += 'This tensor has reductions saved for this step. ' \ + 'You might want to query for the reductions.' + return msg + +class TensorUnavailable(Exception): + def __init__(self, tname): + self.tname = tname + + def __str__(self): + return 'Tensor {} can not be satisfied. 
Tornasole does ' \ + 'not know about this tensor.'.format(self.tname) + + +class NoMoreData(Exception): + pass + + +class RuleEvaluationConditionMet(Exception): + def __str__(self): + return 'Rule evaluation resulted in the condition being met' diff --git a/tornasole/mxnet/__init__.py b/tornasole/mxnet/__init__.py new file mode 100644 index 0000000000..cb369d4404 --- /dev/null +++ b/tornasole/mxnet/__init__.py @@ -0,0 +1,5 @@ +from .hook import TornasoleHook +from .mxnet_collection import Collection, CollectionManager +from .mxnet_collection import get_collections, get_collection, get_collection_manager, load_collections, add_to_collection, add_to_default_collection, reset_collections +from tornasole import SaveConfig, ReductionConfig +from tornasole import modes \ No newline at end of file diff --git a/tornasole/mxnet/hook.py b/tornasole/mxnet/hook.py new file mode 100644 index 0000000000..053fa9f5f3 --- /dev/null +++ b/tornasole/mxnet/hook.py @@ -0,0 +1,264 @@ +import mxnet as mx +from tornasole.core.writer import FileWriter +from tornasole.core.save_config import SaveConfig +from tornasole.core.save_manager import SaveManager +from tornasole.core.modes import ModeKeys, ALLOWED_MODES +from tornasole.core.utils import check_dir_exists, get_logger, flatten, is_s3, get_reduction_tensor_name +from tornasole.core.access_layer.utils import training_has_ended +from .mxnet_collection import get_collection_manager, get_collection +from .util import get_aggregated_data, make_numpy_array +import re as _re +import logging +import os + + +logger = get_logger() +import atexit + +INVALID_TAG_CHARACTERS = _re.compile(r'[^-/\w\.]') +COLLECTION_FILE_NAME = 'collections.ts' +DEFAULT_WORKER_NAME = 'worker0' +INPUT_TENSOR_SUFFIX = '_input_' +OUTPUT_TENSOR_SUFFIX = '_output' +GRADIENT_PREFIX = 'gradient/' + + +def default_save_config(): + return SaveConfig() + + +class TornasoleHook: + def __init__(self, + out_dir, + dry_run=False, + worker=DEFAULT_WORKER_NAME, + 
reduction_config=None, + save_config=default_save_config(), + include_regex=None, + include_collections=['weights', 'bias','gradients', 'default'], + save_all=False): + if not is_s3(out_dir)[0]: + out_dir = os.path.expanduser(out_dir) + check_dir_exists(out_dir) + self.out_dir = out_dir + self.out_base_dir = os.path.dirname(out_dir) + self.run_id = os.path.basename(out_dir) + self.include_collections = include_collections + + self.dry_run = dry_run + self.worker = worker + + self.mode = ModeKeys.GLOBAL + self.mode_steps = {ModeKeys.GLOBAL: -1} + self.local_reductions = [] + self.step = -1 + self.is_recursive = False + self.export_only_once = True + self.last_saved_step = -1 + self.writer = None + self.export_collections = True + self._initialize_collectors(save_all, include_regex) + + atexit.register(self.cleanup) + self.last_block = None + # dictionary of collections that need to be saved in a particular step. + self.collections_in_this_step = None + + self.save_manager = SaveManager(collection_manager=get_collection_manager(), + include_collections_names=self.include_collections, + default_save_config=save_config, + default_reduction_config=reduction_config) + self.prepared_save_manager = False + logger.info('Saving to {}'.format(self.out_dir)) + + def _initialize_collectors(self, save_all, include_regex): + # If user has provided any include_regex, add them to a default collection. + if include_regex is not None: + get_collection('default').include(include_regex) + if 'default' not in self.include_collections: + self.include_collections.append('default') + # If save all is set, create a collector that can save all the tensors + if save_all : + get_collection('all').include([".*"]) + self.include_collections.append('all') + + def set_mode(self, mode): + if mode in ALLOWED_MODES: + self.mode = mode + else: + raise ValueError('Invalid mode {}. Valid modes are {}.' 
    def set_mode(self, mode):
        """Switch the hook between training phases (e.g. TRAIN/EVAL/GLOBAL).

        Raises:
            ValueError: if ``mode`` is not one of ``ALLOWED_MODES``.
        """
        if mode in ALLOWED_MODES:
            self.mode = mode
        else:
            # NOTE(review): ','.join assumes ALLOWED_MODES contains strings;
            # if it holds ModeKeys enum members this raises TypeError — confirm.
            raise ValueError('Invalid mode {}. Valid modes are {}.'
                             .format(mode, ','.join(ALLOWED_MODES)))

        # Each mode keeps its own step counter, starting at -1 so the first
        # forward_pre_hook increment brings it to 0.
        if mode not in self.mode_steps:
            self.mode_steps[mode] = -1

    def cleanup(self):
        """atexit handler: flush pending tensors and mark training as ended."""
        if logger is not None:
            logger.debug("Cleanup")
        # Export collection definitions once if anything was ever saved.
        if self.last_saved_step != -1:
            get_collection_manager().export_manager(os.path.join(self.out_dir, COLLECTION_FILE_NAME))
            self.export_only_once = False
        # Write the gradients of the past step if the writer is still available.
        if self.writer is not None:
            if self.last_block is not None:
                # Parameters (and gradients) of the last processed block have
                # not been logged yet; do it before closing the writer.
                params = self.last_block.collect_params().values()
                for param in params:
                    self.log_param(param)
            self.writer.flush()
            self.writer.close()
        training_has_ended(self.out_dir)

    # Check whether we should log this tensor
    def _check_tensor_to_be_logged(self, name):
        # Delegates to the save manager; only the 'step' part of the verdict
        # (save on this step) is honored here.
        ss = self.save_manager.should_save_tensor(tensorname=name, mode=self.mode,
                                                  step=self.mode_steps[self.mode])
        return ss['step']

    def _process_step(self):
        # returns dictionary of dictionaries: coll_name -> {step: True/False, when_nan: True/False}
        # there will be no entry in dictionary for collections where both step and when_nan are False
        # This dictionary is stored in self.collections_in_this_step so that we do not need to call this
        # function in every forward_hook (recursive) invocation for a given step.
        self.collections_in_this_step = self.save_manager.collections_to_save(self.mode, self.mode_steps[self.mode])
        return self.collections_in_this_step

    # This hook is invoked by trainer prior to running the forward pass.
    def forward_pre_hook(self, block, input):
        """Flush the previous step's writer, advance step counters, and open
        a new writer when any collection is due to be saved this step."""
        # Write the gradients of the past step if the writer is still available.
        if self.writer is not None:
            params = block.collect_params().values()
            for param in params:
                self.log_param(param)
            self.writer.flush()
            self.writer.close()
            self.writer = None

        if not self.prepared_save_manager:
            # at this point we need all collections to be ready
            # this may not be the case at creation of hook
            # as user's code after hook might add collections
            self.save_manager.prepare()
            self.prepared_save_manager = True

        self.mode_steps[self.mode] += 1
        self.step += 1
        logger.debug("Setting the global step to be {0}".format(self.step))

        # Reset the collections to be saved in this step to be None.
        self.collections_in_this_step = None
        if self._process_step():
            # Something must be saved this step: open a new event file writer.
            self.writer = FileWriter(logdir=self.out_base_dir,
                                     trial=self.run_id,
                                     step=self.step,
                                     worker=self.worker)

        # Export collection definitions once, after the first saved step.
        if self.last_saved_step != -1 and self.export_only_once:
            get_collection_manager().export_manager(os.path.join(self.out_dir, COLLECTION_FILE_NAME))
            self.export_only_once = False
        self.last_block = block
+ def forward_hook(self, block, input, output): + if not self.collections_in_this_step: + logging.debug("Skipping the global step {0}".format(self.step)) + return + + block_name = block.name + logger.debug("Processing the global step {0} for block {1}".format(self.step, block_name)) + + # Output input tensor + self.log_inputs_to_block(block_name, input) + + # Output output tensors + self.log_outputs_of_block(block_name, output) + self.last_saved_step = self.step + + def _log_ndarray_from_col(self, block_name, var, tensor_suffix, idx): + if var.__class__.__name__ is "NDArray": + self.log_tensor(tensor_name=block_name + tensor_suffix + str(idx), tensor_value=var) + return idx+1 + elif isinstance(var, tuple) or isinstance(var, list): + for val in var: + idx = self._log_ndarray_from_col(block_name, val, tensor_suffix, idx) + else: + logger.warning("output is not ndarray or list of ndarrays, bname:{} output_class:{}".format(block_name, +var.__class__.__name__)) + return idx + + def log_inputs_to_block(self, block_name, input): + idx = 0 + self._log_ndarray_from_col(block_name, input, INPUT_TENSOR_SUFFIX, idx) + + def log_outputs_of_block(self, block_name, output): + idx = 0 + self._log_ndarray_from_col(block_name, output, OUTPUT_TENSOR_SUFFIX, idx) + + def log_param(self, param): + self.log_tensor(tensor_name=param.name, tensor_value=param.data(param.list_ctx()[0])) + # If Gradient for this param is available + if param.grad_req != 'null': + self.log_tensor(tensor_name=GRADIENT_PREFIX + param.name, + tensor_value=param.grad(param.list_ctx()[0])) + + def log_tensor(self, tensor_name, tensor_value): + if self.dry_run or not self._check_tensor_to_be_logged(tensor_name): + return + + # Get the collection to which this tensor belongs + save_colls = self.save_manager.from_collections(tensor_name) + for s_col in save_colls: + if s_col.name in self.collections_in_this_step.keys(): + reduce_config = s_col.get_reduction_config() + if reduce_config: + abs = False + for reduction 
in reduce_config.reductions + reduce_config.abs_reductions + reduce_config.norms + \ + reduce_config.abs_norms: + if reduction in reduce_config.abs_reductions or reduction in reduce_config.abs_norms: + abs = True + reduction_tensor_name = get_reduction_tensor_name(tensor_name, reduction, abs) + tensor_data = get_aggregated_data(reduction, tensor_value, tensor_name, abs) + tensor_value_np = make_numpy_array(tensor_data) + self.writer.write_tensor(tdata=tensor_value_np, tname=reduction_tensor_name, + mode=self.mode, mode_step=self.mode_steps[self.mode]) + s_col.add_reduction_tensor_name(tensor_name) + return + else: + tensor_value = make_numpy_array(tensor_value) + self.writer.write_tensor(tdata=tensor_value, tname=tensor_name, + mode=self.mode, mode_step=self.mode_steps[self.mode]) + return + + # This function is "applied" to every child in the block. This function in turn + # registers the forward hook to each block. It helps logging the input output tensors + # of that block. + def _recursive_apply(self, block): + block.register_forward_hook(self.forward_hook) + + # This function registers the forward hook. If user wants to register the hook + # for every child in the given block, then the function calls "apply" API for + # registration of the hook. + # The hook is registered recursively, if user has specified the collections that are more than + # the default collectors viz. 
    # This function registers the forward hook. If user wants to register the hook
    # for every child in the given block, then the function calls "apply" API for
    # registration of the hook.
    # The hook is registered recursively, if user has specified the collections that are more than
    # the default collectors viz. gradients, weight and bias
    def register_hook(self, block):
        """Attach Tornasole's pre/forward hooks to ``block`` (a gluon Block)."""
        # NOTE(review): is_recursive is unconditionally set True here, making
        # the `else` branch below dead code — confirm whether recursion was
        # meant to be conditional, as the comment above suggests.
        self.is_recursive=True
        if not isinstance(block, mx.gluon.Block):
            logger.error("The given block type {0} is not "
                         "currently supported by Tornasole Hook"
                         .format(block.__class__.__name__))
            return
        block.register_forward_pre_hook(self.forward_pre_hook)
        if self.is_recursive:
            block.apply(self._recursive_apply)
        else:
            block.register_forward_hook(self.forward_hook)

    @staticmethod
    def clean_tag(name):
        """Replace characters that are illegal in summary tags with '_'."""
        if name is not None:
            new_name = INVALID_TAG_CHARACTERS.sub('_', name)
            new_name = new_name.lstrip('/')  # Remove leading slashes
            if new_name != name:
                logging.warning('Summary name %s is illegal; using %s instead.', name, new_name)
                name = new_name
        return name


class Collection(BaseCollection):
    """MXNet-specific collection that can include a gluon Block's I/O tensors."""

    def add_block_tensors(self, block, inputs=False, outputs=False):
        # Include regexes matching the names produced by the hook's
        # log_inputs_to_block / log_outputs_of_block helpers.
        # NOTE(review): these are regexes, so '*' in "_input_*" applies to the
        # preceding '_' — likely "_input_.*" was intended; confirm.
        if inputs:
            input_tensor_regex = block.name + "_input_*"
            self.include(input_tensor_regex)
        if outputs:
            output_tensor_regex = block.name + "_output"
            self.include(output_tensor_regex)


class CollectionManager(BaseCollectionManager):
    """Collection manager pre-populated with weights/bias/gradients collections."""

    def __init__(self, create_default=True):
        super().__init__()
        if create_default:
            self._register_default_collections()

    def create_collection(self, name):
        # Override so new collections are the MXNet Collection subclass.
        self.collections[name] = Collection(name)
    def _register_default_collections(self):
        # Built-in collections: weights/bias use a negative lookahead so
        # gradient-prefixed names are excluded; gradients match the prefix.
        weight_collection = Collection('weights', include_regex=['^(?!gradient).*weight'])
        bias_collection = Collection('bias', include_regex=['^(?!gradient).*bias'])
        gradient_collection = Collection('gradients', include_regex=['^gradient'])
        self.add(gradient_collection)
        self.add(weight_collection)
        self.add(bias_collection)

    @staticmethod
    def load(filename):
        """Load a CollectionManager from a file with one serialized collection per line."""
        cm = CollectionManager(create_default=False)
        with open(filename, 'r') as f:
            line = f.readline()
            while line:
                c = Collection.load(line.rstrip())
                cm.add(c)
                line = f.readline()
        return cm

    @staticmethod
    def load_from_string(s):
        """Load a CollectionManager from the in-memory string form of `load`.

        NOTE(review): unlike `load`, empty lines (e.g. from a trailing
        newline) are passed to Collection.load — confirm it tolerates them.
        """
        cm = CollectionManager(create_default=False)
        lines = s.split('\n')
        for line in lines:
            c = Collection.load(line.rstrip())
            cm.add(c)
        return cm

    def export_manager(self, path):
        """Serialize all collections to ``path``."""
        self.export(path)


# Module-level singleton manager used by the free functions below.
_collection_manager = CollectionManager()

def load_collections(path):
    """Replace the module-level manager with one loaded from ``path``."""
    global _collection_manager
    _collection_manager = CollectionManager.load(path)

def reset_collections():
    """Drop all collections and start from a fresh default manager."""
    global _collection_manager
    del _collection_manager
    _collection_manager = CollectionManager()

def add_to_collection(collection_name, args):
    get_collection(collection_name).add(args)

def get_collection_manager():
    return _collection_manager

def add_to_default_collection(args):
    add_to_collection('default', args)

def get_collection(collection_name):
    """Get (creating on first access) the collection named ``collection_name``."""
    try:
        c = _collection_manager.get(collection_name)
    except KeyError:
        _collection_manager.create_collection(collection_name)
        c = _collection_manager.get(collection_name)
    return c

def get_collections():
    return _collection_manager.collections
def get_aggregated_data(aggregation_name,
                        tensor_data, tensor_name, abs=False):
    """Apply the reduction/norm named ``aggregation_name`` to ``tensor_data``.

    numpy arrays are delegated to get_numpy_reduction; mx.NDArray values use
    the corresponding mx.ndarray operator. ``abs`` applies |x| first.

    Raises:
        RuntimeError: for unknown reduction/norm names.
    """
    reduction_name = aggregation_name
    if isinstance(tensor_data, np.ndarray):
        return get_numpy_reduction(reduction_name, tensor_data, abs)
    if abs:
        tensor_data = mx.ndarray.abs(tensor_data)

    if reduction_name in ALLOWED_REDUCTIONS:
        assert hasattr(mx.ndarray, aggregation_name)
        f = getattr(mx.ndarray, aggregation_name)
        return f(tensor_data, name=tensor_name)
    elif reduction_name in ALLOWED_NORMS:
        # Bug fix: the original used `reduction_name is "l1"` / `is "l2"`,
        # i.e. identity comparison of strings, which only works via interning.
        if reduction_name in ("l1", "l2"):
            return mx.ndarray.norm(data=tensor_data, ord=int(reduction_name[1]))
        raise RuntimeError("Invalid normalization operation {0} for mx.NDArray".format(reduction_name))
    elif hasattr(mx, reduction_name):
        f = getattr(mx, reduction_name)
        return f(tensor_data, name=tensor_name)
    raise RuntimeError("Invalid aggregation_name {0} for mx.NDArray".format(aggregation_name))


def make_numpy_array(x):
    """Convert ``x`` (np.ndarray, scalar, mx NDArray, tuple/list) to np.ndarray.

    Raises:
        TypeError: for unsupported input types.
    """
    if isinstance(x, np.ndarray):
        return x
    elif np.isscalar(x):
        return np.array([x])
    elif isinstance(x, (tuple, list)):
        # Bug fix: the original passed dtype=x.dtype, but tuples have no
        # dtype attribute, so this branch always crashed (the code carried a
        # "todo: fix this, will crash" note). Let numpy infer the dtype.
        # Generalized to accept lists as well.
        return np.asarray(x)
    elif isinstance(x, NDArray):
        return x.asnumpy()
    else:
        raise TypeError('_make_numpy_array only accepts input types of numpy.ndarray, scalar,'
                        ' and MXNet NDArray, while received type {}'.format(str(type(x))))
+from tornasole.core.save_config import SaveConfig +from tornasole.core.save_manager import SaveManager +from tornasole.core.modes import ModeKeys, ALLOWED_MODES +from tornasole.core.utils import check_dir_exists, get_logger, flatten, is_s3, get_reduction_tensor_name +from tornasole.pytorch.torch_collection import get_collection_manager, get_collection +from tornasole.pytorch.util import get_aggregated_data, make_numpy_array +from tornasole.core.access_layer.utils import training_has_ended + +import re as _re +import logging +import os + +logger = get_logger() +import atexit + +INVALID_TAG_CHARACTERS = _re.compile(r'[^-/\w\.]') +COLLECTION_FILE_NAME = 'collections.ts' +DEFAULT_WORKER_NAME = 'worker0' +INPUT_TENSOR_SUFFIX = '_input_' +OUTPUT_TENSOR_SUFFIX = '_output' +GRADIENT_PREFIX = 'gradient/' + + +def default_save_config(): + return SaveConfig() + + +class TornasoleHook: + def __init__(self, + out_dir, + dry_run=False, + worker=DEFAULT_WORKER_NAME, + reduction_config=None, + save_config=default_save_config(), + include_regex=None, + include_collections=['weights', 'bias', 'gradients', 'default'], + save_all=False): + if not is_s3(out_dir)[0]: + out_dir = os.path.expanduser(out_dir) + check_dir_exists(out_dir) + self.out_dir = out_dir + self.out_base_dir = os.path.dirname(out_dir) + self.run_id = os.path.basename(out_dir) + self.include_collections = include_collections + + self.dry_run = dry_run + self.worker = worker + + self.mode = ModeKeys.GLOBAL + self.mode_steps = {ModeKeys.GLOBAL: -1} + # self.local_reductions = [] + self.reduction_config = reduction_config + self.step = -1 + self.is_recursive = False + self.export_only_once = True + self.last_saved_step = -1 + self.writer = None + self._initialize_collectors(save_all, include_regex) + + # dictionary of collections that need to be saved in a particular step. 
+ self.collections_in_this_step = None + # mapping of module objects to their names, useful in forward hook for logging input/output of modules + self.module_maps = dict() + self.exported_collection = False + + atexit.register(self.cleanup) + + self.save_manager = SaveManager(collection_manager=get_collection_manager(), + include_collections_names=self.include_collections, + default_save_config=save_config, + default_reduction_config=reduction_config) + self.prepared_save_manager = False + logger.info('Saving to {}'.format(self.out_dir)) + + def _initialize_collectors(self, save_all, include_regex): + # If user has provided any include_regex, add them to a default collection. + if include_regex is not None: + get_collection('default').include(include_regex) + if 'default' not in self.include_collections: + self.include_collections.append('default') + # If save all is set, create a collector that can save all the tensors + if save_all: + get_collection('all').include([".*"]) + self.include_collections.append('all') + + def set_mode(self, mode): + if mode in ALLOWED_MODES: + self.mode = mode + else: + raise ValueError('Invalid mode {}. Valid modes are {}.' + .format(mode, ','.join(ALLOWED_MODES))) + + if mode not in self.mode_steps: + self.mode_steps[mode] = -1 + + def cleanup(self): + if not self.exported_collection: + get_collection_manager().export_manager(os.path.join(self.out_dir, COLLECTION_FILE_NAME)) + # Write the gradients of the past step if the writer is still available. 
    def cleanup(self):
        """atexit handler: export collections if needed, flush and mark training end."""
        if not self.exported_collection:
            get_collection_manager().export_manager(os.path.join(self.out_dir, COLLECTION_FILE_NAME))
        # Write the gradients of the past step if the writer is still available.
        if self.writer is not None:
            self.writer.flush()
            self.writer.close()
        training_has_ended(self.out_dir)

    # Check whether we should log this tensor
    def _check_tensor_to_be_logged(self, name):
        # Delegates to the save manager; only the 'step' part of the verdict
        # (save on this step) is honored here.
        ss = self.save_manager.should_save_tensor(tensorname=name, mode=self.mode,
                                                  step=self.mode_steps[self.mode])
        return ss['step']

    def _process_step(self):
        # returns dictionary of dictionaries: coll_name -> {step: True/False, when_nan: True/False}
        # there will be no entry in dictionary for collections where both step and when_nan are False
        # This dictionary is stored in self.collections_in_this_step so that we do not need to call this
        # function in every forward_hook (recursive) invocation for a given step.
        self.collections_in_this_step = self.save_manager.collections_to_save(self.mode, self.mode_steps[self.mode])
        return self.collections_in_this_step

    # This hook is invoked by trainer prior to running the forward pass.
    def forward_pre_hook(self, module, input):
        """Flush the previous step's writer, advance step counters, open a new
        writer when needed, and log the module's parameters for this step."""
        # Write the gradients of the past step if the writer is still available.
        if self.writer is not None:
            self.writer.flush()
            self.writer.close()
            self.writer = None
        if not self.prepared_save_manager:
            # at this point we need all collections to be ready
            # this may not be the case at creation of hook
            # as user's code after hook might add collections
            self.save_manager.prepare()
            self.prepared_save_manager = True

        self.mode_steps[self.mode] += 1
        self.step += 1
        logger.debug("Setting the global step to be {0}".format(self.step))
        # Reset the collections to be saved in this step to be None.
        self.collections_in_this_step = None
        if self._process_step():
            self.writer = FileWriter(logdir=self.out_base_dir,
                                     trial=self.run_id,
                                     step=self.step,
                                     worker=self.worker)
        # Parameters (weights/bias) are logged from the pre-hook of the root
        # module, before the forward pass runs.
        module_name = module._get_name()
        params = module.named_parameters()
        for name, param in params:
            pname = module_name + '_' + name
            logger.debug("Processing the global step {0} for parameter {1}".format(self.step, pname))
            self.log_tensor(tensor_name=pname, tensor_value=param.data)

        # Export collection definitions once, after the first saved step.
        if self.last_saved_step != -1 and not self.exported_collection:
            get_collection_manager().export_manager(os.path.join(self.out_dir, COLLECTION_FILE_NAME))
            self.exported_collection = True
        # self.last_block = block

    # This hook is invoked by trainer after running the forward pass.
    def forward_hook(self, module, input, output):
        """Log the input and output tensors of ``module`` for the current step."""
        if not self.collections_in_this_step:
            logging.debug("Skipping the global step {0}".format(self.step))
            return

        # module_maps was populated by register_hook with each submodule's
        # qualified name.
        module_name = self.module_maps[module]
        logger.debug("Processing the global step {0} for module {1}".format(self.step, module_name))

        # Output input tensor
        self.log_inputs_to_module(module_name, input)

        # Output output tensors
        self.log_outputs_of_module(module_name, output)
        self.last_saved_step = self.step
+ def back(grad): + if self._process_step(): + if grad is not None: + logger.debug("Processing the backward step {0} for {1}".format(self.step, tname)) + self.log_tensor(tensor_name=GRADIENT_PREFIX + tname, tensor_value=grad) + return back + + def log_module(self, module_name, var, suffix, idx): + if var.__class__.__name__ is "Tensor": + self.log_tensor(tensor_name=module_name + suffix + str(idx), tensor_value=var) + return idx + 1 + elif isinstance(var, tuple) or isinstance(var, list): + for val in var: + idx = self.log_module(module_name, val, suffix, idx) + else: + logger.warning("var is not Tensor or list of Tensors, module_name:{} {}".format(module_name, var.__class__.__name__)) + + def log_inputs_to_module(self, module_name, input): + idx = 0 + self.log_module(module_name, input, INPUT_TENSOR_SUFFIX, idx) + + def log_outputs_of_module(self, module_name, output): + idx = 0 + self.log_module(module_name, output, OUTPUT_TENSOR_SUFFIX, idx) + + def log_tensor(self, tensor_name, tensor_value): + if self.dry_run or not self._check_tensor_to_be_logged(tensor_name): + return + + # Get the collection to which this tensor belongs + save_colls = self.save_manager.from_collections(tensor_name) + for s_col in save_colls: + if s_col.name in self.collections_in_this_step.keys(): + reduce_config = s_col.get_reduction_config() + if reduce_config: + abs = False + for reduction in reduce_config.reductions + reduce_config.abs_reductions + reduce_config.norms + \ + reduce_config.abs_norms: + if reduction in reduce_config.abs_reductions or reduction in reduce_config.abs_norms: + abs = True + reduction_tensor_name = get_reduction_tensor_name(tensor_name, reduction, abs) + tensor_data = get_aggregated_data(reduction, tensor_value, tensor_name, abs) + tensor_value_np = make_numpy_array(tensor_data) + self.writer.write_tensor(tdata=tensor_value_np, tname=reduction_tensor_name, + mode=self.mode, mode_step=self.mode_steps[self.mode]) + 
s_col.reduction_tensor_names.add(reduction_tensor_name) + return + else: + tensor_value = make_numpy_array(tensor_value) + self.writer.write_tensor(tdata=tensor_value, tname=tensor_name, + mode=self.mode, mode_step=self.mode_steps[self.mode]) + return + # TODO: remove? not being used anywhere + def close_log(self): + if self._process_step(): + return + self.writer.close() + + # This function is "applied" to every child in the block. This function in turn + # registers the forward hook to each module. It helps logging the input output tensors + # of that module. + + def _recursive_apply(self, module): + module.register_forward_hook(self.forward_hook) + + def _backward_apply(self, module): + params = module.named_parameters() + for name, param in params: + pname = module._get_name() + '_' + name + param.register_hook(self.backward_hook(pname)) + + # This function registers the forward hook. If user wants to register the hook + # for every child in the given block, then the function calls "apply" API for + # registration of the hook. + # The hook is registered recursively, if user has specified the collections that are more than + # the default collectors viz. 
    # This function registers the forward hook. If user wants to register the hook
    # for every child in the given block, then the function calls "apply" API for
    # registration of the hook.
    # The hook is registered recursively, if user has specified the collections that are more than
    # the default collectors viz. gradients, weight and bias
    def register_hook(self, module):
        """Attach Tornasole hooks to ``module`` (a torch.nn.Module) and its submodules."""
        if not isinstance(module, torch.nn.Module):
            logger.error("The given module type {0} is not currently supported by Tornasole Hook".format(
                module.__class__.__name__))
            return
        module.register_forward_pre_hook(self.forward_pre_hook)

        # Remember each submodule's qualified name so forward_hook can label
        # its inputs/outputs; the root module maps to its own class name.
        for layer in list(module.named_modules()):
            self.module_maps[layer[1]] = layer[0]
        self.module_maps[module] = module._get_name()
        module.apply(self._recursive_apply)
        self._backward_apply(module)

    @staticmethod
    def clean_tag(name):
        """Replace characters that are illegal in summary tags with '_'."""
        if name is not None:
            new_name = INVALID_TAG_CHARACTERS.sub('_', name)
            new_name = new_name.lstrip('/')  # Remove leading slashes
            if new_name != name:
                logging.warning('Summary name %s is illegal; using %s instead.', name, new_name)
                name = new_name
        return name
class Collection(BaseCollection):
    """PyTorch-specific collection that can include a module's I/O tensors."""

    def add_module_tensors(self, module, inputs=False, outputs=False):
        # Include regexes matching the names produced by the hook's
        # log_inputs_to_module / log_outputs_of_module helpers.
        # NOTE(review): these are regexes, so '*' in "_input_*" applies to
        # the preceding '_' — likely "_input_.*" was intended; confirm.
        if inputs:
            input_tensor_regex = module._get_name() + "_input_*"
            self.include(input_tensor_regex)
        if outputs:
            output_tensor_regex = module._get_name() + "_output"
            self.include(output_tensor_regex)



class CollectionManager(BaseCollectionManager):
    """Collection manager pre-populated with weights/bias/gradients collections."""

    def __init__(self, create_default=True):
        super().__init__()
        # self.export_only_once = True
        if create_default:
            self._register_default_collections()

    def create_collection(self, name):
        # Override so new collections are the PyTorch Collection subclass.
        self.collections[name] = Collection(name)

    def _register_default_collections(self):
        # weights/bias use a negative lookahead to exclude gradient-prefixed
        # names; gradients match the 'gradient' prefix.
        weight_collection = Collection('weights', include_regex=["^(?!gradient).*weight"])
        bias_collection = Collection('bias', include_regex=['^(?!gradient).*bias'])
        gradient_collection = Collection('gradients', include_regex=['^gradient'])
        self.add(gradient_collection)
        self.add(weight_collection)
        self.add(bias_collection)

    @staticmethod
    def load(filename):
        """Load a CollectionManager from a file with one serialized collection per line."""
        cm = CollectionManager(create_default=False)
        with open(filename, 'r') as f:
            line = f.readline()
            while line:
                c = Collection.load(line.rstrip())
                cm.add(c)
                line = f.readline()
        return cm

    @staticmethod
    def load_from_string(s):
        """Load a CollectionManager from the in-memory string form of `load`.

        NOTE(review): unlike `load`, empty lines (e.g. from a trailing
        newline) are passed to Collection.load — confirm it tolerates them.
        """
        cm = CollectionManager(create_default=False)
        lines = s.split('\n')
        for line in lines:
            c = Collection.load(line.rstrip())
            cm.add(c)
        return cm

    def export_manager(self, path):
        """Serialize all collections to ``path``."""
        self.export(path)
        # if self.export_only_once:
        #     self.export(path)
        #     self.export_only_once = False
def get_aggregated_data(aggregation_name, tensor_data, tensor_name, abs=False):
    """Apply the reduction/norm named ``aggregation_name`` to ``tensor_data``.

    numpy arrays are delegated to get_numpy_reduction; torch.Tensor values use
    the corresponding torch operator. ``abs`` applies |x| first.

    Raises:
        RuntimeError: for unknown reduction/norm names.
    """
    reduction_name = aggregation_name
    if isinstance(tensor_data, np.ndarray):
        return get_numpy_reduction(reduction_name, tensor_data, abs)
    if abs:
        tensor_data = torch.abs(tensor_data)

    if reduction_name in ALLOWED_REDUCTIONS:
        assert hasattr(torch.Tensor, aggregation_name)
        f = getattr(torch.Tensor, aggregation_name)
        return f(tensor_data)
    elif reduction_name in ALLOWED_NORMS:
        # 'l1' -> p=1, 'l2' -> p=2 for torch.norm.
        if aggregation_name in ['l1', 'l2']:
            ord = int(aggregation_name[1])
        else:
            raise RuntimeError("Invalid normalization operation {0} for torch.Tensor".format(reduction_name))
        return torch.norm(tensor_data, p=ord)
    elif hasattr(torch, aggregation_name):
        f = getattr(torch, aggregation_name)
        return f(tensor_data)
    raise RuntimeError("Invalid aggregation_name {0}".format(aggregation_name))


def make_numpy_array(x):
    """Convert ``x`` (np.ndarray, scalar, torch.Tensor, tuple/list) to np.ndarray.

    Raises:
        TypeError: for unsupported input types.
    """
    if isinstance(x, np.ndarray):
        return x
    elif np.isscalar(x):
        return np.array([x])
    elif isinstance(x, torch.Tensor):
        return x.data.numpy()
    elif isinstance(x, (tuple, list)):
        # Bug fix: the original passed dtype=x.dtype, but tuples have no
        # dtype attribute, so this branch always crashed (the mxnet variant
        # of this helper even carried a "will crash" todo). Let numpy infer
        # the dtype. Generalized to accept lists as well.
        return np.asarray(x)
    else:
        raise TypeError('_make_numpy_array only accepts input types of numpy.ndarray, scalar,'
                        ' and Torch Tensor, while received type {}'.format(str(type(x))))
class RequiredTensors:
    """Declares which tensors, and at which steps, a rule needs from a trial."""

    def __init__(self, trial):
        self.trial = trial
        # tensor name (or regex) -> list of steps at which it is needed
        self.tensor_names = {}
        self.logger = logger
        # tensor name -> True when the name should be treated as a regex
        self.should_match_regex = {}

    def need_tensor(self, name, steps, should_match_regex=False):
        """Register that ``name`` is needed at the given ``steps``.

        NOTE(review): this stores a reference to (and later extends) the
        caller's ``steps`` list — confirm callers do not reuse the list.
        """
        if name not in self.tensor_names:
            self.tensor_names[name] = steps
        else:
            self.tensor_names[name].extend(steps)

        if should_match_regex:
            self.should_match_regex[name] = True

    def _check_if_steps_available(self, tname, steps):
        # Touch each required step; Tensor.value raises if the step's value
        # is missing, surfacing the problem to the rule invoker.
        t = self.trial.tensor(tname)
        for st in steps:
            t.value(st)

    def _fetch_tensors(self):
        """Wait for all required steps and prefetch the required tensors.

        Raises:
            TensorUnavailable: if a required tensor name never appears.
        """
        required_steps = set()
        for steps in self.tensor_names.values():
            required_steps = required_steps.union(set(steps))
        required_steps = sorted(required_steps)
        self.logger.debug(f"Waiting for required_steps: {required_steps}")
        self.trial.wait_for_steps(required_steps)
        self.trial.get_tensors(self.tensor_names,
                               should_regex_match=self.should_match_regex)
        for tensorname, steps in self.tensor_names.items():
            # check whether we should match regex for this tensorname
            # False refers to the default value if the key does not exist in the dictionary
            if self.should_match_regex.get(tensorname, False):
                regex = tensorname
                tnames = self.trial.tensors_matching_regex([regex])
            else:
                tnames = [tensorname]
            for tname in tnames:
                if not self.trial.has_tensor(tname):
                    raise TensorUnavailable(tensorname)
                else:
                    self._check_if_steps_available(tname, steps)


# This is Rule interface
class Rule(ABC):
    """Abstract base class for rules evaluated against one or more trials."""

    def __init__(self, base_trial, other_trials=None,
                 config_json_file_path=None):
        # NOTE(review): config_json_file_path is currently unused (see TODO).
        self.base_trial = base_trial
        self.other_trials = other_trials

        self.trials = [base_trial]
        if self.other_trials is not None:
            self.trials += [x for x in self.other_trials]

        self.actions = None
        self.logger = logger
        pass
        # TODO parse_json_config_file_path

    @abstractmethod
    # returns a list of RequiredTensor objects, one for each trial
    def required_tensors(self, step, **kwargs):
        # TODO implement to read from jsonconfig file
        pass

    # step here is global step
    @abstractmethod
    def invoke_at_step(self, step, storage_handler=None, **kwargs):
        # implementation check for tensor
        # do checkpoint if needed at periodic interval --> storage_handler.save("last_processed_tensor",(tensorname,step))
        # checkpointing is needed if execution is longer duration, so that we don't
        # lose the work done in certain step
        pass

    @staticmethod
    def _fetch_tensors_for_trials(req_tensors_requests):
        # Prefetch the required tensors for every trial before evaluating.
        for req_tensors_request in req_tensors_requests:
            req_tensors_request._fetch_tensors()

    # step specific for which global step this rule was invoked
    # storage_handler is used to save & get states across different invocations
    def invoke(self, step, storage_handler=None, **kwargs):
        """Evaluate the rule at ``step``.

        Raises:
            RuleEvaluationConditionMet: when invoke_at_step returns truthy
                (after any registered actions have run).
        """
        self.logger.debug('Invoking rule {} for step {}'.format(self.__class__.__name__, step))
        self.base_trial.wait_for_steps([step])
        req_tensors_requests = self.required_tensors(step)
        self._fetch_tensors_for_trials(req_tensors_requests)

        # do not refresh during invoke at step since required tensors are already here
        with no_refresh(self.trials):
            val = self.invoke_at_step(step)

        if val:
            self.run_actions()
            raise RuleEvaluationConditionMet

    def register_action(self, actions):
        # ``actions`` is expected to be an iterable of action objects.
        self.actions = actions

    def run_actions(self):
        if self.actions is not None:
            for action in self.actions:
                action.run(rule_name=self.__class__.__name__)


def invoke_rule(rule_obj, start_step=0, end_step=None, raise_rule_eval=False):
    """Invoke ``rule_obj`` for every step in [start_step, end_step).

    Steps whose data is missing (TensorUnavailableForStep / StepUnavailable)
    are skipped. RuleEvaluationConditionMet is re-raised only when
    ``raise_rule_eval`` is True; otherwise evaluation continues with the
    next step.
    """
    step = start_step if start_step is not None else 0
    logger.info('Started execution of rule {}'.format(type(rule_obj).__name__))
    while (end_step is None) or (step < end_step):
        try:
            rule_obj.invoke(step)
        except (TensorUnavailableForStep, StepUnavailable) as e:
            logger.debug(str(e))
        except RuleEvaluationConditionMet as e:
            if raise_rule_eval:
                raise e
        step += 1
Required for allzero rule.""") + args = parser.parse_args() + + if args.rule_name is None: + raise RuntimeError('Needs rule name to invoke') + + tr = create_trial(args.trial_dir, range_steps=(args.start_step, args.end_step)) + if args.rule_name.lower() == 'vanishinggradient': + from tornasole.rules.generic.vanishing_grad import VanishingGradient + r = VanishingGradient(tr) + elif args.rule_name.lower() == 'explodingtensor': + from tornasole.rules.generic.exploding_tensor import ExplodingTensor + r = ExplodingTensor(tr) + elif args.rule_name.lower() == 'weightupdateratio': + from tornasole.rules.generic.weight_update_ratio import WeightUpdateRatio + r = WeightUpdateRatio(tr) + elif args.rule_name.lower() == 'allzero': + if len(args.collections) == 0 and len(args.tensor_regex) == 0: + raise ValueError('Please provide either the list of collection names or list of regex patterns for invoking ' + 'this rule.') + from tornasole.rules.generic.all_zero import AllZero + r = AllZero(tr, args.collections, args.tensor_regex) + else: + raise ValueError('Please invoke any rules which take multiple trials, ' + 'or custom rules by passing the rule object to ' + 'invoke_rule() function. We do not currently ' + 'support running such rules from this python script.' 
+ 'Please refer to examples/scripts/ for examples' + 'on how to call invoke_rule') + + invoke_rule(r, start_step=args.start_step, end_step=args.end_step, + raise_rule_eval=args.raise_rule_eval_cond_exception) diff --git a/tornasole/tensorflow/__init__.py b/tornasole/tensorflow/__init__.py new file mode 100644 index 0000000000..63f9791b83 --- /dev/null +++ b/tornasole/tensorflow/__init__.py @@ -0,0 +1,10 @@ +from .hook import TornasoleHook +from .collection import Collection, CollectionManager + +from .collection import get_collections, get_collection, \ + load_collections, export_collections, \ + add_to_collection, add_to_default_collection, reset_collections + +from .optimizer import TornasoleOptimizer +from tornasole import SaveConfig, ReductionConfig +from tornasole import modes \ No newline at end of file diff --git a/tornasole/tensorflow/collection.py b/tornasole/tensorflow/collection.py new file mode 100644 index 0000000000..b0f6967818 --- /dev/null +++ b/tornasole/tensorflow/collection.py @@ -0,0 +1,153 @@ +import tensorflow as tf +from tornasole.core.save_config import SaveConfig +from tornasole.core.reduction_config import ReductionConfig +from tornasole.core.collection import Collection as BaseCollection +from tornasole.core.collection_manager import CollectionManager as BaseCollectionManager + + +class Collection(BaseCollection): + + def __init__(self, name, include_regex=None, + reduction_config=None, save_config=None): + super().__init__(name, include_regex, reduction_config, save_config) + self.tensors = [] + # has the new tensors added to graph + # reduction_tensor_names has the names of original tensors + # whose reductions these are + self.reduction_tensors_added = [] + + def add(self, arg): + if isinstance(arg, list) or isinstance(arg, set): + for a in arg: + self.add(a) + elif isinstance(arg, tf.Operation): + for t in arg.outputs: + self.add_tensor(t) + elif isinstance(arg, tf.Variable) or isinstance(arg, tf.Tensor): + self.add_tensor(arg) + 
else: + raise TypeError('Unknown type of argument %s.' + 'Add can only take tf.Operation, tf.Variable, tf.Tensor' + 'and list or set of any of the above.' % arg) + + def add_tensor(self, t): + self.add_tensor_name(t.name) + # tf tries to add variables both by tensor and variable. + # to avoid duplications, we need to check names + for x in self.tensors: + if x.name == t.name: + return + self.tensors.append(t) + + def add_reduction_tensor(self, t, original_tensor): + self.add_reduction_tensor_name(original_tensor.name) + # tf tries to add variables both by tensor and variable. + # to avoid duplications, we need to check names + for x in self.reduction_tensors_added: + if x.name == t.name: + return + self.reduction_tensors_added.append(t) + + def remove_tensor(self, t): + # have to compare names because tensors can have variables, \ + # we don't want to end up comparing tensors and variables + if t.name in self.tensor_names: + found_index = None + for i, lt in enumerate(self.tensors): + if lt.name == t.name: + found_index = i + + self.tensor_names.remove(t.name) + + # this can happen when tensors is cleared but tensor names is not cleared + # because of emptying tensors and reduction_tensors lists in + # prepare_collections + if found_index is None: + raise IndexError('Could not find tensor to remove') + self.tensors.pop(found_index) + + @staticmethod + def load(s): + if s is None or s == str(None): + return None + + separator = '!@' + parts = s.split(separator) + if parts[0] == 'v0': + assert len(parts) == 7 + list_separator = ',' + name = parts[1] + include = [x for x in parts[2].split(list_separator) if x] + tensor_names = set([x for x in parts[3].split(list_separator) if x]) + reduction_tensor_names = set([x for x in parts[4].split(list_separator) if x]) + reduction_config = ReductionConfig.load(parts[5]) + save_config = SaveConfig.load(parts[6]) + c = Collection(name, include_regex=include, + reduction_config=reduction_config, save_config=save_config) + 
c.reduction_tensor_names = reduction_tensor_names + c.tensor_names = tensor_names + return c + +class CollectionManager(BaseCollectionManager): + def __init__(self, create_default=True): + super().__init__() + if create_default: + self.create_collection('default') + + def create_collection(self, name): + self.collections[name] = Collection(name) + + @staticmethod + def load(filename): + cm = CollectionManager(create_default=False) + with open(filename, 'r') as f: + line = f.readline() + while line: + c = Collection.load(line.rstrip()) + cm.add(c) + line = f.readline() + return cm + + @staticmethod + def load_from_string(s): + cm = CollectionManager(create_default=False) + lines = s.split('\n') + for line in lines: + c = Collection.load(line.rstrip()) + cm.add(c) + return cm + +_collection_manager = CollectionManager() + +def reset_collections(): + global _collection_manager + del _collection_manager + _collection_manager = CollectionManager() + +def add_to_collection(collection_name, args): + get_collection(collection_name).add(args) + +def add_to_default_collection(args): + add_to_collection('default', args) + +def get_collection(collection_name): + try: + c = _collection_manager.get(collection_name) + except KeyError: + _collection_manager.add(collection_name) + c = _collection_manager.get(collection_name) + return c + +def get_collections(): + return _collection_manager.collections + +def export_collections(path): + if _collection_manager: + _collection_manager.export(path) + +def get_collection_manager(): + return _collection_manager + +def load_collections(path): + global _collection_manager + _collection_manager = CollectionManager.load(path) diff --git a/tornasole/tensorflow/hook.py b/tornasole/tensorflow/hook.py new file mode 100644 index 0000000000..8280ca4ffa --- /dev/null +++ b/tornasole/tensorflow/hook.py @@ -0,0 +1,374 @@ +import os +import socket +import atexit +import numpy as np +import tensorflow as tf +from .utils import * +from .reductions import 
get_tensorflow_reduction +from .collection import * +from tornasole.core.writer import FileWriter +from tornasole.core.utils import get_logger, flatten, get_reduction_tensor_name, \ + check_dir_exists, is_s3, match_inc +from tornasole.core.modes import ModeKeys, ALLOWED_MODES +from tornasole.core.save_config import SaveConfig +from tornasole.core.access_layer.utils import training_has_ended +from .save_manager import TFSaveManager + + +class TornasoleHook(tf.train.SessionRunHook): + def __init__(self, out_dir, + dry_run=False, + worker='worker0', + reduction_config=None, + save_config=SaveConfig(), + include_regex=None, + include_collections=['weights', 'gradients', 'default'], + save_all=False): + """ + A class used to represent the hook which gets attached to the + training process. This takes the form appropriate for the framework + such as tf.train.SessionRunHook for TF, Callback for keras... + + ... + + Attributes + ---------- + out_dir : str + represents a path into which tornasole outputs will be written to + dry_run : bool + when dry run is set, behavior is only described in the log file. + tensors are not actually saved. + worker: string + name of worker in a multi process training job + outputs and tensors are organized by this name during retrieval. + + save_config: SaveConfig object + Takes save config object which is applied as default for all included tensors. + A collection can optionally have its own saveconfig object + which overrides this for its tensors. + + reduction_config: ReductionConfig object + if passed, this reduction config object is used + as default for all tensors included. + A collection has its own saveconfig object + which overrides this for its tensors. if this is not passed, + tensor is saved in full. + + include_regex: list of str + takes as input the list of string representing regular expressions. Tensors whose names match + these regular expressions will be saved. 
These tensors will be available as part of the `default` + collection. + + include_collections: list of (str or collection objects) + takes as input the collections which should be saved. + if this is empty, it defaults to including all collections from code + + save_all: bool + a shortcut for saving all tensors in the model. + they are all saved in the collection `all` + """ + if not is_s3(out_dir)[0]: + out_dir = os.path.expanduser(out_dir) + check_dir_exists(out_dir) + self.out_dir = out_dir + self.out_base_dir = os.path.dirname(out_dir) + self.run_id = os.path.basename(out_dir) + + self.dry_run = dry_run + self.worker = worker if worker is not None else socket.gethostname() + if include_collections is None: + include_collections = [] + self.include_collections = flatten(include_collections) + if include_regex is not None: + get_collection('default').include(include_regex) + if 'default' not in self.include_collections: + self.include_collections.append('default') + + self.save_all = save_all + if self.save_all: + get_collection('all').include('.*') + if 'all' not in self.include_collections: + self.include_collections.append('all') + + if 'default' not in self.include_collections and get_collection('default').get_include_regex(): + self.logger.warn('The `default` collection was not passed to include_collections.' 
\ + 'So it is not being saved') + + self.save_manager = TFSaveManager(collection_manager=get_collection_manager(), + include_collections_names=self.include_collections, + default_save_config=save_config, + default_reduction_config=reduction_config) + + self.step = 0 + self.mode = ModeKeys.GLOBAL + self.mode_steps = {ModeKeys.GLOBAL: 0} + self.logger = get_logger() + self.writer = None + self.reduction_original_tensors = {} + self.subgraph_nodes_cache = {} + self.logger.info('Saving to {}'.format(self.out_dir)) + atexit.register(self.cleanup) + + def cleanup(self): + if self.writer is not None: + self.writer.flush() + self.writer.close() + #creates file "trial_prefix/END_OF_JOB.ts" at the end of training job. + # Trial prefix can be s3/local. + training_has_ended(self.out_dir) + + def set_mode(self, mode): + # train + if mode in ALLOWED_MODES: + self.mode = mode + else: + raise ValueError('Invalid mode {}. Valid modes are {}.' + .format(mode, ','.join(ALLOWED_MODES))) + + if mode not in self.mode_steps: + self.mode_steps[mode] = 0 + + def _process_matched_tensor(self, tensor, collection): + reduction_config = self.save_manager.get_reduction_config(collection) + # if reduction config and saveconfig.when_nan are set, the when_nan tensors will be reduced + # todo think about this + if reduction_config: + for reduction in reduction_config.reductions + reduction_config.norms: + self._add_reduction(tensor, reduction, collection, False) + for reduction in reduction_config.abs_reductions + reduction_config.abs_norms: + self._add_reduction(tensor, reduction, collection, True) + # here if reduction config was set, but tensors were added to collection, + # they will be removed and added to reduction_tensors + try: + collection.remove_tensor(tensor) + except IndexError: + # was not in the list + pass + else: + collection.add(tensor) + + def _check_and_add_tensor(self, t): + if t.dtype == tf.resource or t.dtype == tf.variant: + return False + + if not 
self.graph.is_fetchable(t.op): + return False + + added = False + for coll in self.save_manager.get_all_collections_to_save(): + if match_inc(t.name, coll.get_include_regex()) \ + or t.name in coll.tensor_names: + # or t.name in coll.reduction_tensor_names: + self._process_matched_tensor(t, coll) + # only matches with one collection + added = True + sc = self.save_manager.get_save_config(coll, self.mode) + if sc and match_inc(t.name, sc.when_nan): + # add when_nan tensors to watched, so they are returned + # matches for all collections + # self._process_matched_tensor(t, coll) + self.save_manager.add_when_nan_tensor(coll, t) + added = True + + return added + + def _add_reduction(self, tensor, reduction_name, collection, abs=False): + if tensor.dtype in [tf.bool, tf.string]: + return + tname = get_reduction_tensor_name(tensor.name, reduction_name, abs) + red_tensor = get_tensorflow_reduction(reduction_name, tensor, tname, abs=abs) + self.reduction_original_tensors[red_tensor.name] = tensor + collection.add_reduction_tensor(red_tensor, original_tensor=tensor) + + def _add_tensors(self): + # gradients and optimizer_variables added in user code or TornasoleOptimizer + + total_tensor_count = 0 + # todo: do we ever need inputs of the op + for op in self.graph.get_operations(): + for tensor in op.outputs: + self._check_and_add_tensor(tensor) + total_tensor_count += 1 + for variable in tf.global_variables(): + self._check_and_add_tensor(variable) + total_tensor_count += 1 + return total_tensor_count + + def begin(self): + # todo: handle multiple graphs in the model + self.graph = tf.get_default_graph() + + for coll_name, coll in get_collections().items(): + # hack to make multiple graphs work with the same tensor names + # this can happen when we use same hook for training and evaluation + # what is going on here is that we clear the tensors and reduction tensors + # but we use the tensor names field in collection to readd tensors + # from the new graph to the collection 
so we can them right + coll.tensors = [] + coll.reduction_tensors = [] + + wts = tf.trainable_variables() + add_to_collection('weights', wts) + + # todo: fix this coll.save_config.when_nan_tensors = [] + + # at this point we need all collections to be ready + # this may not be the case at creation of hook + # as user's code after hook might add collections + self.save_manager.prepare() + + # adds all tensors in graph based on regexes in collections default and other custom ones + self._add_tensors() + self.save_manager.prepare_tensors() + + for coll in self.save_manager.get_all_collections_to_save(): + self.logger.info(f'Saving the collection {coll.name} with {len(coll.tensor_names)} tensors ' \ + f'and {len(coll.reduction_tensors_added)} reductions for {len(coll.reduction_tensor_names)} tensors.') + self.logger.debug(f' Collection {coll.name} has tensors: {coll.tensors}') + self.logger.debug(f' Collection {coll.name} has reductions: {coll.reduction_tensors_added}') + + export_collections(os.path.join(self.out_dir, 'collections.ts')) + self._export_model() + + def _export_model(self): + # todo save model + pass + + def _save_this_step(self): + coll_save_state = self.save_manager.collections_to_save(self.mode, self.mode_steps[self.mode]) + tensors_to_save = {'watched': [], 'added': []} + for coll_name, save_state in coll_save_state.items(): + coll = get_collection(coll_name) + if save_state['step'] or save_state['when_nan']: + tensors_to_save['watched'].extend(coll.tensors) + tensors_to_save['added'].extend(coll.reduction_tensors_added) + if save_state['when_nan']: + tensors_to_save['watched'].extend( + self.save_manager.get_save_config(coll, self.mode).when_nan_tensors) + # dedup watched and added + tensors_to_save['watched'] = list(set(tensors_to_save['watched'])) + tensors_to_save['added'] = list(set(tensors_to_save['added'])) + return tensors_to_save + + def _filter_to_be_saved(self, dict_to_save, fetches): + if not isinstance(fetches, list) and not 
isinstance(fetches, tuple) \ + and not isinstance(fetches, dict): + fetches = [fetches] + fetches_tuple = tuple(fetches) + if fetches_tuple in self.subgraph_nodes_cache: + subgraph_nodes = self.subgraph_nodes_cache[fetches_tuple] + else: + original_fetch_ops = get_original_fetch_ops(fetches) + dest_names = [n.name for n in original_fetch_ops] + subgraph = tf.compat.v1.graph_util.extract_sub_graph( + tf.get_default_graph().as_graph_def(), dest_names) + _, subgraph_nodes, _ = extract_graph_summary(subgraph) + self.subgraph_nodes_cache[fetches_tuple] = subgraph_nodes + + # this also allows us to skip all the assign, read, initial_value, + # control_dependency nodes in the graph + # check that this run includes the ops whose tensors are to be saved + filtered = [] + skipped = [] + for tensor in dict_to_save['watched']: + if node_name(tensor.name) in subgraph_nodes: + filtered.append(tensor) + else: + skipped.append(tensor) + for tensor in dict_to_save['added']: + assert isinstance(tensor, tf.Tensor) + original_tensor = self.reduction_original_tensors[tensor.name] + if node_name(original_tensor.name) in subgraph_nodes: + filtered.append(tensor) + else: + skipped.append(tensor) + self.logger.debug(f'Skipped {len(skipped)} unreachable tensors: {skipped}') + + # todo(huilgolr) can we filter tensors with (0)shape here + return filtered + + def before_run(self, run_context): + tensors_to_save = self._save_this_step() + if len(tensors_to_save['watched'] + tensors_to_save['added']): + if run_context: + list_to_save = self._filter_to_be_saved(tensors_to_save, + run_context.original_args.fetches) + else: + list_to_save = tensors_to_save['watched'] + tensors_to_save['added'] + else: + list_to_save = [] + # self.logger.info('Skipping step %s' % str(self.step)) + + self.prev_to_be_saved = list_to_save + return tf.train.SessionRunArgs(list_to_save) if list_to_save else None + + def _save_tensor(self, tensor, value, running_size): + running_size += value.nbytes + if tensor.dtype == 
np.float16: + value = np.float32(value) + running_size += value.nbytes + this_size, this_shape = size_and_shape(value) + if this_size > 0: + self.logger.debug(f' Saving {tensor.name}, type={tensor.dtype}, shape={this_shape},' + + f'size={this_size}, running_size={running_size}') + if not self.dry_run: + self.writer.write_tensor(tdata=value, tname=tensor.name, + mode=self.mode, + mode_step=self.mode_steps[self.mode]) + else: + self.logger.debug(f' Not saving {tensor.name}, type={tensor.dtype}, shape={this_shape},' + + f'size={this_size}, running_size={running_size}') + return running_size + + def _check_when_nan_tensors(self, values): + tensors = self.prev_to_be_saved + is_nan_for_colls = set() + assert len(tensors) == len(values) + for i in range(len(tensors)): + tensor = tensors[i] + value = values[i] + if self.save_manager.is_when_nan_tensor(tensor.name): + is_nan = np.isnan(np.sum(value)) or np.isinf(np.sum(value)) + if is_nan: + is_nan_for_colls.update([x.name for x in self.save_manager.when_nan_collections(tensor.name)]) + if len(is_nan_for_colls) == len(self.save_manager.get_all_collections_to_save()): + # all collections are nan already, don't check other tensors + break + return is_nan_for_colls + + def _get_all_tensors_values(self, results): + for (item, value) in zip(self.prev_to_be_saved, results): + if not isinstance(value, list) or isinstance(value, tuple): + assert not (isinstance(item, list) or isinstance(item, tuple)) + yield item, value + elif isinstance(value, list) or isinstance(value, tuple): + assert (isinstance(item, list) or isinstance(item, tuple)) + for i in range(len(value)): + yield item[i], value[i] + + def after_run(self, run_context, run_values): + if self.prev_to_be_saved: + self.writer = FileWriter(logdir=self.out_base_dir, + trial=self.run_id, + step=self.step, + worker=self.worker) + self.logger.info(f'Saving for step {self.step}: {len(self.prev_to_be_saved)} objects') + running_size = 0 + is_nan_for_collections = 
self._check_when_nan_tensors(run_values.results) + for (item, value) in self._get_all_tensors_values(run_values.results): + save_state = self.save_manager.should_save_tensor(item.name, self.mode, + self.mode_steps[self.mode]) + from_colls = set([x.name for x in self.save_manager.from_collections(item.name)]) + if save_state['step'] or \ + (save_state['when_nan'] and from_colls.intersection(is_nan_for_collections)): + running_size = self._save_tensor(item, value, running_size) + else: + self.logger.debug(f'Not saving {item} as no nan seen') + self.logger.info(f'Save complete, saved {running_size} bytes') + self.writer.close() + self.step += 1 + self.mode_steps[self.mode] += 1 + + def end(self, sess): + pass + # self.logger.info('End of run') diff --git a/tornasole/tensorflow/keras.py b/tornasole/tensorflow/keras.py new file mode 100644 index 0000000000..113cb729b9 --- /dev/null +++ b/tornasole/tensorflow/keras.py @@ -0,0 +1,152 @@ +import keras +import os +import socket + +from .collection import * +from tornasole.core.writer import FileWriter +from tornasole.core.utils import get_logger, flatten, \ + check_dir_exists, is_s3 +from tornasole.core.modes import ModeKeys +from tornasole.core.save_config import SaveConfig +from tornasole.core.save_manager import SaveManager + + +class TornasoleHook(keras.callbacks.Callback): + def __init__(self, out_dir, + dry_run=False, + worker='worker0', + reduction_config=None, + save_config=SaveConfig(), + # TODO: support include_regex + # include_regex=None, + include_collections=['weights', 'gradients', 'metrics', 'default'], + save_all=False): + if not is_s3(out_dir)[0]: + out_dir = os.path.expanduser(out_dir) + check_dir_exists(out_dir) + self.out_dir = out_dir + self.out_base_dir = os.path.dirname(out_dir) + self.run_id = os.path.basename(out_dir) + + self.dry_run = dry_run + self.worker = worker if worker is not None else socket.gethostname() + if include_collections is None: + include_collections = [] + 
self.include_collections = flatten(include_collections) + # TODO: support include_regex + # if include_regex is not None: + # get_collection('default').include(include_regex) + # if 'default' not in self.include_collections: + # self.include_collections.append('default') + + self.save_all = save_all + if self.save_all: + get_collection('all').include('.*') + if 'all' not in self.include_collections: + self.include_collections.append('all') + + self.logger = get_logger() + if 'default' not in self.include_collections and get_collection('default').get_include_regex(): + self.logger.warn('The `default` collection was not passed to include_collections.' \ + 'So it is not being saved') + + self.save_manager = SaveManager(collection_manager=get_collection_manager(), + include_collections_names=self.include_collections, + default_save_config=save_config, + default_reduction_config=reduction_config) + + self.step = 0 + self.mode = ModeKeys.GLOBAL + self.mode_steps = {ModeKeys.GLOBAL: 0} + self.writer = None + self.logger.info('Saving to {}'.format(self.out_dir)) + self._collection_created = False + + super().__init__() + + def _export_collections( self, logs): + if self._collection_created: + return + + for k in logs: + get_collection("metrics").add_tensor_name(k) + + for layer in self.model.layers: + ws = layer.get_weights() + if len(ws) == 0: + continue + cfg = layer.get_config() + multi = len(ws) > 1 + for i in range(len(ws)): + tensor_name = cfg['name'] + if multi: + tensor_name += "_" + str(i) + get_collection("weights").add_tensor_name(tensor_name) + + add_to_collection("gradients", []) + + export_collections(os.path.join(self.out_dir, 'collections.ts')) + # at this point we need all collections to be ready + # this may not be the case at creation of hook + # as user's code after hook might add collections + self.save_manager.prepare() + self._collection_created = True + + def on_epoch_end(self, epoch, logs={}): + self.save_metrics(logs=logs, force=True) + 
self._delete_writer() + + def on_batch_end(self, batch, logs={}): + self._export_collections(logs) + self.save_metrics(logs=logs, force=False) + self.save_layer_data() + self._delete_writer() + self.step += 1 + self.mode_steps[self.mode] += 1 + #print( "Writer=", self.writer) + + + def _create_writer(self): + if self.writer is None: + self.writer = FileWriter(logdir=self.out_base_dir, + trial=self.run_id, + step=self.step, + worker=self.worker) + return self.writer + + def _delete_writer(self): + if self.writer: + self.writer.close() + self.writer = None + + def save_metrics(self, logs, force): + for k in logs: + save_state = self.save_manager.should_save_tensor(k, self.mode, + self.mode_steps[self.mode]) + if save_state['step'] or force: + val = logs[k] + self._create_writer() + self.writer.write_tensor(tname=k, tdata=val) + + def save_layer_data(self): + + assert len(self.model.layers) > 0 + + for layer in self.model.layers: + ws = layer.get_weights() + if len(ws) == 0: + continue + cfg = layer.get_config() + + + multi = len(ws) > 1 + for i, tensor_value in enumerate(ws): + tensor_name = cfg['name'] + if multi: + tensor_name += "_" + str(i) + save_state = self.save_manager.should_save_tensor(tensor_name, self.mode, + self.mode_steps[self.mode]) + if save_state['step']: + self._create_writer() + self.writer.write_tensor(tdata=tensor_value, tname=tensor_name) + \ No newline at end of file diff --git a/tornasole/tensorflow/optimizer.py b/tornasole/tensorflow/optimizer.py new file mode 100644 index 0000000000..818bc94db7 --- /dev/null +++ b/tornasole/tensorflow/optimizer.py @@ -0,0 +1,13 @@ +import tensorflow as tf +from .collection import * + + +class TornasoleOptimizer(tf.train.Optimizer): + def __init__(self, optimizer, use_locking=False, name='Tornasole'): + super(TornasoleOptimizer, self).__init__(use_locking, name) + self.optimizer = optimizer + add_to_collection('optimizer_variables', optimizer.variables()) + + def apply_gradients(self, grads_and_vars, 
global_step=None, name=None): + add_to_collection('gradients', [g for (g, v) in grads_and_vars]) + return self.optimizer.apply_gradients(grads_and_vars, global_step, name) diff --git a/tornasole/tensorflow/reductions.py b/tornasole/tensorflow/reductions.py new file mode 100644 index 0000000000..753a640a7a --- /dev/null +++ b/tornasole/tensorflow/reductions.py @@ -0,0 +1,37 @@ +import tensorflow as tf +from tornasole.core.reduction_config import ALLOWED_REDUCTIONS, ALLOWED_NORMS + + +def get_tensorflow_reduction(reduction_name, + tensor, tensor_name, abs=False): + if reduction_name in ['std', 'variance', 'l1', 'l2']: + # these reductions create a name with a weird suffix like squeeze or sqrt + # even if we pass the name we want to the op + # so we are using a random name first, then + # using identity op to rename it how we want it + temp_tensor_name = '' + else: + temp_tensor_name = tensor_name + + if reduction_name in ALLOWED_REDUCTIONS: + f = getattr(tf.math, 'reduce_' + reduction_name) + if abs: + op = f(tf.abs(tensor), name=temp_tensor_name) + else: + op = f(tensor, name=temp_tensor_name) + elif reduction_name in ALLOWED_NORMS: + if reduction_name in ['l1', 'l2']: + ord = int(reduction_name[1]) + else: + ord = reduction_name + if abs: + op = tf.norm(tf.abs(tensor), ord=ord, name=temp_tensor_name) + else: + op = tf.norm(tensor, ord=ord, name=temp_tensor_name) + else: + raise RuntimeError(f'Invalid reduction name {reduction_name}') + + if temp_tensor_name != tensor_name: + op = tf.identity(op, name=tensor_name) + return op + diff --git a/tornasole/tensorflow/save_manager.py b/tornasole/tensorflow/save_manager.py new file mode 100644 index 0000000000..90f6400257 --- /dev/null +++ b/tornasole/tensorflow/save_manager.py @@ -0,0 +1,45 @@ +from tornasole.core.save_manager import SaveManager + + +class TFSaveManager(SaveManager): + def __init__(self, collection_manager, include_collections_names, + default_reduction_config, + default_save_config): + 
super().__init__(collection_manager=collection_manager, + include_collections_names=include_collections_names, + default_reduction_config=default_reduction_config, + default_save_config=default_save_config) + self.when_nan_tensors = {} + + def prepare_tensors(self): + for c_name, c in self.collection_manager.get_collections().items(): + if c_name == 'when_nan': + continue + if c not in self.save_collections: + continue + for t in c.tensors + c.reduction_tensors_added: + self._add_tensor_to_collection(t, c) + + def _add_tensor_to_collection(self, t, c): + if t.name not in self.tensor_to_collection: + self.tensor_to_collection[t.name] = [c] + else: + self.tensor_to_collection[t.name].append(c) + + def add_when_nan_tensor(self, collection, tensor): + self.configs_for_collections[collection.name].add_when_nan_tensor(tensor) + if tensor.name not in self.when_nan_tensors: + self.when_nan_tensors[tensor.name] = [] + self.when_nan_tensors[tensor.name].append(collection) + self._add_tensor_to_collection(tensor, collection) + + if 'when_nan' not in self.collection_manager.collections: + self.collection_manager.create_collection('when_nan') + self.collection_manager.get('when_nan').add_tensor(tensor) + + def is_when_nan_tensor(self, tensor_name): + return tensor_name in self.when_nan_tensors + + def when_nan_collections(self, tensor_name): + return self.when_nan_tensors[tensor_name] + diff --git a/tornasole/tensorflow/utils.py b/tornasole/tensorflow/utils.py new file mode 100644 index 0000000000..ef4bd0d1c4 --- /dev/null +++ b/tornasole/tensorflow/utils.py @@ -0,0 +1,51 @@ +import tensorflow as tf + + +def node_name(n): + if n.startswith("^"): + return n[1:] + else: + return n.split(":")[0] + + +def extract_graph_summary(graph_def): + """Extracts useful information from the graph and returns them.""" + name_to_input_name = {} # Keyed by the dest node name. + name_to_node = {} # Keyed by node name. + + # Keeps track of node sequences. 
It is important to still output the + # operations in the original order. + name_to_seq_num = {} # Keyed by node name. + seq = 0 + for node in graph_def.node: + n = node_name(node.name) + name_to_node[n] = node + name_to_input_name[n] = [node_name(x) for x in node.input] + name_to_seq_num[n] = seq + seq += 1 + return name_to_input_name, name_to_node, name_to_seq_num + + +def get_original_fetch_ops(fetches): + if isinstance(fetches, tf.Tensor) or isinstance(fetches, tf.Variable): + return [fetches.op] + elif isinstance(fetches, tf.Operation): + return [fetches] + elif isinstance(fetches, list): + rval = [] + for f in fetches: + rval.extend(get_original_fetch_ops(f)) + return rval + elif isinstance(fetches, dict): + rval = [] + for key in fetches: + rval += get_original_fetch_ops(fetches[key]) + return rval + else: + raise RuntimeError('Invalid fetches') + + +def size_and_shape(t): + if type(t) == bytes or type(t) == str: + return (len(t), [len(t)]) + return (t.nbytes, t.shape) diff --git a/tornasole/trials/__init__.py b/tornasole/trials/__init__.py new file mode 100644 index 0000000000..18f4858a49 --- /dev/null +++ b/tornasole/trials/__init__.py @@ -0,0 +1,4 @@ +from .trial import Trial +from .local_trial import LocalTrial +from .s3_trial import S3Trial +from .utils import create_trial \ No newline at end of file diff --git a/tornasole/trials/local_trial.py b/tornasole/trials/local_trial.py new file mode 100644 index 0000000000..85e656fd6b --- /dev/null +++ b/tornasole/trials/local_trial.py @@ -0,0 +1,138 @@ +from .trial import EventFileTensor, Trial +from tornasole.core.utils import index +from tornasole.core.tfevent.util import EventFileLocation +from tornasole.core.collection_manager import CollectionManager +from tornasole.core.reader import FileReader + +import time +import os +import multiprocessing +import struct +from joblib import Parallel, delayed + + +class LocalTrial(Trial): + def __init__(self, name, dirname, + range_steps=None, parallel=True, + 
check=False): + super().__init__(name, range_steps=range_steps, parallel=parallel, check=check) + dirname = os.path.expanduser(dirname) + self.trial_dir = dirname + self.logger.info(f'Loading trial {name} at path {self.trial_dir}') + self.last_step_loaded = None + self._load_collections() + self._load_tensors() + + def _load_tensors(self): + try: + step_dirs = EventFileLocation.get_step_dirs(self.trial_dir) + except FileNotFoundError: + self.logger.debug('Waiting to see data for steps') + return + + if self.range_steps is not None: + step_dirs = [x for x in step_dirs if self._step_in_range(x)] + + step_dirs.sort() + + if self.last_step_loaded is not None: + self.logger.debug("Trying to load events for steps after {}" + .format(int(self.last_step_loaded))) + + i = index(step_dirs, self.last_step_loaded) + if i == len(step_dirs) - 1: + # no new step + return + else: + step_dirs = step_dirs[i+1:] + + self._read_step_dirs(step_dirs) + + def _load_collections(self): + collections_file_path = os.path.join(self.trial_dir, 'collections.ts') + num_times_before_warning = 10 + while True: + if os.path.exists(collections_file_path): + self.collection_manager = CollectionManager.load(collections_file_path) + self.logger.info(f'Loaded {len(self.collection_manager.collections)} collections') + break + else: + time.sleep(2) + num_times_before_warning -= 1 + if num_times_before_warning < 0: + self.logger.warning('Waiting to read collections') + else: + self.logger.debug('Waiting to read collections') + continue + + def refresh_tensors(self): + self._load_tensors() + + def __hash__(self): + return hash((self.name, self.trial_dir)) + + def __eq__(self, other): + return (self.name, self.trial_dir) == (other.name, other.trial_dir) + + def get_tensors(self, tname_steps_dict, should_regex_match=False): + # now we do not need to do anything since we read the full event file + pass + + def _read_step_dirs(self, step_dirs): + if len(step_dirs) == 0: + return + dirnames_efts = [] + if 
self.parallel: + # Ugly hack for https://github.com/awslabs/tornasole_rules/issues/66 + # Temp fix with intentional code duplication + # Expected to be fixed with the introduction of indexreader + try: + dirnames_efts = Parallel(n_jobs=multiprocessing.cpu_count(), verbose=0) \ + (delayed(self._read_folder) \ + (EventFileLocation.get_step_dir_path(self.trial_dir, step_dir), + read_data=self.read_data, check=self.check) \ + for step_dir in step_dirs) + # sort them as parallel returns in random order + # we want to sort them by dirname + dirnames_efts.sort(key=lambda x: int(os.path.basename(x[0]))) + except struct.error: + self.logger.warning('Failed to load with parallel. Loading events serially now') + self.parallel = False + self._read_step_dirs(step_dirs) + else: + for step_dir in step_dirs: + step_dir_path = EventFileLocation.get_step_dir_path(self.trial_dir, step_dir) + dirnames_efts.append(self._read_folder(step_dir_path, + read_data=self.read_data, + check=self.check)) + + for dirname, dir_efts in dirnames_efts: + self._add_tensors_at_steps(dir_efts) + + for dirname, efts in reversed(dirnames_efts): + if len(efts) > 0: + self.last_step_loaded = os.path.basename(dirname) + break + # make last_step_loaded equal to the newest dir which + # had non zero tensors so that we can + # look for newer steps with no tensors again. 
+ # note that if we load a non zero + # number of tensors from a dir, we are guaranteed that + # we can not load more tensors for that step since we use + # temp file for writing event files and do atomic move + + @staticmethod + def _read_folder(dirname, read_data=True, check=True): + res = [] + for fname in os.listdir(dirname): + if fname.endswith(".tfevents"): + full_fname = os.path.join(dirname, fname) + fr = FileReader(fname=full_fname) + summary_values = fr.read_tensors(read_data=read_data, check=check) + for sv in summary_values: + n, s, d, mode, mode_step = sv + eft = EventFileTensor(fname, tensor_name=n, step_num=s, tensor_value=d, + mode=mode, mode_step=mode_step) + res.append(eft) + + return dirname, res diff --git a/tornasole/trials/s3_trial.py b/tornasole/trials/s3_trial.py new file mode 100644 index 0000000000..7b902e9ca6 --- /dev/null +++ b/tornasole/trials/s3_trial.py @@ -0,0 +1,146 @@ +import time +import os + +from tornasole.core.access_layer.s3handler import ReadObjectRequest, ListRequest, S3Handler +from tornasole.core.tfevent.util import EventFileLocation +from tornasole.core.collection_manager import CollectionManager +from tornasole.core.tfrecord.tensor_reader import TensorReader + +from .trial import EventFileTensor, Trial + + +class S3Trial(Trial): + def __init__(self, name, bucket_name, prefix_name, + range_steps=None, + check=False): + """ + :param name: for sagemaker job, this should be sagemaker training job name + :param bucket_name: name of bucket where data is saved + :param prefix_name: name of prefix such that s3://bucket/prefix is where data is saved + :param range_steps: range_steps is a tuple representing (start_step, end_step). 
+ Only the data from steps in between this range will be loaded + :param check: whether to check checksum of data saved + """ + super().__init__(name, range_steps=range_steps, + parallel=False, check=check) + self.logger.info(f'Loading trial {name} at path s3://{bucket_name}/{prefix_name}') + self.bucket_name = bucket_name + self.prefix_name = prefix_name + self.last_event_token = None + + self.s3_handler = S3Handler() + + self._load_collections() + self._load_tensors() + + def _load_tensors(self): + self._read_all_events_file_from_s3() + + def _load_collections(self): + num_times_before_warning = 10 + while True: + # todo get this path from tornasole.core + key = os.path.join(self.prefix_name, 'collections.ts') + collections_req = ReadObjectRequest(self._get_s3_location(key)) + obj_data = self.s3_handler.get_objects([collections_req])[0] + if obj_data is None: + num_times_before_warning -= 1 + if num_times_before_warning < 0: + self.logger.warning('Waiting to read collections') + else: + self.logger.debug('Waiting to read collections') + time.sleep(2) + continue + + obj_data = obj_data.decode('utf-8') + self.collection_manager = CollectionManager.load_from_string(obj_data) + self.logger.debug('Loaded collections for trial {}'.format(self.name)) + return + + def __hash__(self): + return hash((self.name, self.bucket_name, self.prefix_name)) + + def __eq__(self, other): + return (self.name, self.bucket_name, self.prefix_name) \ + == (other.name, other.bucket_name, other.prefix_name) + + def get_tensors(self, tname_steps_dict, should_regex_match=False): + # to be used when getting selective tensors from S3 + # now we do not need to do anything since we read the full event file from S3 + pass + + def refresh_tensors(self): + #TODO if job finished + self._read_all_events_file_from_s3(start_after_key=self.last_event_token) + + def _list_s3_objects(self, bucket, prefix, start_after_key=None): + if start_after_key is None: + start_after_key = prefix + 
self.logger.debug(f'Trying to load events after {start_after_key}') + list_params = {'Bucket': bucket,'Prefix': prefix, 'StartAfter': start_after_key} + req = ListRequest(**list_params) + objects = self._list_prefixes([req]) + if len(objects) > 0: + self.last_event_token = objects[-1] + return objects + + def _read_all_events_file_from_s3(self, start_after_key=None): + # TODO + # if job ended is saved then there is no more listing required for this bucket prefix + # if job has ended, save that job ended + self.keys = [] + # todo get path for events from tornasole.core + objects = self._list_s3_objects(self.bucket_name, + os.path.join(self.prefix_name, 'events'), + start_after_key) + for objname in objects: + efl = EventFileLocation.match_regex(objname) + if efl: + if (self.range_steps is not None and self._step_in_range(efl.step_num)) or \ + self.range_steps is None: + self.keys.append(objname) + else: + self.logger.debug(f'Skipping object {objname}') + self.logger.debug(f'Loading {len(self.keys)} new steps') + self._read_keys() + + def _read_keys(self): + reqs = [] + filenames = [] + for key in self.keys: + reqs += self._read_key(key) + filenames += [self._get_s3_location(key)] + raw_data = self.s3_handler.get_objects(reqs) + tensors_in_eventfiles = [] + for i in range(len(raw_data)): + data = raw_data[i] + sf = self._read_tensors_from_data(data) + for tup in sf: + n, s, d, mode, mode_step = tup + eft = EventFileTensor(filenames[i], tensor_name=n, step_num=s, tensor_value=d, + mode=mode, mode_step=mode_step) + tensors_in_eventfiles.append(eft) + self._add_tensors_at_steps(tensors_in_eventfiles) + + def _read_key(self, key): + reqs = [] + full_name = self._get_s3_location(key) + self.logger.debug(f'Reading from {full_name}') + req = ReadObjectRequest(full_name) + reqs += [req] + return reqs + + def _get_s3_location(self, obj): + return 's3://' + self.bucket_name + "/" + obj + + def _read_tensors_from_data(self, data): + tr = TensorReader(data) + res = 
tr.read_tensors(read_data=self.read_data, check=self.check) + return list(res) + + # list_info will be a list of ListRequest objects. Returns list of lists of files for each request + def _list_prefixes(self, list_info): + files = self.s3_handler.list_prefixes(list_info) + if len(files) == 1: + files = files[0] + return files diff --git a/tornasole/trials/trial.py b/tornasole/trials/trial.py new file mode 100644 index 0000000000..fe1ec33171 --- /dev/null +++ b/tornasole/trials/trial.py @@ -0,0 +1,263 @@ +import re +import time +from bisect import bisect_left +from abc import ABC, abstractmethod + +from tornasole.core.tensor import Tensor, StepState +from tornasole.exceptions import * +from tornasole.analysis.utils import refresh + +from tornasole.core.tfevent.util import EventFileLocation +from tornasole.core.utils import get_logger, \ + flatten, reverse_reduction_tensor_name, TORNASOLE_REDUCTIONS_PREFIX +from tornasole.core.modes import ModeKeys + + +class EventFileTensor: + def __init__(self, filename, tensor_name, step_num, tensor_value, + mode=None, mode_step=None): + self.location = EventFileLocation.load_filename(filename) + self.tensor_name = tensor_name + self.tensor_value = tensor_value + self.step_num = step_num + if mode is None: + mode = ModeKeys.GLOBAL + if mode_step is None: + mode_step = step_num + self.mode = mode + self.mode_step = mode_step + + +class Trial(ABC): + def __init__(self, name, range_steps=None, parallel=True, read_data=True, check=False): + self.name = name + self._tensors = {} + + # nested dictionary from mode -> mode_step -> global_step + # will not have global mode as a key + self._mode_to_global = {} + + # dictionary from global_step -> (mode, mode_step) + # can have global mode as a value + self._global_to_mode = {} + + self.logger = get_logger() + self.parallel = parallel + self.read_data = read_data + self.check = check + self.range_steps = range_steps + self.collection_manager = None + + # this is turned off during rule 
invocation for performance reasons since + # required tensors are already fetched + self.dynamic_refresh = True + + if self.range_steps is not None: + assert self.range_steps[0] is None or \ + (isinstance(self.range_steps[0], int) and self.range_steps[0] >= 0) + assert self.range_steps[1] is None or \ + (isinstance(self.range_steps[1], int) and self.range_steps[1] >= 0) + if self.range_steps[1] is not None and self.range_steps[0] is not None: + assert int(self.range_steps[1]) > int(self.range_steps[0]), "range_steps should be of the form " \ + "(begin, end) where begin is less than end" + if self.range_steps[0] is not None and self.range_steps[1] is not None: + self.logger.info('Trial {} will look for steps between {} and {}' + .format(self.name, self.range_steps[0], self.range_steps[1])) + + @abstractmethod + def _load_tensors(self): + pass + + @abstractmethod + def _load_collections(self): + pass + + @abstractmethod + def refresh_tensors(self): + pass + + def maybe_refresh(self, name=None): + if self.dynamic_refresh: + if name is None: + self.refresh_tensors() + else: + self.refresh_tensor(name) + + def refresh_tensor(self, tname, steps=None): + # for now we load all tensors at once + self.refresh_tensors() + + def tensor(self, tname): + # will not show tensor if it was not written yet + # has tensor will refresh + if self.has_tensor(tname): + return self._tensors[tname] + else: + raise TensorUnavailable(tname) + + def has_tensor(self, tname): + # will return false if tensor was not written yet + if tname not in self._tensors: + self.maybe_refresh(tname) + return tname in self._tensors + + def set_tensor_value(self, event_file_tensor): + eft = event_file_tensor + # todo, use worker_name here + + if TORNASOLE_REDUCTIONS_PREFIX in eft.tensor_name: + tname, red_name, abs = reverse_reduction_tensor_name(eft.tensor_name) + else: + tname = eft.tensor_name + + if tname not in self._tensors: + t = Tensor(tname, trial=self) + self._tensors[tname] = t + t = 
self._tensors[tname] + + if eft.mode != ModeKeys.GLOBAL: + if eft.mode not in self._mode_to_global: + self._mode_to_global[eft.mode] = {} + if eft.mode_step not in self._mode_to_global[eft.mode]: + self._mode_to_global[eft.mode][eft.mode_step] = eft.step_num + + if eft.step_num not in self._global_to_mode: + self._global_to_mode[eft.step_num] = (eft.mode, eft.mode_step) + + if TORNASOLE_REDUCTIONS_PREFIX in eft.tensor_name: + t.add_reduction_step(eft.mode, eft.mode_step, + red_name, abs, eft.tensor_value) + else: + t.add_step(eft.mode, eft.mode_step, eft.tensor_value) + + def tensors(self): + self.maybe_refresh() + ts = list(self._tensors.keys()) + return ts + + def steps(self, mode=ModeKeys.GLOBAL): + return self.available_steps(mode) + + def available_steps(self, mode=ModeKeys.GLOBAL): + self.maybe_refresh() + if mode == ModeKeys.GLOBAL: + return sorted(self._global_to_mode.keys()) + elif mode in self._mode_to_global: + return sorted(self._mode_to_global[mode].keys()) + else: + return [] + + def _global_step_currently(self, mode, mode_step): + if mode == ModeKeys.GLOBAL: + return mode_step + elif mode in self._mode_to_global and \ + mode_step in self._mode_to_global[mode]: + return self._mode_to_global[mode][mode_step] + + def global_step(self, mode, mode_step): + s = self._global_step_currently(mode, mode_step) + if s is not None: + return s + else: + self.maybe_refresh() + return self._global_step_currently(mode, mode_step) + + def _modestep_mode_currently(self, global_step): + if global_step in self._global_to_mode: + return self._global_to_mode[global_step] + + def mode_modestep(self, global_step): + x = self._modestep_mode_currently(global_step) + if x: + return x + else: + self.maybe_refresh() + x = self._modestep_mode_currently(global_step) + if x: + return x + return None, None + + def mode_step(self, global_step): + # can return global step itself in some cases + x = self.mode_modestep(global_step) + if x: + return x[1] + + def mode(self, global_step): + # 
can return global mode in some cases + x = self.mode_modestep(global_step) + if x: + return x[0] + + def modes(self): + # will not return global mode + return self._mode_to_global.keys() + + def tensors_matching_regex(self, regex_list): + self.maybe_refresh() + matched_tensornames = [] + if not isinstance(regex_list, list): + regex_list = [regex_list] + regex_list = flatten(regex_list) + for tensorname in self._tensors.keys(): + self.logger.debug(f"TNAME: {tensorname}") + for regex_pattern in regex_list: + if re.match(regex_pattern, tensorname): + matched_tensornames.append(tensorname) + self.logger.debug(f"TNAME: {tensorname} matched regex:{regex_pattern}") + break + return matched_tensornames + + def collections(self): + return self.collection_manager.collections + + def collection(self, coll_name): + return self.collection_manager.get(coll_name) + + def tensors_in_collection(self, coll_name): + rval = set() + for x in self.collection(coll_name).get_tensor_names(): + rval.add(x) + for x in self.collection(coll_name).get_reduction_tensor_names(): + rval.add(x) + regex = self.collection(coll_name).get_include_regex() + if regex: + for x in self.tensors_matching_regex(regex): + rval.add(x) + return list(rval) + + def wait_for_steps(self, required_steps, mode=ModeKeys.GLOBAL): + with refresh(self): + for step in required_steps: + while True: + s = self.has_passed_step(step, mode) + if s == StepState.UNAVAILABLE: + raise StepUnavailable(step, mode) + elif s == StepState.AVAILABLE: + break + time.sleep(5) + + def has_passed_step(self, step, mode=ModeKeys.GLOBAL): + available_steps = sorted(self.available_steps(mode=mode)) + bisect_idx = bisect_left(available_steps, step) + if bisect_idx < len(available_steps): + if available_steps[bisect_idx] > step: + return StepState.UNAVAILABLE + elif available_steps[bisect_idx] == step: + return StepState.AVAILABLE + return StepState.NOT_YET_AVAILABLE + + def _add_tensors_at_steps(self, event_file_tensors): + for eft in 
event_file_tensors: + self.set_tensor_value(eft) + + def _step_in_range(self, x): + if self.range_steps[0] is not None: + begin = int(x) >= int(self.range_steps[0]) + else: + begin = True + if self.range_steps[1] is not None: + end = int(x) < int(self.range_steps[1]) + else: + end = True + return begin and end \ No newline at end of file diff --git a/tornasole/trials/trial_catalog.py b/tornasole/trials/trial_catalog.py new file mode 100644 index 0000000000..df6291fe2d --- /dev/null +++ b/tornasole/trials/trial_catalog.py @@ -0,0 +1,45 @@ +import os + + +class TrialCatalog(object): + def __init__(self): + self.trials = {} + pass + + def add_trial(self, trial_name, trial_object): + self.trials[trial_name] = trial_object + + def get_trial(self, trial_name): + return self.trials[trial_name] + + def get_trials(self): + return self.trials.keys() + + +class LocalTrialCatalog(TrialCatalog): + def __init__(self,localdir): + super().__init__() + self.localdir=localdir + + def list_candidates(self): + files_and_folders = os.listdir(self.localdir) + folders = [x for x in files_and_folders if os.path.isdir(os.path.join(self.localdir, x))] + return folders + +""" +class SageMakerTrialCatalog(TrialCatalog): + def __init__(self,endpoint,port): + super().__init__() + self.endpoint = endpoint + self.port = port + self.client = InfluxDBClient(host=self.endpoint, port=self.port) + self.client.switch_database('tornasole_deb') + + + def list_candidates(self): + points = self.client.query(f"select distinct(expid) from execdata") + res = [] + for p in points.get_points(): + res.append(p['distinct']) + return res +""" \ No newline at end of file diff --git a/tornasole/trials/utils.py b/tornasole/trials/utils.py new file mode 100644 index 0000000000..7737e562c2 --- /dev/null +++ b/tornasole/trials/utils.py @@ -0,0 +1,15 @@ +import os +from tornasole.core.utils import is_s3 +from .local_trial import LocalTrial +from .s3_trial import S3Trial + + +def create_trial(path, name=None, **kwargs): + 
if name is None: + name = os.path.basename(path) + s3, bucket_name, prefix_name = is_s3(path) + if s3: + return S3Trial(name=name, bucket_name=bucket_name, + prefix_name=prefix_name, **kwargs) + else: + return LocalTrial(name=name, dirname=path, **kwargs) diff --git a/tornasole_core/__init__.py b/tornasole_core/__init__.py deleted file mode 100644 index 5018d0ed12..0000000000 --- a/tornasole_core/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .save_config import SaveConfig -from .reduction_config import ReductionConfig -from .collection_manager import CollectionManager -from .collection import Collection \ No newline at end of file